From 6cbf8bbb195b67a069f3569b9f1d65c665a61dd5 Mon Sep 17 00:00:00 2001 From: "daiwenxun.vendor" Date: Tue, 18 Oct 2022 15:53:29 +0800 Subject: [PATCH 01/57] first commit --- requirements/optional.txt | 1 + tools/analysis_tools/get_flops.py | 92 +++++++++++++++++++++++++++++++ 2 files changed, 93 insertions(+) create mode 100644 tools/analysis_tools/get_flops.py diff --git a/requirements/optional.txt b/requirements/optional.txt index 3674e2b90b..89ee1b4791 100644 --- a/requirements/optional.txt +++ b/requirements/optional.txt @@ -10,3 +10,4 @@ PyTurboJPEG soundfile tensorboard wandb +fvcore diff --git a/tools/analysis_tools/get_flops.py b/tools/analysis_tools/get_flops.py new file mode 100644 index 0000000000..2fede7b401 --- /dev/null +++ b/tools/analysis_tools/get_flops.py @@ -0,0 +1,92 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse + +import torch + +try: + from fvcore.nn import (ActivationCountAnalysis, FlopCountAnalysis, + flop_count_str, flop_count_table, parameter_count) +except ImportError: + print('You may need to install fvcore for flops computation, ' + 'and you can use `pip install -r requirements/optional.txt` ' + 'to set up the environment') +from fvcore.nn.print_model_statistics import _format_size +from mmengine import Config + +from mmaction.registry import MODELS +from mmaction.utils import register_all_modules + + +def parse_args(): + parser = argparse.ArgumentParser(description='Get model flops and params') + parser.add_argument('config', help='config file path') + parser.add_argument( + '--shape', + type=int, + nargs='+', + default=[224, 224], + help='input image size') + args = parser.parse_args() + return args + + +def main(): + + args = parse_args() + + if len(args.shape) == 1: + input_shape = (1, 3, args.shape[0], args.shape[0]) + elif len(args.shape) == 2: + input_shape = (1, 3) + tuple(args.shape) + elif len(args.shape) == 4: + # n, c, h, w = args.shape + input_shape = tuple(args.shape) + elif len(args.shape) == 5: + # n, c, t, h, w = args.shape + input_shape = tuple(args.shape) + else: + raise ValueError('invalid input shape') + + cfg = Config.fromfile(args.config) + + register_all_modules() + model = MODELS.build(cfg.model) + model = model.cuda() + model.eval() + + if hasattr(model, 'extract_feat'): + model.forward = model.extract_feat + else: + raise NotImplementedError( + 'FLOPs counter is currently not currently supported with {}'. + format(model.__class__.__name__)) + + inputs = (torch.randn((1, *input_shape)), ) + flops_ = FlopCountAnalysis(model, inputs) + activations_ = ActivationCountAnalysis(model, inputs) + + flops = _format_size(flops_.total()) + activations = _format_size(activations_.total()) + params = _format_size(parameter_count(model)['']) + + flop_table = flop_count_table( + flops=flops_, + activations=activations_, + show_param_shapes=True, + ) + flop_str = flop_count_str(flops=flops_, activations=activations_) + + print('\n' + flop_str) + print('\n' + flop_table) + + split_line = '=' * 30 + print(f'{split_line}\nInput shape: {input_shape}\n' + f'Flops: {flops}\nParams: {params}\n' + f'Activation: {activations}\n{split_line}') + print('!!!Please be cautious if you use the results in papers. 
' + 'You may need to check if all ops are supported and verify that the ' + 'flops computation is correct.') + + +if __name__ == '__main__': + main() From 57f598184bd9609921005b265464168f4b427e66 Mon Sep 17 00:00:00 2001 From: Dai-Wenxun Date: Tue, 18 Oct 2022 16:09:01 +0800 Subject: [PATCH 02/57] fix lint --- requirements/optional.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/optional.txt b/requirements/optional.txt index 89ee1b4791..d2204c6441 100644 --- a/requirements/optional.txt +++ b/requirements/optional.txt @@ -1,5 +1,6 @@ av>=9.0 future +fvcore imgaug librosa lmdb @@ -10,4 +11,3 @@ PyTurboJPEG soundfile tensorboard wandb -fvcore From eea85ee09f08a1e0f1c12db5b392c02356e02f0f Mon Sep 17 00:00:00 2001 From: Dai-Wenxun Date: Tue, 18 Oct 2022 17:09:42 +0800 Subject: [PATCH 03/57] use cpu --- tools/analysis_tools/get_flops.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/analysis_tools/get_flops.py b/tools/analysis_tools/get_flops.py index 2fede7b401..6f5a6454db 100644 --- a/tools/analysis_tools/get_flops.py +++ b/tools/analysis_tools/get_flops.py @@ -51,7 +51,6 @@ def main(): register_all_modules() model = MODELS.build(cfg.model) - model = model.cuda() model.eval() if hasattr(model, 'extract_feat'): From da7ecac97a110d5d3e02906f507fcf6c4ea763a9 Mon Sep 17 00:00:00 2001 From: cir7 <33249023+cir7@users.noreply.github.com> Date: Thu, 20 Oct 2022 15:34:24 +0800 Subject: [PATCH 04/57] [CI] modify CI config to fix upstream library dependency (#2000) --- .circleci/test.yml | 6 +++--- .github/workflows/merge_stage_test.yml | 16 ++++++++-------- .github/workflows/pr_stage_test.yml | 14 +++++++------- 3 files changed, 18 insertions(+), 18 deletions(-) diff --git a/.circleci/test.yml b/.circleci/test.yml index 4421ca082b..7b9abc002e 100644 --- a/.circleci/test.yml +++ b/.circleci/test.yml @@ -66,7 +66,7 @@ jobs: pip install -U openmim mim install 'mmcv >= 2.0.0rc1' pip install git+ssh://git@github.com/open-mmlab/mmdetection.git@dev-3.x - pip install git+https://github.com/open-mmlab/mmclassification.git@1.x + pip install git+https://github.com/open-mmlab/mmclassification.git@dev-1.x pip install -r requirements.txt - when: condition: @@ -124,8 +124,8 @@ jobs: docker exec mmaction pip install git+https://git@github.com/open-mmlab/mmengine.git@main docker exec mmaction pip install -U openmim docker exec mmaction mim install 'mmcv >= 2.0.0rc1' - docker exec mmaction pip install git+https://git@github.com/open-mmlab/mmdetection.git@3.x - docker exec mmaction pip install git+https://github.com/open-mmlab/mmclassification.git@1.x + docker exec mmaction pip install git+https://git@github.com/open-mmlab/mmdetection.git@dev-3.x + docker exec mmaction pip install git+https://github.com/open-mmlab/mmclassification.git@dev-1.x docker exec mmaction pip install -r requirements.txt - when: condition: diff --git a/.github/workflows/merge_stage_test.yml b/.github/workflows/merge_stage_test.yml index a89d78367c..22a6390700 100644 --- a/.github/workflows/merge_stage_test.yml +++ b/.github/workflows/merge_stage_test.yml @@ -58,9 +58,9 @@ jobs: pip install -U openmim mim install 'mmcv >= 2.0.0rc1' - name: Install MMDet - run: pip install git+https://github.com/open-mmlab/mmdetection.git@3.x + run: pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x - name: Install MMCls - run: pip install git+https://github.com/open-mmlab/mmclassification.git@1.x + run: pip install git+https://github.com/open-mmlab/mmclassification.git@dev-1.x - name: Install PytorchVideo 
run: pip install pytorchvideo if: ${{matrix.torchvision == '0.10.0'}} @@ -126,9 +126,9 @@ jobs: pip install -U openmim mim install 'mmcv >= 2.0.0rc1' - name: Install MMDet - run: pip install git+https://github.com/open-mmlab/mmdetection.git@3.x + run: pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x - name: Install MMCls - run: pip install git+https://github.com/open-mmlab/mmclassification.git@1.x + run: pip install git+https://github.com/open-mmlab/mmclassification.git@dev-1.x - name: Install unittest dependencies run: pip install -r requirements.txt - name: Install PytorchVideo @@ -189,8 +189,8 @@ jobs: pip install git+https://github.com/open-mmlab/mmengine.git@main pip install -U openmim mim install 'mmcv >= 2.0.0rc1' - pip install git+https://github.com/open-mmlab/mmdetection.git@3.x - pip install git+https://github.com/open-mmlab/mmclassification.git@1.x + pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x + pip install git+https://github.com/open-mmlab/mmclassification.git@dev-1.x pip install -r requirements.txt - name: Install PytorchVideo run: python -m pip install pytorchvideo @@ -226,8 +226,8 @@ jobs: pip install git+https://github.com/open-mmlab/mmengine.git@main pip install -U openmim mim install 'mmcv >= 2.0.0rc1' - pip install git+https://github.com/open-mmlab/mmdetection.git@3.x - pip install git+https://github.com/open-mmlab/mmclassification.git@1.x + pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x + pip install git+https://github.com/open-mmlab/mmclassification.git@dev-1.x pip install -r requirements.txt - name: Install PytorchVideo run: python -m pip install pytorchvideo diff --git a/.github/workflows/pr_stage_test.yml b/.github/workflows/pr_stage_test.yml index b14578dd93..74c1145b5c 100644 --- a/.github/workflows/pr_stage_test.yml +++ b/.github/workflows/pr_stage_test.yml @@ -48,9 +48,9 @@ jobs: pip install -U openmim mim install 'mmcv >= 2.0.0rc1' - name: Install MMDet - run: pip install git+https://github.com/open-mmlab/mmdetection.git@3.x + run: pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x - name: Install MMCls - run: pip install git+https://github.com/open-mmlab/mmclassification.git@1.x + run: pip install git+https://github.com/open-mmlab/mmclassification.git@dev-1.x - name: Install unittest dependencies run: pip install -r requirements.txt - name: Install PytorchVideo @@ -113,8 +113,8 @@ jobs: pip install git+https://github.com/open-mmlab/mmengine.git@main pip install -U openmim mim install 'mmcv >= 2.0.0rc1' - pip install git+https://github.com/open-mmlab/mmdetection.git@3.x - pip install git+https://github.com/open-mmlab/mmclassification.git@1.x + pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x + pip install git+https://github.com/open-mmlab/mmclassification.git@dev-1.x pip install -r requirements.txt - name: Install PytorchVideo run: python -m pip install pytorchvideo @@ -145,7 +145,7 @@ jobs: - name: Upgrade pip run: | python -V - pip install pip --upgrade + python -m pip install pip --upgrade - name: Install librosa and soundfile run: python -m pip install librosa soundfile - name: Install lmdb @@ -159,8 +159,8 @@ jobs: pip install git+https://github.com/open-mmlab/mmengine.git@main pip install -U openmim mim install 'mmcv >= 2.0.0rc1' - pip install git+https://github.com/open-mmlab/mmdetection.git@3.x - pip install git+https://github.com/open-mmlab/mmclassification.git@1.x + pip install git+https://github.com/open-mmlab/mmdetection.git@dev-3.x + pip 
install git+https://github.com/open-mmlab/mmclassification.git@dev-1.x pip install -r requirements.txt - name: Install PytorchVideo run: python -m pip install pytorchvideo From b75ca202a144a7b4cae51b36b3cd8744ac031365 Mon Sep 17 00:00:00 2001 From: wxDai Date: Fri, 21 Oct 2022 16:17:50 +0800 Subject: [PATCH 05/57] [Fix] fix recognizer3d ut (#1988) --- configs/_base_/models/i3d_r50.py | 5 +- configs/_base_/models/r2plus1d_r34.py | 13 +- configs/_base_/models/slowfast_r50.py | 13 +- configs/_base_/models/slowonly_r50.py | 13 +- mmaction/datasets/transforms/formatting.py | 16 +- .../data_preprocessors/data_preprocessor.py | 46 +-- mmaction/models/recognizers/recognizer3d.py | 20 +- mmaction/structures/action_data_sample.py | 18 + tests/models/base.py | 3 +- tests/models/recognizers/test_recognizer3d.py | 320 ++++-------------- 10 files changed, 140 insertions(+), 327 deletions(-) diff --git a/configs/_base_/models/i3d_r50.py b/configs/_base_/models/i3d_r50.py index bb43133b34..fb55b9b06f 100644 --- a/configs/_base_/models/i3d_r50.py +++ b/configs/_base_/models/i3d_r50.py @@ -25,9 +25,6 @@ type='ActionDataPreprocessor', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], - format_shape='NCTHW'), - # model training and testing settings - train_cfg=None, - test_cfg=None) + format_shape='NCTHW')) # This setting refers to https://github.com/open-mmlab/mmaction/blob/master/mmaction/models/tenons/backbones/resnet_i3d.py#L329-L332 # noqa: E501 diff --git a/configs/_base_/models/r2plus1d_r34.py b/configs/_base_/models/r2plus1d_r34.py index 24323c72ed..86db7df22f 100644 --- a/configs/_base_/models/r2plus1d_r34.py +++ b/configs/_base_/models/r2plus1d_r34.py @@ -1,7 +1,3 @@ -preprocess_cfg = dict( - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - format_shape='NCTHW') # model settings model = dict( type='Recognizer3D', @@ -28,7 +24,8 @@ dropout_ratio=0.5, init_std=0.01, average_clips='prob'), - data_preprocessor=dict(type='ActionDataPreprocessor', **preprocess_cfg), - # model training and testing settings - train_cfg=None, - test_cfg=None) + data_preprocessor=dict( + type='ActionDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + format_shape='NCTHW')) diff --git a/configs/_base_/models/slowfast_r50.py b/configs/_base_/models/slowfast_r50.py index 714f8160cd..6e4ec69103 100644 --- a/configs/_base_/models/slowfast_r50.py +++ b/configs/_base_/models/slowfast_r50.py @@ -1,7 +1,3 @@ -preprocess_cfg = dict( - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - format_shape='NCTHW') # model settings model = dict( type='Recognizer3D', @@ -39,7 +35,8 @@ spatial_type='avg', dropout_ratio=0.5, average_clips='prob'), - data_preprocessor=dict(type='ActionDataPreprocessor', **preprocess_cfg), - # model training and testing settings - train_cfg=None, - test_cfg=None) + data_preprocessor=dict( + type='ActionDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + format_shape='NCTHW')) diff --git a/configs/_base_/models/slowonly_r50.py b/configs/_base_/models/slowonly_r50.py index 2b563426c0..7566d5afc8 100644 --- a/configs/_base_/models/slowonly_r50.py +++ b/configs/_base_/models/slowonly_r50.py @@ -1,8 +1,3 @@ -preprocess_cfg = dict( - mean=[123.675, 116.28, 103.53], - std=[58.395, 57.12, 57.375], - format_shape='NCTHW') - model = dict( type='Recognizer3D', backbone=dict( @@ -22,6 +17,8 @@ spatial_type='avg', dropout_ratio=0.5, average_clips='prob'), - data_preprocessor=dict(type='ActionDataPreprocessor', 
**preprocess_cfg), - train_cfg=None, - test_cfg=None) + data_preprocessor=dict( + type='ActionDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + format_shape='NCTHW')) diff --git a/mmaction/datasets/transforms/formatting.py b/mmaction/datasets/transforms/formatting.py index 1608391d45..84d0e899eb 100644 --- a/mmaction/datasets/transforms/formatting.py +++ b/mmaction/datasets/transforms/formatting.py @@ -1,4 +1,6 @@ # Copyright (c) OpenMMLab. All rights reserved. +from typing import Sequence + import numpy as np import torch from mmcv.transforms import BaseTransform, to_tensor @@ -23,20 +25,24 @@ class PackActionInputs(BaseTransform): 'gt_labels': 'labels', } - def __init__(self, - meta_keys=('img_shape', 'img_key', 'video_id', 'timestamp')): + def __init__( + self, + meta_keys: Sequence[str] = ('img_shape', 'img_key', 'video_id', + 'timestamp') + ) -> None: self.meta_keys = meta_keys def transform(self, results: dict) -> dict: """Method to pack the input data. + Args: results (dict): Result dict from the data pipeline. Returns: dict: - - 'inputs' (Tensor): The forward data of models. - - 'data_sample' (ActionDataSample): The annotation info of the - sample. + - 'inputs' (torch.Tensor): The forward data of models. + - 'data_sample' (:obj:`ActionDataSample`): The annotation + info of the sample. """ packed_results = dict() if 'imgs' in results: diff --git a/mmaction/models/data_preprocessors/data_preprocessor.py b/mmaction/models/data_preprocessors/data_preprocessor.py index 4a2886d586..e41f32be74 100644 --- a/mmaction/models/data_preprocessors/data_preprocessor.py +++ b/mmaction/models/data_preprocessors/data_preprocessor.py @@ -1,11 +1,10 @@ # Copyright (c) OpenMMLab. All rights reserved. -from typing import Sequence, Union +from typing import Optional, Sequence, Union import torch from mmengine.model import BaseDataPreprocessor, stack_batch from mmaction.registry import MODELS -from mmaction.utils import OptConfigType @MODELS.register_module() @@ -14,26 +13,27 @@ class ActionDataPreprocessor(BaseDataPreprocessor): Args: mean (Sequence[float or int, optional): The pixel mean of channels - of images or stacked optical flow. Default: None. + of images or stacked optical flow. Defaults to None. std (Sequence[float or int], optional): The pixel standard deviation - of channels of images or stacked optical flow. Default: None. + of channels of images or stacked optical flow. Defaults to None. pad_size_divisor (int): The size of padded image should be - divisible by ``pad_size_divisor``. Default: 1. - pad_value (float or int): The padded pixel value. Default: 0. + divisible by ``pad_size_divisor``. Defaults to 1. + pad_value (float or int): The padded pixel value. Defaults to 0. to_rgb (bool): Whether to convert image from BGR to RGB. - Default: False. - blending (dict or ConfigDict, optional): Config for batch blending. - Default: None. - format_shape (str): Format shape of input data. Default: 'NCHW'. + Defaults to False. + blending (dict, optional): Config for batch blending. + Defaults to None. + format_shape (str): Format shape of input data. + Defaults to ``'NCHW'``. 
""" def __init__(self, - mean: Sequence[Union[float, int]] = None, - std: Sequence[Union[float, int]] = None, + mean: Optional[Sequence[Union[float, int]]] = None, + std: Optional[Sequence[Union[float, int]]] = None, pad_size_divisor: int = 1, pad_value: Union[float, int] = 0, to_rgb: bool = False, - blending: OptConfigType = None, + blending: Optional[dict] = None, format_shape: str = 'NCHW') -> None: super().__init__() self.pad_size_divisor = pad_size_divisor @@ -54,12 +54,14 @@ def __init__(self, else: raise ValueError(f'Invalid format shape: {format_shape}') - self.register_buffer('mean', - torch.tensor(mean).view(normalizer_shape), - False) - self.register_buffer('std', - torch.tensor(std).view(normalizer_shape), - False) + self.register_buffer( + 'mean', + torch.tensor(mean, dtype=torch.float32).view(normalizer_shape), + False) + self.register_buffer( + 'std', + torch.tensor(std, dtype=torch.float32).view(normalizer_shape), + False) else: self._enable_normalize = False @@ -77,10 +79,10 @@ def forward(self, data: Sequence[dict], training: bool = False) -> tuple: training (bool): Whether to enable training time augmentation. Returns: - Tuple[Tensor, list]: Data in the same format as the model - input. + Tuple[torch.Tensor, list]: Data in the same format as the model + input. """ - data = super().forward(data) + data = self.cast_data(data) inputs, data_samples = data['inputs'], data['data_samples'] # --- Pad and stack -- diff --git a/mmaction/models/recognizers/recognizer3d.py b/mmaction/models/recognizers/recognizer3d.py index d972e46e5f..9de211d618 100644 --- a/mmaction/models/recognizers/recognizer3d.py +++ b/mmaction/models/recognizers/recognizer3d.py @@ -3,7 +3,7 @@ from torch import Tensor from mmaction.registry import MODELS -from mmaction.utils import SampleList +from mmaction.utils import OptSampleList from .base import BaseRecognizer @@ -14,20 +14,20 @@ class Recognizer3D(BaseRecognizer): def extract_feat(self, inputs: Tensor, stage: str = 'neck', - data_samples: SampleList = None, + data_samples: OptSampleList = None, test_mode: bool = False) -> tuple: """Extract features of different stages. Args: - inputs (Tensor): The input data. + inputs (torch.Tensor): The input data. stage (str): Which stage to output the feature. - Defaults to ``neck``. - data_samples (List[:obj:`ActionDataSample`]): Action data + Defaults to ``'neck'``. + data_samples (list[:obj:`ActionDataSample`], optional): Action data samples, which are only needed in training. Defaults to None. - test_mode: (bool): Whether in test mode. Defaults to False. + test_mode (bool): Whether in test mode. Defaults to False. Returns: - Tensor: The extracted features. + torch.Tensor: The extracted features. dict: A dict recording the kwargs for downstream pipeline. These keys are usually included: ``loss_aux``. @@ -46,7 +46,7 @@ def extract_feat(self, # 4) `num_clips` in `SampleFrames` or its subclass if `clip_len != 1` inputs = inputs.view((-1, ) + inputs.shape[2:]) - # Check settings of test. + # Check settings of test if test_mode: if self.test_cfg is not None: loss_predict_kwargs['fcn_test'] = self.test_cfg.get( @@ -86,7 +86,7 @@ def extract_feat(self, return x, loss_predict_kwargs else: - # Return features extracted through backbone. + # Return features extracted through backbone x = self.backbone(inputs) if stage == 'backbone': return x, loss_predict_kwargs @@ -95,7 +95,7 @@ def extract_feat(self, if self.with_neck: x, loss_aux = self.neck(x, data_samples=data_samples) - # Return features extracted through neck. 
+ # Return features extracted through neck loss_predict_kwargs['loss_aux'] = loss_aux if stage == 'neck': return x, loss_predict_kwargs diff --git a/mmaction/structures/action_data_sample.py b/mmaction/structures/action_data_sample.py index a33ba8c01f..7de628d993 100644 --- a/mmaction/structures/action_data_sample.py +++ b/mmaction/structures/action_data_sample.py @@ -1,9 +1,27 @@ # Copyright (c) OpenMMLab. All rights reserved. +from typing import Union + +import numpy as np +import torch from mmengine.structures import BaseDataElement, InstanceData, LabelData class ActionDataSample(BaseDataElement): + def set_gt_labels(self, value: Union[int, + np.ndarray]) -> 'ActionDataSample': + """Set label of ``gt_labels``.""" + if isinstance(value, int): + value = torch.LongTensor([value]) + elif isinstance(value, np.ndarray): + value = torch.from_numpy(value) + else: + raise TypeError(f'Type {type(value)} is not an ' + f'available label type.') + + self.gt_labels = LabelData(item=value) + return self + @property def gt_labels(self): return self._gt_labels diff --git a/tests/models/base.py b/tests/models/base.py index 50a19becd3..4d74f531da 100644 --- a/tests/models/base.py +++ b/tests/models/base.py @@ -21,7 +21,7 @@ def generate_backbone_demo_inputs(input_shape=(1, 3, 64, 64)): Args: input_shape (tuple): input batch dimensions. - Default: (1, 3, 64, 64). + Defaults to ``(1, 3, 64, 64)``. """ imgs = np.random.random(input_shape) imgs = torch.FloatTensor(imgs) @@ -29,6 +29,7 @@ def generate_backbone_demo_inputs(input_shape=(1, 3, 64, 64)): return imgs +# TODO Remove this API def generate_recognizer_demo_inputs( input_shape=(1, 3, 3, 224, 224), model_type='2D'): """Create a superset of inputs needed to run test or train batches. diff --git a/tests/models/recognizers/test_recognizer3d.py b/tests/models/recognizers/test_recognizer3d.py index 6f503ede63..090c5ea6d1 100644 --- a/tests/models/recognizers/test_recognizer3d.py +++ b/tests/models/recognizers/test_recognizer3d.py @@ -1,16 +1,39 @@ # Copyright (c) OpenMMLab. All rights reserved. 
+from unittest.mock import MagicMock + import torch from mmaction.registry import MODELS +from mmaction.structures import ActionDataSample from mmaction.utils import register_all_modules -from ..base import generate_recognizer_demo_inputs, get_recognizer_cfg +from ..base import get_recognizer_cfg + + +def train_test_step(cfg, input_shape): + recognizer = MODELS.build(cfg.model) + num_classes = cfg.model.cls_head.num_classes + data_batch = { + 'inputs': [torch.randint(0, 256, input_shape)], + 'data_samples': [ActionDataSample().set_gt_labels(2)] + } + # test train_step + optim_wrapper = MagicMock() + loss_vars = recognizer.train_step(data_batch, optim_wrapper) + assert 'loss' in loss_vars + assert 'loss_cls' in loss_vars + optim_wrapper.update_params.assert_called_once() + + # test test_step + with torch.no_grad(): + predictions = recognizer.test_step(data_batch) + score = predictions[0].pred_scores.item + assert len(predictions) == 1 + assert score.shape, torch.Size([num_classes]) + assert torch.min(score) >= 0 + assert torch.max(score) <= 1 -def assert_output(x): - if not isinstance(x, torch.Tensor): - assert isinstance(x, tuple) - for i in x: - assert isinstance(i, torch.Tensor) + return loss_vars, predictions def test_i3d(): @@ -19,55 +42,8 @@ def test_i3d(): 'i3d/i3d_imagenet-pretrained-r50_8xb8-32x2x1-100e_kinetics400-rgb.py') config.model['backbone']['pretrained2d'] = False config.model['backbone']['pretrained'] = None - - recognizer = MODELS.build(config.model) - - input_shape = (1, 3, 3, 8, 32, 32) - demo_inputs = generate_recognizer_demo_inputs(input_shape, '3D') - - imgs = demo_inputs['imgs'] - gt_labels = demo_inputs['gt_labels'] - - # parrots 3dconv is only implemented on gpu - if torch.__version__ == 'parrots': - if torch.cuda.is_available(): - recognizer = recognizer.cuda() - imgs = imgs.cuda() - gt_labels = gt_labels.cuda() - losses = recognizer(imgs, gt_labels) - assert isinstance(losses, dict) - - # Test forward test - with torch.no_grad(): - img_list = [img[None, :] for img in imgs] - for one_img in img_list: - recognizer(one_img, None, return_loss=False) - - # Test forward gradcam - recognizer(imgs, gradcam=True) - for one_img in img_list: - recognizer(one_img, gradcam=True) - - # Test forward dummy - recognizer.forward_dummy(imgs, softmax=False) - res = recognizer.forward_dummy(imgs, softmax=True)[0] - assert torch.min(res) >= 0 - assert torch.max(res) <= 1 - - else: - losses = recognizer(imgs, gt_labels) - assert_output(losses) - - # Test forward test - with torch.no_grad(): - img_list = [img[None, :] for img in imgs] - for one_img in img_list: - recognizer(one_img, None, return_loss=False) - - # Test forward gradcam - recognizer(imgs, gradcam=True) - for one_img in img_list: - recognizer(one_img, gradcam=True) + input_shape = (1, 3, 8, 64, 64) # M C T H W + train_test_step(config, input_shape=input_shape) def test_r2plus1d(): @@ -77,107 +53,16 @@ def test_r2plus1d(): config.model['backbone']['pretrained2d'] = False config.model['backbone']['pretrained'] = None config.model['backbone']['norm_cfg'] = dict(type='BN3d') - - recognizer = MODELS.build(config.model) - - input_shape = (1, 3, 3, 8, 32, 32) - demo_inputs = generate_recognizer_demo_inputs(input_shape, '3D') - - imgs = demo_inputs['imgs'] - gt_labels = demo_inputs['gt_labels'] - - # parrots 3dconv is only implemented on gpu - if torch.__version__ == 'parrots': - if torch.cuda.is_available(): - recognizer = recognizer.cuda() - imgs = imgs.cuda() - gt_labels = gt_labels.cuda() - losses = recognizer(imgs, gt_labels) 
- assert isinstance(losses, dict) - - # Test forward test - with torch.no_grad(): - img_list = [img[None, :] for img in imgs] - for one_img in img_list: - recognizer(one_img, None, return_loss=False) - - # Test forward gradcam - recognizer(imgs, gradcam=True) - for one_img in img_list: - recognizer(one_img, gradcam=True) - else: - losses = recognizer(imgs, gt_labels) - assert_output(losses) - - # Test forward test - with torch.no_grad(): - img_list = [img[None, :] for img in imgs] - for one_img in img_list: - recognizer(one_img, None, return_loss=False) - - # Test forward gradcam - recognizer(imgs, gradcam=True) - for one_img in img_list: - recognizer(one_img, gradcam=True) + input_shape = (1, 3, 8, 64, 64) # M C T H W + train_test_step(config, input_shape=input_shape) def test_slowfast(): register_all_modules() config = get_recognizer_cfg( 'slowfast/slowfast_r50_8xb8-4x16x1-256e_kinetics400-rgb.py') - - recognizer = MODELS.build(config.model) - - input_shape = (1, 3, 3, 16, 32, 32) - demo_inputs = generate_recognizer_demo_inputs(input_shape, '3D') - - imgs = demo_inputs['imgs'] - gt_labels = demo_inputs['gt_labels'] - - # parrots 3dconv is only implemented on gpu - if torch.__version__ == 'parrots': - if torch.cuda.is_available(): - recognizer = recognizer.cuda() - imgs = imgs.cuda() - gt_labels = gt_labels.cuda() - losses = recognizer(imgs, gt_labels) - assert isinstance(losses, dict) - - # Test forward test - with torch.no_grad(): - img_list = [img[None, :] for img in imgs] - for one_img in img_list: - recognizer(one_img, None, return_loss=False) - - # Test forward gradcam - recognizer(imgs, gradcam=True) - for one_img in img_list: - recognizer(one_img, gradcam=True) - else: - losses = recognizer(imgs, gt_labels) - assert_output(losses) - - # Test forward test - with torch.no_grad(): - img_list = [img[None, :] for img in imgs] - for one_img in img_list: - recognizer(one_img, None, return_loss=False) - - # Test forward gradcam - recognizer(imgs, gradcam=True) - for one_img in img_list: - recognizer(one_img, gradcam=True) - - # Test the feature max_testing_views - if config.model.test_cfg is None: - config.model.test_cfg = {'max_testing_views': 1} - else: - config.model.test_cfg['max_testing_views'] = 1 - recognizer = MODELS.build(config.model) - with torch.no_grad(): - img_list = [img[None, :] for img in imgs] - for one_img in img_list: - recognizer(one_img, None, return_loss=False) + input_shape = (1, 3, 16, 64, 64) # M C T H W + train_test_step(config, input_shape=input_shape) def test_csn(): @@ -186,77 +71,8 @@ def test_csn(): 'csn/ircsn_ig65m-pretrained-r152_8xb12-32x2x1-58e_kinetics400-rgb.py') config.model['backbone']['pretrained2d'] = False config.model['backbone']['pretrained'] = None - - recognizer = MODELS.build(config.model) - - input_shape = (1, 3, 3, 8, 32, 32) - demo_inputs = generate_recognizer_demo_inputs(input_shape, '3D') - - imgs = demo_inputs['imgs'] - gt_labels = demo_inputs['gt_labels'] - - # parrots 3dconv is only implemented on gpu - if torch.__version__ == 'parrots': - if torch.cuda.is_available(): - recognizer = recognizer.cuda() - imgs = imgs.cuda() - gt_labels = gt_labels.cuda() - losses = recognizer(imgs, gt_labels) - assert isinstance(losses, dict) - - # Test forward test - with torch.no_grad(): - img_list = [img[None, :] for img in imgs] - for one_img in img_list: - recognizer(one_img, None, return_loss=False) - - # Test forward gradcam - recognizer(imgs, gradcam=True) - for one_img in img_list: - recognizer(one_img, gradcam=True) - else: - losses = 
recognizer(imgs, gt_labels) - assert_output(losses) - - # Test forward test - with torch.no_grad(): - img_list = [img[None, :] for img in imgs] - for one_img in img_list: - recognizer(one_img, None, return_loss=False) - - # Test forward gradcam - recognizer(imgs, gradcam=True) - for one_img in img_list: - recognizer(one_img, gradcam=True) - - -def test_tpn(): - register_all_modules() - config = get_recognizer_cfg( - 'tpn/tpn-slowonly_r50_8xb8-8x8x1-150e_kinetics400-rgb.py') - config.model['backbone']['pretrained'] = None - - recognizer = MODELS.build(config.model) - - input_shape = (1, 8, 3, 1, 32, 32) - demo_inputs = generate_recognizer_demo_inputs(input_shape, '3D') - - imgs = demo_inputs['imgs'] - gt_labels = demo_inputs['gt_labels'] - - losses = recognizer(imgs, gt_labels) - assert_output(losses) - - # Test forward test - with torch.no_grad(): - img_list = [img[None, :] for img in imgs] - for one_img in img_list: - recognizer(one_img, None, return_loss=False) - - # Test forward gradcam - recognizer(imgs, gradcam=True) - for one_img in img_list: - recognizer(one_img, gradcam=True) + input_shape = (1, 3, 8, 64, 64) # M C T H W + train_test_step(config, input_shape=input_shape) def test_timesformer(): @@ -265,28 +81,8 @@ def test_timesformer(): 'timesformer/timesformer_divST_8xb8-8x32x1-15e_kinetics400-rgb.py') config.model['backbone']['pretrained'] = None config.model['backbone']['img_size'] = 32 - - recognizer = MODELS.build(config.model) - - input_shape = (1, 3, 3, 8, 32, 32) - demo_inputs = generate_recognizer_demo_inputs(input_shape, '3D') - - imgs = demo_inputs['imgs'] - gt_labels = demo_inputs['gt_labels'] - - losses = recognizer(imgs, gt_labels) - assert_output(losses) - - # Test forward test - with torch.no_grad(): - img_list = [img[None, :] for img in imgs] - for one_img in img_list: - recognizer(one_img, None, return_loss=False) - - # Test forward gradcam - recognizer(imgs, gradcam=True) - for one_img in img_list: - recognizer(one_img, gradcam=True) + input_shape = (1, 3, 8, 32, 32) # M C T H W + train_test_step(config, input_shape=input_shape) def test_c3d(): @@ -295,25 +91,27 @@ def test_c3d(): 'c3d/c3d_sports1m-pretrained_8xb30-16x1x1-45e_ucf101-rgb.py') config.model['backbone']['pretrained'] = None config.model['backbone']['out_dim'] = 512 + input_shape = (1, 3, 16, 28, 28) # M C T H W + train_test_step(config, input_shape=input_shape) - recognizer = MODELS.build(config.model) - - input_shape = (1, 3, 3, 16, 28, 28) - demo_inputs = generate_recognizer_demo_inputs(input_shape, '3D') - - imgs = demo_inputs['imgs'] - gt_labels = demo_inputs['gt_labels'] - losses = recognizer(imgs, gt_labels) - assert_output(losses) +def test_slowonly(): + register_all_modules() + config = get_recognizer_cfg( + 'slowonly/slowonly_r50_8xb16-4x16x1-256e_kinetics400-rgb.py') + config.model['backbone']['pretrained2d'] = False + config.model['backbone']['pretrained'] = None + input_shape = (1, 3, 4, 32, 32) # M C T H W + train_test_step(config, input_shape=input_shape) - # Test forward test - with torch.no_grad(): - img_list = [img[None, :] for img in imgs] - for one_img in img_list: - recognizer(one_img, None, return_loss=False) - # Test forward gradcam - recognizer(imgs, gradcam=True) - for one_img in img_list: - recognizer(one_img, gradcam=True) +def test_tpn_slowonly(): + register_all_modules() + config = get_recognizer_cfg('tpn/tpn-slowonly_imagenet-pretrained-r50_' + '8xb8-8x8x1-150e_kinetics400-rgb.py') + config.model['backbone']['pretrained2d'] = False + 
config.model['backbone']['pretrained'] = None + input_shape = (1, 3, 4, 48, 48) # M C T H W + loss_vars, _ = train_test_step(config, input_shape=input_shape) + assert 'loss_aux' in loss_vars + assert loss_vars['loss_cls'] + loss_vars['loss_aux'] == loss_vars['loss'] From 93932d920b2728a66e8bffe9825ad824e627fdfc Mon Sep 17 00:00:00 2001 From: Kai Hu Date: Fri, 21 Oct 2022 04:21:02 -0400 Subject: [PATCH 06/57] fix slowonly (#2006) --- configs/recognition/slowonly/README.md | 18 +++++++++--------- configs/recognition/slowonly/metafile.yml | 6 +++--- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/configs/recognition/slowonly/README.md b/configs/recognition/slowonly/README.md index 64c9c3d7bd..0e0dc178e1 100644 --- a/configs/recognition/slowonly/README.md +++ b/configs/recognition/slowonly/README.md @@ -22,20 +22,20 @@ We present SlowFast networks for video recognition. Our model involves (i) a Slo | frame sampling strategy | scheduler | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | inference time(video/s) | gpu_mem(M) | config | ckpt | log | | :---------------------: | :--------------: | :------------: | :--: | :------------------: | :------: | :------: | :------: | :---------------: | :---------------------: | :--------: | :----------------: | :--------------: | :-------------: | -| 4x16x1 | Linear+Cosine | short-side 320 | 8 | ResNet50 | None | 72.68 | 90.68 | 10 clips x 3 crop | x | 5799 | [config](/configs/recognition/slowonly/slowonly_r50_8xb16-4x16x1-256e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r50_8xb16-4x16x1-256e_kinetics400-rgb/slowonly_r50_4x16x1_256e_8xb16_kinetics400_rgb_20220901-f6a40d08.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r50_8xb16-4x16x1-256e_kinetics400-rgb/slowonly_r50_4x16x1_256e_8xb16_kinetics400_rgb.log) | -| 8x8x1 | Linear+Cosine | short-side 320 | 8 | ResNet50 | None | 74.82 | 91.80 | 10 clips x 3 crop | x | 11089 | [config](/configs/recognition/slowonly/slowonly_r50_8xb16-8x8x1-256e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r50_8xb16-8x8x1-256e_kinetics400-rgb/slowonly_r50_8xb16-8x8x1-256e_kinetics400-rgb_20220901-2132fc87.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r50_8xb16-8x8x1-256e_kinetics400-rgb/slowonly_r50_8xb16-8x8x1-256e_kinetics400-rgb.log) | -| 8x8x1 | Linear+Cosine | short-side 320 | 8 | ResNet101 | None | 76.28 | 92.70 | 10 clips x 3 crop | x | 16516 | [config](/configs/recognition/slowonly/slowonly_r101_8xb16-8x8x1-196e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r101_8xb16-8x8x1-196e_kinetics400-rgb/slowonly_r101_8xb16-8x8x1-196e_kinetics400-rgb_20220901-e6281431.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r101_8xb16-8x8x1-196e_kinetics400-rgb/slowonly_r101_8xb16-8x8x1-196e_kinetics400-rgb.log) | -| 4x16x1 | Linear+MultiStep | short-side 320 | 8 | ResNet50 | ImageNet | 74.83 | 91.60 | 10 clips x 3 crop | x | 5797 | [config](/configs/recognition/slowonly/slowonly_imagenet-pretrained-r50_8xb16-4x16x1-steplr-150e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_imagenet-pretrained-r50_8xb16-4x16x1-steplr-150e_kinetics400-rgb/slowonly_imagenet-pretrained-r50_8xb16-4x16x1-steplr-150e_kinetics400-rgb_20220901-e7b65fad.pth) | 
[log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_imagenet-pretrained-r50_8xb16-4x16x1-steplr-150e_kinetics400-rgb/slowonly_imagenet-pretrained-r50_8xb16-4x16x1-steplr-150e_kinetics400-rgb.log) | -| 8x8x1 | Linear+MultiStep | short-side 320 | 8 | ResNet50 | ImageNet | 75.96 | 92.40 | 10 clips x 3 crop | x | 11089 | [config](/configs/recognition/slowonly/slowonly_imagenet-pretrained-r50_8xb16-8x8x1-steplr-150e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_imagenet-pretrained-r50_8xb16-8x8x1-steplr-150e_kinetics400-rgb/slowonly_imagenet-pretrained-r50_8xb16-8x8x1-steplr-150e_kinetics400-rgb_20220901-df42dc84.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_imagenet-pretrained-r50_8xb16-8x8x1-steplr-150e_kinetics400-rgb/slowonly_imagenet-pretrained-r50_8xb16-8x8x1-steplr-150e_kinetics400-rgb.log) | -| 4x16x1 | Linear+MultiStep | short-side 320 | 8 | ResNet50 (NonLocalEmbedGauss) | ImageNet | 74.84 | 91.41 | 10 clips x 3 crop | x | 8198 | [config](/configs/recognition/slowonly/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-4x16x1-steplr-150e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-4x16x1-steplr-150e_kinetics400-rgb/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-4x16x1-steplr-150e_kinetics400-rgb_20220901-cf739c75.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-4x16x1-steplr-150e_kinetics400-rgb/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-4x16x1-steplr-150e_kinetics400-rgb.log) | -| 8x8x1 | Linear+MultiStep | short-side 320 | 8 | ResNet50 (NonLocalEmbedGauss) | ImageNet | 76.35 | 92.18 | 10 clips x 3 crop | x | 11089 | [config](/configs/recognition/slowonly/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-8x8x1-steplr-150e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-8x8x1-steplr-150e_kinetics400-rgb/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-8x8x1-steplr-150e_kinetics400-rgb_20220901-df42dc84.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-8x8x1-steplr-150e_kinetics400-rgb/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-8x8x1-steplr-150e_kinetics400-rgb.log) | +| 4x16x1 | Linear+Cosine | short-side 320 | 8 | ResNet50 | None | 72.97 | 90.88 | 10 clips x 3 crop | x | 5799 | [config](/configs/recognition/slowonly/slowonly_r50_8xb16-4x16x1-256e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r50_8xb16-4x16x1-256e_kinetics400-rgb/slowonly_r50_4x16x1_256e_8xb16_kinetics400-rgb_20220901-f6a40d08.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r50_8xb16-4x16x1-256e_kinetics400-rgb/slowonly_r50_4x16x1_256e_8xb16_kinetics400-rgb.log) | +| 8x8x1 | Linear+Cosine | short-side 320 | 8 | ResNet50 | None | 75.15 | 92.11 | 10 clips x 3 crop | x | 11089 | [config](/configs/recognition/slowonly/slowonly_r50_8xb16-8x8x1-256e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r50_8xb16-8x8x1-256e_kinetics400-rgb/slowonly_r50_8xb16-8x8x1-256e_kinetics400-rgb_20220901-2132fc87.pth) | 
[log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r50_8xb16-8x8x1-256e_kinetics400-rgb/slowonly_r50_8xb16-8x8x1-256e_kinetics400-rgb.log) | +| 8x8x1 | Linear+Cosine | short-side 320 | 8 | ResNet101 | None | 76.59 | 92.80 | 10 clips x 3 crop | x | 16516 | [config](/configs/recognition/slowonly/slowonly_r101_8xb16-8x8x1-196e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r101_8xb16-8x8x1-196e_kinetics400-rgb/slowonly_r101_8xb16-8x8x1-196e_kinetics400-rgb_20220901-e6281431.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r101_8xb16-8x8x1-196e_kinetics400-rgb/slowonly_r101_8xb16-8x8x1-196e_kinetics400-rgb.log) | +| 4x16x1 | Linear+MultiStep | short-side 320 | 8 | ResNet50 | ImageNet | 75.12 | 91.72 | 10 clips x 3 crop | x | 5797 | [config](/configs/recognition/slowonly/slowonly_imagenet-pretrained-r50_8xb16-4x16x1-steplr-150e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_imagenet-pretrained-r50_8xb16-4x16x1-steplr-150e_kinetics400-rgb/slowonly_imagenet-pretrained-r50_8xb16-4x16x1-steplr-150e_kinetics400-rgb_20220901-e7b65fad.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_imagenet-pretrained-r50_8xb16-4x16x1-steplr-150e_kinetics400-rgb/slowonly_imagenet-pretrained-r50_8xb16-4x16x1-steplr-150e_kinetics400-rgb.log) | +| 8x8x1 | Linear+MultiStep | short-side 320 | 8 | ResNet50 | ImageNet | 76.45 | 92.55 | 10 clips x 3 crop | x | 11089 | [config](/configs/recognition/slowonly/slowonly_imagenet-pretrained-r50_8xb16-8x8x1-steplr-150e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_imagenet-pretrained-r50_8xb16-8x8x1-steplr-150e_kinetics400-rgb/slowonly_imagenet-pretrained-r50_8xb16-8x8x1-steplr-150e_kinetics400-rgb_20220901-df42dc84.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_imagenet-pretrained-r50_8xb16-8x8x1-steplr-150e_kinetics400-rgb/slowonly_imagenet-pretrained-r50_8xb16-8x8x1-steplr-150e_kinetics400-rgb.log) | +| 4x16x1 | Linear+MultiStep | short-side 320 | 8 | ResNet50 (NonLocalEmbedGauss) | ImageNet | 75.07 | 91.69 | 10 clips x 3 crop | x | 8198 | [config](/configs/recognition/slowonly/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-4x16x1-steplr-150e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-4x16x1-steplr-150e_kinetics400-rgb/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-4x16x1-steplr-150e_kinetics400-rgb_20220901-cf739c75.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-4x16x1-steplr-150e_kinetics400-rgb/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-4x16x1-steplr-150e_kinetics400-rgb.log) | +| 8x8x1 | Linear+MultiStep | short-side 320 | 8 | ResNet50 (NonLocalEmbedGauss) | ImageNet | 76.65 | 92.47 | 10 clips x 3 crop | x | 17087 | [config](/configs/recognition/slowonly/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-8x8x1-steplr-150e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-8x8x1-steplr-150e_kinetics400-rgb/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-8x8x1-steplr-150e_kinetics400-rgb_20220901-df42dc84.pth) | 
[log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-8x8x1-steplr-150e_kinetics400-rgb/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-8x8x1-steplr-150e_kinetics400-rgb.log) | ### Kinetics-700 | frame sampling strategy | scheduler | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | inference time(video/s) | gpu_mem(M) | config | ckpt | log | | :---------------------: | :--------------: | :------------: | :--: | :------: | :------: | :------: | :------: | :---------------: | :---------------------: | :--------: | :--------------------: | :------------------: | :-----------------: | -| 4x16x1 | Linear+MultiStep | short-side 320 | 8x2 | ResNet50 | ImageNet | 65.23 | 86.14 | 10 clips x 3 crop | x | 5797 | [config](/configs/recognition/slowonly/slowonly_imagenet-pretrained-r50_16xb16-4x16x1-steplr-150e_kinetics700-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_imagenet-pretrained-r50_16xb16-4x16x1-steplr-150e_kinetics700-rgb/slowonly_imagenet-pretrained-r50_16xb16-4x16x1-steplr-150e_kinetics700-rgb_20221013-98b1b0a7.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_imagenet-pretrained-r50_16xb16-4x16x1-steplr-150e_kinetics700-rgb/slowonly_imagenet-pretrained-r50_16xb16-4x16x1-steplr-150e_kinetics700-rgb.log) | -| 8x8x1 | Linear+MultiStep | short-side 320 | 8x2 | ResNet50 | ImageNet | 67.36 | 87.64 | 10 clips x 3 crop | x | 11089 | [config](/configs/recognition/slowonly/slowonly_imagenet-pretrained-r50_16xb16-8x8x1-steplr-150e_kinetics700-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_imagenet-pretrained-r50_16xb16-8x8x1-steplr-150e_kinetics700-rgb/slowonly_imagenet-pretrained-r50_16xb16-8x8x1-steplr-150e_kinetics700-rgb_20221013-15b93b10.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_imagenet-pretrained-r50_16xb16-8x8x1-steplr-150e_kinetics700-rgb/slowonly_imagenet-pretrained-r50_16xb16-8x8x1-steplr-150e_kinetics700-rgb.log) | +| 4x16x1 | Linear+MultiStep | short-side 320 | 8x2 | ResNet50 | ImageNet | 65.52 | 86.39 | 10 clips x 3 crop | x | 5826 | [config](/configs/recognition/slowonly/slowonly_imagenet-pretrained-r50_16xb16-4x16x1-steplr-150e_kinetics700-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_imagenet-pretrained-r50_16xb16-4x16x1-steplr-150e_kinetics700-rgb/slowonly_imagenet-pretrained-r50_16xb16-4x16x1-steplr-150e_kinetics700-rgb_20221013-98b1b0a7.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_imagenet-pretrained-r50_16xb16-4x16x1-steplr-150e_kinetics700-rgb/slowonly_imagenet-pretrained-r50_16xb16-4x16x1-steplr-150e_kinetics700-rgb.log) | +| 8x8x1 | Linear+MultiStep | short-side 320 | 8x2 | ResNet50 | ImageNet | 67.67 | 87.80 | 10 clips x 3 crop | x | 11089 | [config](/configs/recognition/slowonly/slowonly_imagenet-pretrained-r50_16xb16-8x8x1-steplr-150e_kinetics700-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_imagenet-pretrained-r50_16xb16-8x8x1-steplr-150e_kinetics700-rgb/slowonly_imagenet-pretrained-r50_16xb16-8x8x1-steplr-150e_kinetics700-rgb_20221013-15b93b10.pth) | 
[log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_imagenet-pretrained-r50_16xb16-8x8x1-steplr-150e_kinetics700-rgb/slowonly_imagenet-pretrained-r50_16xb16-8x8x1-steplr-150e_kinetics700-rgb.log) | Note: diff --git a/configs/recognition/slowonly/metafile.yml b/configs/recognition/slowonly/metafile.yml index a3b2cf4bc9..0e3f284bff 100644 --- a/configs/recognition/slowonly/metafile.yml +++ b/configs/recognition/slowonly/metafile.yml @@ -3,7 +3,7 @@ Collections: README: configs/recognition/slowonly/README.md Paper: URL: https://arxiv.org/abs/1812.03982 - Title: 'Slowonly Networks for Video Recognition' + Title: 'SlowFast Networks for Video Recognition' Models: - Name: slowonly_r50_8xb16-4x16x1-256e_kinetics400-rgb @@ -26,8 +26,8 @@ Models: Metrics: Top 1 Accuracy: 72.68 Top 5 Accuracy: 90.68 - Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r50_8xb16-4x16x1-256e_kinetics400-rgb/slowonly_r50_4x16x1_256e_8xb16_kinetics400_rgb.log - Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r50_8xb16-4x16x1-256e_kinetics400-rgb/slowonly_r50_4x16x1_256e_8xb16_kinetics400_rgb_20220901-f6a40d08.pth + Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r50_8xb16-4x16x1-256e_kinetics400-rgb/slowonly_r50_4x16x1_256e_8xb16_kinetics400-rgb.log + Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r50_8xb16-4x16x1-256e_kinetics400-rgb/slowonly_r50_4x16x1_256e_8xb16_kinetics400-rgb_20220901-f6a40d08.pth - Name: slowonly_r50_8xb16-8x8x1-256e_kinetics400-rgb Config: configs/recognition/slowonly/slowonly_r50_8xb16-8x8x1-256e_kinetics400-rgb.py From 908af8a16f2165feb074f78855bd31009094dd8b Mon Sep 17 00:00:00 2001 From: Kai Hu Date: Mon, 24 Oct 2022 23:54:35 -0400 Subject: [PATCH 07/57] [Feature] Support VideoMAE (#1942) --- configs/recognition/videomae/README.md | 63 +++ configs/recognition/videomae/metafile.yml | 43 ++ ...6_videomae-k400-pre_16x4x1_kinetics-400.py | 61 +++ ...6_videomae-k400-pre_16x4x1_kinetics-400.py | 6 + mmaction/datasets/transforms/loading.py | 23 +- mmaction/models/backbones/__init__.py | 3 +- mmaction/models/backbones/vit_mae.py | 376 ++++++++++++++++++ tests/models/backbones/test_vit_mae.py | 32 ++ tools/deployment/publish_model.py | 2 +- 9 files changed, 596 insertions(+), 13 deletions(-) create mode 100644 configs/recognition/videomae/README.md create mode 100644 configs/recognition/videomae/metafile.yml create mode 100644 configs/recognition/videomae/vit-base-p16_videomae-k400-pre_16x4x1_kinetics-400.py create mode 100644 configs/recognition/videomae/vit-large-p16_videomae-k400-pre_16x4x1_kinetics-400.py create mode 100644 mmaction/models/backbones/vit_mae.py create mode 100644 tests/models/backbones/test_vit_mae.py diff --git a/configs/recognition/videomae/README.md b/configs/recognition/videomae/README.md new file mode 100644 index 0000000000..0172b19ca8 --- /dev/null +++ b/configs/recognition/videomae/README.md @@ -0,0 +1,63 @@ +# VideoMAE + +[VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) + + + +## Abstract + + + +Pre-training video transformers on extra large-scale datasets is generally required to achieve premier performance on relatively small datasets. In this paper, we show that video masked autoencoders (VideoMAE) are data-efficient learners for self-supervised video pre-training (SSVP). 
We are inspired by the recent ImageMAE and propose customized video tube masking with an extremely high ratio. This simple design makes video reconstruction a more challenging self-supervision task, thus encouraging extracting more effective video representations during this pre-training process. We obtain three important findings on SSVP: (1) An extremely high proportion of masking ratio (i.e., 90% to 95%) still yields favorable performance of VideoMAE. The temporally redundant video content enables a higher masking ratio than that of images. (2) VideoMAE achieves impressive results on very small datasets (i.e., around 3k-4k videos) without using any extra data. (3) VideoMAE shows that data quality is more important than data quantity for SSVP. Domain shift between pre-training and target datasets is an important issue. Notably, our VideoMAE with the vanilla ViT can achieve 85.8% on Kinetics-400, 75.3% on Something-Something V2, 90.8% on UCF101, and 61.1% on HMDB51, without using any extra data. + + + +
+ +
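The "customized video tube masking" described in the abstract samples a single spatial mask over the patch grid and repeats it across every frame of the clip. A minimal sketch of that idea (an illustration only, not the authors' implementation; the function name and patch-grid sizes are made up here):

```python
import torch


def tube_mask(num_frames: int, num_patches_per_frame: int,
              mask_ratio: float = 0.9) -> torch.Tensor:
    """Boolean mask of shape (num_frames, num_patches_per_frame);
    True marks a masked patch."""
    # Number of spatial patches hidden in every frame.
    num_masked = int(mask_ratio * num_patches_per_frame)
    # Pick the masked spatial positions once per clip ...
    perm = torch.randperm(num_patches_per_frame)
    frame_mask = torch.zeros(num_patches_per_frame, dtype=torch.bool)
    frame_mask[perm[:num_masked]] = True
    # ... and share the same pattern over time, forming "tubes".
    return frame_mask.unsqueeze(0).expand(num_frames, -1).clone()


# e.g. a 16-frame clip split into a 14x14 patch grid, 90% masked
mask = tube_mask(num_frames=16, num_patches_per_frame=14 * 14)
print(mask.shape, mask.float().mean())  # torch.Size([16, 196]), ~0.9
```

Because the masked positions are identical in every frame, the model cannot reconstruct a patch simply by copying it from a neighbouring frame, which is what keeps the task challenging at such high masking ratios.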
+ +## Results and Models + +### Kinetics-400 + +| frame sampling strategy | resolution | backbone | top1 acc | top5 acc | reference top1 acc | reference top5 acc | testing protocol | FLOPs | params | config | ckpt | +| :---------------------: | :------------: | :------: | :------: | :------: | :--------------------------------: | :--------------------------------: | :---------------: | :---: | :----: | :--------------------: | :-------------------: | +| 16x4x1 | short-side 320 | ViT-B | 81.3 | 95.0 | 81.5 \[[VideoMAE](https://github.com/MCG-NJU/VideoMAE/blob/main/MODEL_ZOO.md)\] | 95.1 \[[VideoMAE](https://github.com/MCG-NJU/VideoMAE/blob/main/MODEL_ZOO.md)\] | 5 clips x 3 crops | 180G | 87M | [config](/configs/recognition/videomae/vit-base-p16_videomae-k400-pre_16x4x1_kinetics-400.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/videomae/vit-base-p16_videomae-k400-pre_16x4x1_kinetics-400_20221013-860a3cd3.pth) \[1\] | +| 16x4x1 | short-side 320 | ViT-L | 85.3 | 96.7 | 85.2 \[[VideoMAE](https://github.com/MCG-NJU/VideoMAE/blob/main/MODEL_ZOO.md)\] | 96.8 \[[VideoMAE](https://github.com/MCG-NJU/VideoMAE/blob/main/MODEL_ZOO.md)\] | 5 clips x 3 crops | 597G | 305M | [config](/configs/recognition/videomae/vit-large-p16_videomae-k400-pre_16x4x1_kinetics-400.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/videomae/vit-large-p16_videomae-k400-pre_16x4x1_kinetics-400_20221013-229dbb03.pth) \[1\] | + +\[1\] The models are ported from the repo [VideoMAE](https://github.com/MCG-NJU/VideoMAE) and tested on our data. Currently, we only support the testing of VideoMAE models, training will be available soon. + +1. The values in columns named after "reference" are the results of the original repo. +2. The validation set of Kinetics400 we used consists of 19796 videos. These videos are available at [Kinetics400-Validation](https://mycuhk-my.sharepoint.com/:u:/g/personal/1155136485_link_cuhk_edu_hk/EbXw2WX94J1Hunyt3MWNDJUBz-nHvQYhO9pvKqm6g39PMA?e=a9QldB). The corresponding [data list](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_val_list.txt) (each line is of the format 'video_id, num_frames, label_index') and the [label map](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_class2ind.txt) are also available. + +For more details on data preparation, you can refer to [preparing_kinetics](/tools/data/kinetics/README.md). + +## Test + +You can use the following command to test a model. + +```shell +python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments] +``` + +Example: test ViT-base model on Kinetics-400 dataset and dump the result to a pkl file. + +```shell +python tools/test.py configs/recognition/videomae/vit-base-p16_videomae-k400-pre_16x4x1_kinetics-400.py \ + checkpoints/SOME_CHECKPOINT.pth --dump result.pkl +``` + +For more details, you can refer to the **Test** part in the [Training and Test Tutorial](/docs/en/user_guides/4_train_test.md). 
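If you prefer to run inference from Python rather than the CLI, a rough sketch using the high-level inference helpers looks like the following (the checkpoint and video paths are placeholders, and the helper signatures are assumed from the 1.x API, so double-check them against your installed version):

```python
from mmaction.apis import inference_recognizer, init_recognizer
from mmaction.utils import register_all_modules

register_all_modules()  # register MMAction2 models/datasets/transforms

config = ('configs/recognition/videomae/'
          'vit-base-p16_videomae-k400-pre_16x4x1_kinetics-400.py')
checkpoint = 'checkpoints/SOME_CHECKPOINT.pth'  # placeholder path

model = init_recognizer(config, checkpoint, device='cuda:0')
# `result` is an ActionDataSample; its `pred_scores.item` tensor holds
# the class scores (averaged over the test-time clips and crops).
result = inference_recognizer(model, 'demo/demo.mp4')  # placeholder video
print(result.pred_scores.item.topk(5))
```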
+ +## Citation + +```BibTeX +@misc{feichtenhofer2020x3d, + title={X3D: Expanding Architectures for Efficient Video Recognition}, + author={Christoph Feichtenhofer}, + year={2020}, + eprint={2004.04730}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` diff --git a/configs/recognition/videomae/metafile.yml b/configs/recognition/videomae/metafile.yml new file mode 100644 index 0000000000..ce3700fa1a --- /dev/null +++ b/configs/recognition/videomae/metafile.yml @@ -0,0 +1,43 @@ +Collections: +- Name: VideoMAE + README: configs/recognition/videomae/README.md + Paper: + URL: https://arxiv.org/abs/2203.12602 + Title: "VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training" + +Models: + - Name: vit-base-p16_videomae-k400-pre_16x4x1_kinetics-400 + Config: configs/recognition/videomae/vit-base-p16_videomae-k400-pre_16x4x1_kinetics-400.py + In Collection: VideoMAE + Metadata: + Architecture: ViT-B + Resolution: short-side 320 + Modality: RGB + Converted From: + Weights: https://github.com/MCG-NJU/VideoMAE/blob/main/MODEL_ZOO.md + Code: https://github.com/MCG-NJU/VideoMAE/ + Results: + - Dataset: Kinetics-400 + Task: Action Recognition + Metrics: + Top 1 Accuracy: 81.3 + Top 5 Accuracy: 95.0 + Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/videomae/vit-base-p16_videomae-k400-pre_16x4x1_kinetics-400_20221013-860a3cd3.pth + + - Name: vit-large-p16_videomae-k400-pre_16x4x1_kinetics-400 + Config: configs/recognition/videomae/vit-large-p16_videomae-k400-pre_16x4x1_kinetics-400.py + In Collection: VideoMAE + Metadata: + Architecture: ViT-L + Resolution: short-side 320 + Modality: RGB + Converted From: + Weights: https://github.com/MCG-NJU/VideoMAE/blob/main/MODEL_ZOO.md + Code: https://github.com/MCG-NJU/VideoMAE/ + Results: + - Dataset: Kinetics-400 + Task: Action Recognition + Metrics: + Top 1 Accuracy: 85.3 + Top 5 Accuracy: 96.7 + Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/videomae/vit-large-p16_videomae-k400-pre_16x4x1_kinetics-400_20221013-229dbb03.pth diff --git a/configs/recognition/videomae/vit-base-p16_videomae-k400-pre_16x4x1_kinetics-400.py b/configs/recognition/videomae/vit-base-p16_videomae-k400-pre_16x4x1_kinetics-400.py new file mode 100644 index 0000000000..d6f6e26a5f --- /dev/null +++ b/configs/recognition/videomae/vit-base-p16_videomae-k400-pre_16x4x1_kinetics-400.py @@ -0,0 +1,61 @@ +_base_ = ['../../_base_/default_runtime.py'] + +# model settings +model = dict( + type='Recognizer3D', + backbone=dict( + type='VisionTransformer', + img_size=224, + patch_size=16, + embed_dims=768, + depth=12, + num_heads=12, + mlp_ratio=4, + qkv_bias=True, + num_frames=16, + norm_cfg=dict(type='LN', eps=1e-6)), + cls_head=dict( + type='TimeSformerHead', + num_classes=400, + in_channels=768, + average_clips='prob'), + data_preprocessor=dict( + type='ActionDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + format_shape='NCTHW')) + +# dataset settings +dataset_type = 'VideoDataset' +data_root_val = 'data/kinetics400/videos_val' +ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt' + +test_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=16, + frame_interval=4, + num_clips=5, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 224)), + dict(type='ThreeCrop', crop_size=224), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] + +test_dataloader = dict( + batch_size=1, + 
num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=dict(video=data_root_val), + pipeline=test_pipeline, + test_mode=True)) + +test_evaluator = dict(type='AccMetric') +test_cfg = dict(type='TestLoop') diff --git a/configs/recognition/videomae/vit-large-p16_videomae-k400-pre_16x4x1_kinetics-400.py b/configs/recognition/videomae/vit-large-p16_videomae-k400-pre_16x4x1_kinetics-400.py new file mode 100644 index 0000000000..1733dc289f --- /dev/null +++ b/configs/recognition/videomae/vit-large-p16_videomae-k400-pre_16x4x1_kinetics-400.py @@ -0,0 +1,6 @@ +_base_ = ['vit-base_videomae-k400-pre_16x4x1_kinetics-400.py'] + +# model settings +model = dict( + backbone=dict(embed_dims=1024, depth=24, num_heads=16), + cls_head=dict(in_channels=1024)) diff --git a/mmaction/datasets/transforms/loading.py b/mmaction/datasets/transforms/loading.py index 427271d6da..8bcdcd2db1 100644 --- a/mmaction/datasets/transforms/loading.py +++ b/mmaction/datasets/transforms/loading.py @@ -174,9 +174,7 @@ def _get_train_clips(self, num_frames): def _get_test_clips(self, num_frames): """Get clip offsets in test mode. - Calculate the average interval for selected frames, and shift them - fixedly by avg_interval/2. If set twice_sample True, it will sample - frames together without fixed shift. If the total number of frames is + If the total number of frames is not enough, it will return all zero indices. Args: @@ -185,15 +183,18 @@ def _get_test_clips(self, num_frames): Returns: np.ndarray: Sampled frame indices in test mode. """ - ori_clip_len = self.clip_len * self.frame_interval - avg_interval = (num_frames - ori_clip_len + 1) / float(self.num_clips) - if num_frames > ori_clip_len - 1: - base_offsets = np.arange(self.num_clips) * avg_interval - clip_offsets = (base_offsets + avg_interval / 2.0).astype(np.int32) - if self.twice_sample: - clip_offsets = np.concatenate([clip_offsets, base_offsets]) + k = 2 if self.twice_sample else 1 + num_clips = self.num_clips * k + ori_clip_len = (self.clip_len - 1) * self.frame_interval + 1 + max_offset = max(num_frames - ori_clip_len, 0) + + if num_clips > 1: + num_segments = num_clips - 1 + offset_between = max_offset / float(num_segments) + clip_offsets = np.arange(num_clips) * offset_between + clip_offsets = np.round(clip_offsets).astype(np.int32) else: - clip_offsets = np.zeros((self.num_clips, ), dtype=np.int32) + clip_offsets = np.array([max_offset // 2]) return clip_offsets def _sample_clips(self, num_frames): diff --git a/mmaction/models/backbones/__init__.py b/mmaction/models/backbones/__init__.py index ce197a9c23..373185ee4a 100644 --- a/mmaction/models/backbones/__init__.py +++ b/mmaction/models/backbones/__init__.py @@ -16,11 +16,12 @@ from .swin import SwinTransformer3D from .tanet import TANet from .timesformer import TimeSformer +from .vit_mae import VisionTransformer from .x3d import X3D __all__ = [ 'C3D', 'ResNet', 'ResNet3d', 'ResNetTSM', 'ResNet2Plus1d', 'ResNet3dSlowFast', 'ResNet3dSlowOnly', 'ResNet3dCSN', 'ResNetTIN', 'X3D', 'ResNet3dLayer', 'MobileNetV2TSM', 'MobileNetV2', 'TANet', 'TimeSformer', - 'STGCN', 'AGCN', 'ResNetAudio', 'SwinTransformer3D' + 'STGCN', 'AGCN', 'ResNetAudio', 'SwinTransformer3D', 'VisionTransformer' ] diff --git a/mmaction/models/backbones/vit_mae.py b/mmaction/models/backbones/vit_mae.py new file mode 100644 index 0000000000..c61ac24a70 --- /dev/null +++ b/mmaction/models/backbones/vit_mae.py @@ -0,0 +1,376 @@ +# 
Copyright (c) OpenMMLab. All rights reserved. +from typing import Optional + +import torch +import torch.nn.functional as F +from mmcv.cnn import build_norm_layer +from mmcv.cnn.bricks import DropPath +from mmcv.cnn.bricks.transformer import FFN, PatchEmbed +from mmengine.logging import MMLogger +from mmengine.model import BaseModule +from mmengine.runner.checkpoint import _load_checkpoint, load_state_dict +from mmengine.utils import to_2tuple +from torch import Tensor, nn + +from mmaction.registry import MODELS +from mmaction.utils import ConfigType, OptConfigType + + +class Attention(BaseModule): + """Multi-head Self-attention. + + Args: + embed_dims (int): Dimensions of embedding. + num_heads (int): Number of parallel attention heads. + qkv_bias (bool): If True, add a learnable bias to q and v. + Defaults to True. + qk_scale (float, optional): Override default qk scale of + ``head_dim ** -0.5`` if set. Defaults to None. + attn_drop_rate (float): Dropout ratio of attention weight. + Defaults to 0. + drop_rate (float): Dropout ratio of output. Defaults to 0. + init_cfg (dict or ConfigDict, optional): The Config + for initialization. Defaults to None. + """ + + def __init__(self, + embed_dims: int, + num_heads: int = 8, + qkv_bias: bool = True, + qk_scale: Optional[float] = None, + attn_drop_rate: float = 0., + drop_rate: float = 0., + init_cfg: OptConfigType = None, + **kwargs) -> None: + super().__init__(init_cfg=init_cfg) + self.embed_dims = embed_dims + self.num_heads = num_heads + head_embed_dims = embed_dims // num_heads + + self.scale = qk_scale or head_embed_dims**-0.5 + + if qkv_bias: + self._init_qv_bias() + + self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=False) + self.attn_drop = nn.Dropout(attn_drop_rate) + self.proj = nn.Linear(embed_dims, embed_dims) + self.proj_drop = nn.Dropout(drop_rate) + + def _init_qv_bias(self) -> None: + self.q_bias = nn.Parameter(torch.zeros(self.embed_dims)) + self.v_bias = nn.Parameter(torch.zeros(self.embed_dims)) + + def forward(self, x: Tensor) -> Tensor: + """Defines the computation performed at every call. + + Args: + x (Tensor): The input data with size of (B, N, C). + Returns: + Tensor: The output of the attention block, same size as inputs. + """ + B, N, C = x.shape + + if hasattr(self, 'q_bias'): + k_bias = torch.zeros_like(self.v_bias, requires_grad=False) + qkv_bias = torch.cat((self.q_bias, k_bias, self.v_bias)) + qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) + else: + qkv = self.qkv(x) + + qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] + + q = q * self.scale + attn = q @ k.transpose(-2, -1) + + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, -1) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(BaseModule): + """The basic block in the Vision Transformer. + + Args: + embed_dims (int): Dimensions of embedding. + num_heads (int): Number of parallel attention heads. + mlp_ratio (int): The ratio between the hidden layer and the + input layer in the FFN. Defaults to 4. + qkv_bias (bool): If True, add a learnable bias to q and v. + Defaults to True. + qk_scale (float): Override default qk scale of + ``head_dim ** -0.5`` if set. Defaults to None. + drop_rate (float): Dropout ratio of output. Defaults to 0. + attn_drop_rate (float): Dropout ratio of attention weight. + Defaults to 0. + drop_path_rate (float): Dropout ratio of the residual branch. + Defaults to 0. 
+ init_values (float): Value to init the multiplier of the + residual branch. Defaults to 0. + act_cfg (dict or ConfigDict): Config for activation layer in FFN. + Defaults to `dict(type='GELU')`. + norm_cfg (dict or ConfigDict): Config for norm layers. + Defaults to `dict(type='LN', eps=1e-6)`. + init_cfg (dict or ConfigDict, optional): The Config + for initialization. Defaults to None. + """ + + def __init__(self, + embed_dims: int, + num_heads: int, + mlp_ratio: int = 4., + qkv_bias: bool = True, + qk_scale: Optional[float] = None, + drop_rate: float = 0., + attn_drop_rate: float = 0., + drop_path_rate: float = 0., + init_values: float = 0.0, + act_cfg: ConfigType = dict(type='GELU'), + norm_cfg: ConfigType = dict(type='LN', eps=1e-6), + init_cfg: OptConfigType = None, + **kwargs) -> None: + super().__init__(init_cfg=init_cfg) + self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1] + self.attn = Attention( + embed_dims, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop_rate=attn_drop_rate, + drop_rate=drop_rate) + + self.drop_path = nn.Identity() + if drop_path_rate > 0.: + self.drop_path = DropPath(drop_path_rate) + self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1] + + mlp_hidden_dim = int(embed_dims * mlp_ratio) + self.mlp = FFN( + embed_dims=embed_dims, + feedforward_channels=mlp_hidden_dim, + act_cfg=act_cfg, + ffn_drop=drop_rate, + add_identity=False) + + self._init_gammas(init_values, embed_dims) + + def _init_gammas(self, init_values: float, dim: int) -> None: + if type(init_values) == float and init_values > 0: + self.gamma_1 = nn.Parameter( + init_values * torch.ones(dim), requires_grad=True) + self.gamma_2 = nn.Parameter( + init_values * torch.ones(dim), requires_grad=True) + + def forward(self, x: Tensor) -> Tensor: + """Defines the computation performed at every call. + + Args: + x (Tensor): The input data with size of (B, N, C). + Returns: + Tensor: The output of the transformer block, same size as inputs. + """ + if hasattr(self, 'gamma_1'): + x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x))) + x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) + else: + x = x + self.drop_path(self.attn(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +def get_sinusoid_encoding(n_position: int, embed_dims: int) -> Tensor: + """Generate sinusoid encoding table. + + Sinusoid encoding is a kind of relative position encoding method came from + `Attention Is All You Need`_. + Args: + n_position (int): The length of the input token. + embed_dims (int): The position embedding dimension. + Returns: + :obj:`torch.FloatTensor`: The sinusoid encoding table of size + (1, n_position, embed_dims) + """ + + vec = torch.arange(embed_dims, dtype=torch.float64) + vec = (vec - vec % 2) / embed_dims + vec = torch.pow(10000, -vec).view(1, -1) + + sinusoid_table = torch.arange(n_position).view(-1, 1) * vec + sinusoid_table[:, 0::2].sin_() # dim 2i + sinusoid_table[:, 1::2].cos_() # dim 2i+1 + + sinusoid_table = sinusoid_table.to(torch.float32) + + return sinusoid_table.unsqueeze(0) + + +@MODELS.register_module() +class VisionTransformer(BaseModule): + """Vision Transformer with support for patch or hybrid CNN input stage. An + impl of `VideoMAE: Masked Autoencoders are Data-Efficient Learners for + Self-Supervised Video Pre-Training `_ + + Args: + img_size (int or tuple): Size of input image. + Defaults to 224. + patch_size (int): Spatial size of one patch. Defaults to 16. 
+ in_channels (int): The number of channels of he input. + Defaults to 3. + embed_dims (int): Dimensions of embedding. Defaults to 768. + depth (int): number of blocks in the transformer. + Defaults to 12. + num_heads (int): Number of parallel attention heads in + TransformerCoder. Defaults to 12. + mlp_ratio (int): The ratio between the hidden layer and the + input layer in the FFN. Defaults to 4. + qkv_bias (bool): If True, add a learnable bias to q and v. + Defaults to True. + qk_scale (float, optional): Override default qk scale of + ``head_dim ** -0.5`` if set. Defaults to None. + drop_rate (float): Dropout ratio of output. Defaults to 0. + attn_drop_rate (float): Dropout ratio of attention weight. + Defaults to 0. + drop_path_rate (float): Dropout ratio of the residual branch. + Defaults to 0. + norm_cfg (dict or Configdict): Config for norm layers. + Defaults to `dict(type='LN', eps=1e-6)`. + init_values (float): Value to init the multiplier of the residual + branch. Defaults to 0. + use_learnable_pos_emb (bool): If True, use learnable positional + embedding, othersize use sinusoid encoding. Defaults to False. + num_frames (int): Number of frames in the video. Defaults to 16. + tubelet_size (int): Temporal size of one patch. Defaults to 2. + use_mean_pooling (bool): If True, take the mean pooling over all + positions. Defaults to True. + init_cfg (dict or Configdict, optional): The Config for initialization. + Defaults to None. + pretrained (str, optional): Name of pretrained model. Default: None. + """ + + def __init__(self, + img_size: int = 224, + patch_size: int = 16, + in_channels: int = 3, + embed_dims: int = 768, + depth: int = 12, + num_heads: int = 12, + mlp_ratio: int = 4., + qkv_bias: bool = True, + qk_scale: int = None, + drop_rate: float = 0., + attn_drop_rate: float = 0., + drop_path_rate: float = 0., + norm_cfg: ConfigType = dict(type='LN', eps=1e-6), + init_values: int = 0., + use_learnable_pos_emb: bool = False, + num_frames: int = 16, + tubelet_size: int = 2, + use_mean_pooling: int = True, + init_cfg: Optional[ConfigType] = None, + pretrained: Optional[str] = None, + **kwargs) -> None: + super().__init__(init_cfg=init_cfg) + self.pretrained = pretrained + patch_size = to_2tuple(patch_size) + img_size = to_2tuple(img_size) + + self.patch_embed = PatchEmbed( + in_channels=in_channels, + embed_dims=embed_dims, + conv_type='Conv3d', + kernel_size=(tubelet_size, ) + patch_size, + stride=(tubelet_size, ) + patch_size, + padding=(0, 0, 0), + dilation=(1, 1, 1)) + + num_patches = (img_size[1] // patch_size[1]) * \ + (img_size[0] // patch_size[0]) * \ + (num_frames // tubelet_size) + + if use_learnable_pos_emb: + self.pos_embed = nn.Parameter( + torch.zeros(1, num_patches, embed_dims)) + nn.init.trunc_normal_(self.pos_embed, std=.02) + else: + # sine-cosine positional embeddings is on the way + pos_embed = get_sinusoid_encoding(num_patches, embed_dims) + self.register_buffer('pos_embed', pos_embed) + + self.pos_drop = nn.Dropout(p=drop_rate) + + # stochastic depth decay rule + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] + + self.blocks = nn.ModuleList([ + Block( + embed_dims=embed_dims, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop_rate=drop_rate, + attn_drop_rate=attn_drop_rate, + drop_path_rate=dpr[i], + norm_cfg=norm_cfg, + init_values=init_values) for i in range(depth) + ]) + + if use_mean_pooling: + self.norm = nn.Identity() + self.fc_norm = build_norm_layer(norm_cfg, embed_dims)[1] + else: + 
self.norm = build_norm_layer(norm_cfg, embed_dims)[1] + self.fc_norm = None + + def init_weights(self) -> None: + """Initiate the parameters either from existing checkpoint or from + scratch.""" + + if isinstance(self.pretrained, str): + logger = MMLogger.get_current_instance() + logger.info(f'load model from: {self.pretrained}') + + state_dict = _load_checkpoint(self.pretrained) + if 'state_dict' in state_dict: + state_dict = state_dict['state_dict'] + load_state_dict(self, state_dict, strict=False, logger=logger) + elif self.pretrained is None: + for m in self.modules(): + if isinstance(m, nn.Linear): + nn.init.trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + else: + raise TypeError('pretrained must be a str or None') + + def forward(self, x: Tensor) -> Tensor: + """Defines the computation performed at every call. + + Args: + x (Tensor): The input data. + Returns: + Tensor: The feature of the input + samples extracted by the backbone. + """ + x = self.patch_embed(x)[0] + B, _, _ = x.size() + + x = x + self.pos_embed + x = self.pos_drop(x) + + for blk in self.blocks: + x = blk(x) + + x = self.norm(x) + if self.fc_norm is not None: + return self.fc_norm(x.mean(1)) + else: + return x[:, 0] diff --git a/tests/models/backbones/test_vit_mae.py b/tests/models/backbones/test_vit_mae.py new file mode 100644 index 0000000000..f1e575977a --- /dev/null +++ b/tests/models/backbones/test_vit_mae.py @@ -0,0 +1,32 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from mmaction.models import VisionTransformer + + +def test_vit_backbone(): + """Test vit backbone.""" + x = torch.randn(1, 3, 8, 64, 64) + model = VisionTransformer( + img_size=64, + num_frames=8, + qkv_bias=True, + drop_path_rate=0.2, + init_values=0.1) + model.init_weights() + + assert model(x).shape == torch.Size([1, 768]) + model.eval() + assert model(x).shape == torch.Size([1, 768]) + + model = VisionTransformer( + img_size=64, + num_frames=8, + use_learnable_pos_emb=True, + drop_rate=0.1, + use_mean_pooling=False) + model.init_weights() + + assert model(x).shape == torch.Size([1, 768]) + model.eval() + assert model(x).shape == torch.Size([1, 768]) diff --git a/tools/deployment/publish_model.py b/tools/deployment/publish_model.py index 931ca7fc7a..e116d3e72b 100644 --- a/tools/deployment/publish_model.py +++ b/tools/deployment/publish_model.py @@ -26,7 +26,7 @@ def process_checkpoint(in_file, out_file): del checkpoint[k] unnecessary_params = ['data_preprocessor.mean', 'data_preprocessor.std'] for k in unnecessary_params: - if k in checkpoint['state_dict']: + if 'state_dict' in checkpoint and k in checkpoint['state_dict']: del checkpoint['state_dict'][k] # if it is necessary to remove some sensitive data in checkpoint['meta'], # add the code here. 
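
The `_get_test_clips` rewrite in the `SampleFrames` transform above drops the old "average interval plus half-interval shift" rule and instead spreads the test-time clip offsets evenly between the first and the last valid starting frame (`twice_sample` simply doubles the number of clips). A stand-alone sketch of that arithmetic, using a hypothetical helper name purely for illustration:

```python
import numpy as np


def evenly_spaced_offsets(num_frames, clip_len=16, frame_interval=4,
                          num_clips=5, twice_sample=False):
    """Re-implementation of the new test-time offset rule, for illustration."""
    if twice_sample:
        num_clips *= 2
    ori_clip_len = (clip_len - 1) * frame_interval + 1
    max_offset = max(num_frames - ori_clip_len, 0)
    if num_clips > 1:
        offsets = np.arange(num_clips) * (max_offset / (num_clips - 1))
        return np.round(offsets).astype(np.int32)
    return np.array([max_offset // 2])


# A 300-frame video under the 16x4x1, 5-clip setting used by the VideoMAE
# test pipeline: clips start at frames 0, 60, 120, 179 and 239.
print(evenly_spaced_offsets(300))
```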
From 713bee5ad060b74b97b72b6acf4f4061025faf42 Mon Sep 17 00:00:00 2001 From: wxDai Date: Thu, 27 Oct 2022 13:38:16 +0800 Subject: [PATCH 08/57] [Fix] fix swin3d config and readme format (#2010) --- configs/recognition/swin/README.md | 22 ++-- configs/recognition/swin/metafile.yml | 122 ++++++++++++++++++ ...re_8xb8-amp-32x2x1-30e_kinetics400-rgb.py} | 0 ...e_16xb8-amp-32x2x1-30e_kinetics700-rgb.py} | 4 +- ...re_8xb8-amp-32x2x1-30e_kinetics400-rgb.py} | 0 ...re_8xb8-amp-32x2x1-30e_kinetics400-rgb.py} | 0 ...re_8xb8-amp-32x2x1-30e_kinetics400-rgb.py} | 0 model-index.yml | 1 + tests/models/recognizers/test_recognizer3d.py | 10 ++ 9 files changed, 146 insertions(+), 13 deletions(-) create mode 100644 configs/recognition/swin/metafile.yml rename configs/recognition/swin/{swin-base_p244-w877-in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py => swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py} (100%) rename configs/recognition/swin/{swin-large_p244-w877-in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb.py => swin-large-p244-w877_in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb.py} (96%) rename configs/recognition/swin/{swin-large_p244-w877-in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py => swin-large-p244-w877_in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py} (100%) rename configs/recognition/swin/{swin-small_p244-w877-in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py => swin-small-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py} (100%) rename configs/recognition/swin/{swin-tiny_p244-w877-in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py => swin-tiny-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py} (100%) diff --git a/configs/recognition/swin/README.md b/configs/recognition/swin/README.md index 81ae4f288d..c36a47b39e 100644 --- a/configs/recognition/swin/README.md +++ b/configs/recognition/swin/README.md @@ -20,18 +20,18 @@ The vision community is witnessing a modeling shift from CNNs to Transformers, w ### Kinetics-400 -| frame sampling strategy | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | reference top1 acc | reference top1 acc | testing protocol | inference time(video/s) | gpu_mem(M) | params | config | ckpt | log | -| :---------------------: | :-----------: | :--: | :------: | :----------: | :------: | :------: | :--------------------: | :--------------------: | :--------------: | :---------------------: | :--------: | :----: | :--------: | :------: | :-----: | -| 32x2x1 | short-side 320 | 8 | Swin-T | ImageNet-1k | 78.29 | 93.58 | [78.46](https://github.com/SwinTransformer/Video-Swin-Transformer/blob/master/configs/recognition/swin/swin_tiny_patch244_window877_kinetics400_1k.py) | [93.46](https://github.com/SwinTransformer/Video-Swin-Transformer/blob/master/configs/recognition/swin/swin_tiny_patch244_window877_kinetics400_1k.py) | 4 clips x 3 crop | x | 21072 | 28.2M | [config](/configs/recognition/swin/swin-tiny_p244-w877-in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-tiny_p244-w877-in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-tiny_p244-w877-in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb_20220930-241016b2.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-tiny_p244-w877-in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-tiny_p244-w877-in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.log) | -| 32x2x1 | short-side 320 | 8 | Swin-S | ImageNet-1k | 80.23 | 94.32 | 
[80.23](https://github.com/SwinTransformer/Video-Swin-Transformer/blob/master/configs/recognition/swin/swin_small_patch244_window877_kinetics400_1k.py) | [94.16](https://github.com/SwinTransformer/Video-Swin-Transformer/blob/master/configs/recognition/swin/swin_small_patch244_window877_kinetics400_1k.py) | 4 clips x 3 crop | x | 33632 | 49.8M | [config](/configs/recognition/swin/swin-small_p244-w877-in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-small_p244-w877-in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-small_p244-w877-in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb_20220930-e91ab986.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-small_p244-w877-in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-small_p244-w877-in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.log) | -| 32x2x1 | short-side 320 | 8 | Swin-B | ImageNet-1k | 80.21 | 94.32 | [80.27](https://github.com/SwinTransformer/Video-Swin-Transformer/blob/master/configs/recognition/swin/swin_base_patch244_window877_kinetics400_1k.py) | [94.42](https://github.com/SwinTransformer/Video-Swin-Transformer/blob/master/configs/recognition/swin/swin_base_patch244_window877_kinetics400_1k.py) | 4 clips x 3 crop | x | 45143 | 88.0M | [config](/configs/recognition/swin/swin-base_p244-w877-in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-base_p244-w877-in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-base_p244-w877-in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb_20220930-182ec6cc.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-base_p244-w877-in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-base_p244-w877-in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.log) | -| 32x2x1 | short-side 320 | 8 | Swin-L | ImageNet-22k | 83.15 | 95.76 | 83.1\* | 95.9\* | 4 clips x 3 crop | x | 68881 | 197.0M | [config](/configs/recognition/swin/swin-large_p244-w877-in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-large_p244-w877-in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-large_p244-w877-in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb_20220930-78ad8b11.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-large_p244-w877-in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-large_p244-w877-in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.log) | +| frame sampling strategy | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | reference top1 acc | reference top1 acc | testing protocol | gpu_mem(M) | FLOPs | params | config | ckpt | log | +| :---------------------: | :------------: | :--: | :------: | :----------: | :------: | :------: | :-----------------------: | :-----------------------: | :---------------: | :--------: | :---: | :----: | :-----------: | :---------: | :---------: | +| 32x2x1 | short-side 320 | 8 | Swin-T | ImageNet-1k | 78.29 | 93.58 | 78.46 \[[VideoSwin](https://github.com/SwinTransformer/Video-Swin-Transformer)\] | 93.46 \[[VideoSwin](https://github.com/SwinTransformer/Video-Swin-Transformer)\] | 4 clips x 3 crops | 21072 | 88G | 28.2M | [config](/configs/recognition/swin/swin-tiny-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py) | 
[ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-tiny-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-tiny-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb_20220930-241016b2.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-tiny-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-tiny-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.log) | +| 32x2x1 | short-side 320 | 8 | Swin-S | ImageNet-1k | 80.23 | 94.32 | 80.23 \[[VideoSwin](https://github.com/SwinTransformer/Video-Swin-Transformer)\] | 94.16 \[[VideoSwin](https://github.com/SwinTransformer/Video-Swin-Transformer)\] | 4 clips x 3 crops | 33632 | 166G | 49.8M | [config](/configs/recognition/swin/swin-small-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-small-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-small-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb_20220930-e91ab986.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-small-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-small-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.log) | +| 32x2x1 | short-side 320 | 8 | Swin-B | ImageNet-1k | 80.21 | 94.32 | 80.27 \[[VideoSwin](https://github.com/SwinTransformer/Video-Swin-Transformer)\] | 94.42 \[[VideoSwin](https://github.com/SwinTransformer/Video-Swin-Transformer)\] | 4 clips x 3 crops | 45143 | 282G | 88.0M | [config](/configs/recognition/swin/swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb_20220930-182ec6cc.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.log) | +| 32x2x1 | short-side 320 | 8 | Swin-L | ImageNet-22k | 83.15 | 95.76 | 83.1\* | 95.9\* | 4 clips x 3 crops | 68881 | 604G | 197M | [config](/configs/recognition/swin/swin-large-p244-w877_in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-large-p244-w877_in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-large-p244-w877_in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb_20220930-78ad8b11.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-large-p244-w877_in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-large-p244-w877_in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.log) | ### Kinetics-700 -| frame sampling strategy | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | inference time(video/s) | gpu_mem(M) | params | config | ckpt | log | -| :---------------------: | :------------: | :--: | :------: | :----------: | :------: | :------: | :--------------: | :---------------------: | :--------: | :----: | :----------------------: | :--------------------: | :--------------------: | -| 32x2x1 | short-side 320 | 16 | Swin-L | ImageNet-22k | 75.26 | 92.44 | 4 clips x 3 crop | x | 68898 | 197.4M | [config](/configs/recognition/swin/swin-large_p244-w877-in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb.py) | 
[ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-large_p244-w877-in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb/swin-large_p244-w877-in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb_20220930-f8d74db7.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-large_p244-w877-in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb/swin-large_p244-w877-in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb.py.log) | +| frame sampling strategy | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | gpu_mem(M) | FLOPs | params | config | ckpt | log | +| :---------------------: | :------------: | :--: | :------: | :----------: | :------: | :------: | :---------------: | :--------: | :---: | :----: | :----------------------------: | :--------------------------: | :-------------------------: | +| 32x2x1 | short-side 320 | 16 | Swin-L | ImageNet-22k | 75.26 | 92.44 | 4 clips x 3 crops | 68898 | 604G | 197M | [config](/configs/recognition/swin/swin-large-p244-w877_in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-large-p244-w877_in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb/swin-large-p244-w877_in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb_20220930-f8d74db7.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-large-p244-w877_in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb/swin-large-p244-w877_in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb.py.log) | 1. The **gpus** indicates the number of gpus we used to get the checkpoint. If you want to use a different number of gpus or videos per gpu, the best way is to set `--auto-scale-lr` when calling `tools/train.py`, this parameter will auto-scale the learning rate according to the actual batch size and the original batch size. 2. The values in columns named after "reference" are the results got by testing on our dataset, using the checkpoints provided by the author with same model settings. `*` means that the numbers are copied from the paper. @@ -51,7 +51,7 @@ python tools/train.py ${CONFIG_FILE} [optional arguments] Example: train VideoSwin model on Kinetics-400 dataset in a deterministic option with periodic validation. ```shell -python tools/train.py configs/recognition/swin/swin-tiny_p244-w877-in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py \ +python tools/train.py configs/recognition/swin/swin-tiny-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py \ --cfg-options randomness.seed=0 randomness.deterministic=True ``` @@ -68,7 +68,7 @@ python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments] Example: test VideoSwin model on Kinetics-400 dataset and dump the result to a pkl file. 
```shell -python tools/test.py configs/recognition/swin/swin-tiny_p244-w877-in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py \ +python tools/test.py configs/recognition/swin/swin-tiny-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py \ checkpoints/SOME_CHECKPOINT.pth --dump result.pkl ``` diff --git a/configs/recognition/swin/metafile.yml b/configs/recognition/swin/metafile.yml new file mode 100644 index 0000000000..b557d6e2ed --- /dev/null +++ b/configs/recognition/swin/metafile.yml @@ -0,0 +1,122 @@ +Collections: +- Name: Swin + README: configs/recognition/swin/README.md + Paper: + URL: https://arxiv.org/abs/2106.13230 + Title: 'Video Swin Transformer' + +Models: + - Name: swin-tiny-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb + Config: configs/recognition/swin/swin-tiny-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py + In Collection: Swin + Metadata: + Architecture: Swin-T + Batch Size: 8 + Epochs: 30 + FLOPs: 88G + Parameters: 28.2M + Pretrained: ImageNet-1K + Resolution: short-side 320 + Training Data: Kinetics-400 + Training Resources: 8 GPUs + Modality: RGB + Results: + - Dataset: Kinetics-400 + Task: Action Recognition + Metrics: + Top 1 Accuracy: 78.29 + Top 5 Accuracy: 93.58 + Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-tiny-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-tiny-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.log + Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-tiny-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-tiny-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb_20220930-241016b2.pth + + - Name: swin-small-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb + Config: configs/recognition/swin/swin-small-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py + In Collection: Swin + Metadata: + Architecture: Swin-S + Batch Size: 8 + Epochs: 30 + FLOPs: 166G + Parameters: 49.8M + Pretrained: ImageNet-1K + Resolution: short-side 320 + Training Data: Kinetics-400 + Training Resources: 8 GPUs + Modality: RGB + Results: + - Dataset: Kinetics-400 + Task: Action Recognition + Metrics: + Top 1 Accuracy: 80.23 + Top 5 Accuracy: 94.32 + Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-small-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-small-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.log + Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-small-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-small-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb_20220930-e91ab986.pth + + - Name: swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb + Config: configs/recognition/swin/swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py.py + In Collection: Swin + Metadata: + Architecture: Swin-B + Batch Size: 8 + Epochs: 30 + FLOPs: 282G + Parameters: 88.0M + Pretrained: ImageNet-1K + Resolution: short-side 320 + Training Data: Kinetics-400 + Training Resources: 8 GPUs + Modality: RGB + Results: + - Dataset: Kinetics-400 + Task: Action Recognition + Metrics: + Top 1 Accuracy: 80.21 + Top 5 Accuracy: 94.32 + Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.log + Weights: 
https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb_20220930-182ec6cc.pth + + - Name: swin-large-p244-w877_in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb + Config: configs/recognition/swin/swin-large-p244-w877_in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py + In Collection: Swin + Metadata: + Architecture: Swin-L + Batch Size: 8 + Epochs: 30 + FLOPs: 604G + Parameters: 197M + Pretrained: ImageNet-22K + Resolution: short-side 320 + Training Data: Kinetics-400 + Training Resources: 8 GPUs + Modality: RGB + Results: + - Dataset: Kinetics-400 + Task: Action Recognition + Metrics: + Top 1 Accuracy: 83.15 + Top 5 Accuracy: 95.76 + Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-large-p244-w877_in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-large-p244-w877_in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.log + Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-large-p244-w877_in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-large-p244-w877_in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb_20220930-78ad8b11.pth + + - Name: swin-large-p244-w877_in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb + Config: configs/recognition/swin/swin-large-p244-w877_in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb.py + In Collection: Swin + Metadata: + Architecture: Swin-L + Batch Size: 8 + Epochs: 30 + FLOPs: 604G + Parameters: 197M + Pretrained: ImageNet-22K + Resolution: short-side 320 + Training Data: Kinetics-700 + Training Resources: 16 GPUs + Modality: RGB + Results: + - Dataset: Kinetics-700 + Task: Action Recognition + Metrics: + Top 1 Accuracy: 75.26 + Top 5 Accuracy: 92.44 + Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-large-p244-w877_in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb/swin-large-p244-w877_in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb.log + Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-large-p244-w877_in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb/swin-large-p244-w877_in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb_20220930-f8d74db7.pth diff --git a/configs/recognition/swin/swin-base_p244-w877-in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py b/configs/recognition/swin/swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py similarity index 100% rename from configs/recognition/swin/swin-base_p244-w877-in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py rename to configs/recognition/swin/swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py diff --git a/configs/recognition/swin/swin-large_p244-w877-in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb.py b/configs/recognition/swin/swin-large-p244-w877_in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb.py similarity index 96% rename from configs/recognition/swin/swin-large_p244-w877-in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb.py rename to configs/recognition/swin/swin-large-p244-w877_in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb.py index a37b8db49e..0fbbb465ec 100644 --- a/configs/recognition/swin/swin-large_p244-w877-in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb.py +++ b/configs/recognition/swin/swin-large-p244-w877_in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb.py @@ -1,5 +1,5 @@ _base_ = [ - 'swin-large_p244-w877-in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py' + 'swin-large-p244-w877_in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py' ] model = 
dict(cls_head=dict(num_classes=700)) @@ -15,7 +15,7 @@ # io_backend='petrel', # path_mapping=dict( # {'data/kinetics700': 's3://openmmlab/datasets/action/Kinetics700'})) -file_client_args = dict(backend='disk') +file_client_args = dict(io_backend='disk') train_pipeline = [ dict(type='DecordInit', **file_client_args), dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1), diff --git a/configs/recognition/swin/swin-large_p244-w877-in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py b/configs/recognition/swin/swin-large-p244-w877_in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py similarity index 100% rename from configs/recognition/swin/swin-large_p244-w877-in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py rename to configs/recognition/swin/swin-large-p244-w877_in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py diff --git a/configs/recognition/swin/swin-small_p244-w877-in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py b/configs/recognition/swin/swin-small-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py similarity index 100% rename from configs/recognition/swin/swin-small_p244-w877-in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py rename to configs/recognition/swin/swin-small-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py diff --git a/configs/recognition/swin/swin-tiny_p244-w877-in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py b/configs/recognition/swin/swin-tiny-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py similarity index 100% rename from configs/recognition/swin/swin-tiny_p244-w877-in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py rename to configs/recognition/swin/swin-tiny-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py diff --git a/model-index.yml b/model-index.yml index 61c4a52916..426d148f98 100644 --- a/model-index.yml +++ b/model-index.yml @@ -13,6 +13,7 @@ Import: - configs/recognition/tanet/metafile.yml - configs/recognition/x3d/metafile.yml - configs/recognition/trn/metafile.yml +- configs/recognition/swin/metafile.yml - configs/detection/ava/metafile.yml - configs/detection/acrn/metafile.yml - configs/skeleton/stgcn/metafile.yml diff --git a/tests/models/recognizers/test_recognizer3d.py b/tests/models/recognizers/test_recognizer3d.py index 090c5ea6d1..3b3d6f5df3 100644 --- a/tests/models/recognizers/test_recognizer3d.py +++ b/tests/models/recognizers/test_recognizer3d.py @@ -115,3 +115,13 @@ def test_tpn_slowonly(): loss_vars, _ = train_test_step(config, input_shape=input_shape) assert 'loss_aux' in loss_vars assert loss_vars['loss_cls'] + loss_vars['loss_aux'] == loss_vars['loss'] + + +def test_swin(): + register_all_modules() + config = get_recognizer_cfg('swin/swin-tiny-p244-w877_in1k-pre_' + '8xb8-amp-32x2x1-30e_kinetics400-rgb.py') + config.model['backbone']['pretrained2d'] = False + config.model['backbone']['pretrained'] = None + input_shape = (1, 3, 4, 64, 64) # M C T H W + train_test_step(config, input_shape=input_shape) From a461627e8d0701628c386a4b76258fecd8df18e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Yiqin=20Wang=20=E7=8E=8B=E9=80=B8=E9=92=A6?= Date: Fri, 28 Oct 2022 14:29:05 +0800 Subject: [PATCH 09/57] [Feature] Support C2D (#2022) --- README.md | 6 +- configs/_base_/models/c2d_r50.py | 20 ++++ configs/recognition/c2d/README.md | 80 +++++++++++++ ...nopool_8xb32-8x8x1-100e_kinetics400-rgb.py | 7 ++ ...nopool_8xb32-8x8x1-100e_kinetics400-rgb.py | 106 ++++++++++++++++++ ...k-pre_8xb32-16x4x1-100e_kinetics400-rgb.py | 106 ++++++++++++++++++ ...1k-pre_8xb32-8x8x1-100e_kinetics400-rgb.py | 106 ++++++++++++++++++ 
configs/recognition/c2d/metafile.yml | 99 ++++++++++++++++ mmaction/models/backbones/__init__.py | 3 +- mmaction/models/backbones/c2d.py | 89 +++++++++++++++ model-index.yml | 1 + tests/models/backbones/test_c2d.py | 24 ++++ tests/models/recognizers/test_recognizer3d.py | 9 ++ 13 files changed, 653 insertions(+), 3 deletions(-) create mode 100644 configs/_base_/models/c2d_r50.py create mode 100644 configs/recognition/c2d/README.md create mode 100644 configs/recognition/c2d/c2d_r101-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb.py create mode 100644 configs/recognition/c2d/c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb.py create mode 100644 configs/recognition/c2d/c2d_r50-in1k-pre_8xb32-16x4x1-100e_kinetics400-rgb.py create mode 100644 configs/recognition/c2d/c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb.py create mode 100644 configs/recognition/c2d/metafile.yml create mode 100644 mmaction/models/backbones/c2d.py create mode 100644 tests/models/backbones/test_c2d.py diff --git a/README.md b/README.md index e041ae50c6..9cc40cb16c 100644 --- a/README.md +++ b/README.md @@ -71,6 +71,7 @@ The 1.x branch works with **PyTorch 1.6+**. ## What's New - (2022-10-11) We support **Video Swin Transformer** on Kinetics400 and additionally train a Swin-L model on Kinetics700 to extract video features for downstream tasks. +- (2022-10-28) We support **C2D** on Kinetics400, achieve 73.57% Top-1 accuracy (higher than 71.8% in the [paper](https://arxiv.org/abs/1711.07971)). **Release**: v1.0.0rc1 was released in 14/10/2022. Please refer to [changelog.md](docs/en/notes/changelog.md) for details and release history. @@ -88,17 +89,18 @@ Please refer to [install.md](https://mmaction2.readthedocs.io/en/1.x/get_started C3D (CVPR'2014) TSN (ECCV'2016) I3D (CVPR'2017) + C2D (CVPR'2018) I3D Non-Local (CVPR'2018) - R(2+1)D (CVPR'2018) + R(2+1)D (CVPR'2018) TRN (ECCV'2018) TSM (ICCV'2019) TSM Non-Local (ICCV'2019) SlowOnly (ICCV'2019) - SlowFast (ICCV'2019) + SlowFast (ICCV'2019) CSN (ICCV'2019) TIN (AAAI'2020) TPN (CVPR'2020) diff --git a/configs/_base_/models/c2d_r50.py b/configs/_base_/models/c2d_r50.py new file mode 100644 index 0000000000..2090530e37 --- /dev/null +++ b/configs/_base_/models/c2d_r50.py @@ -0,0 +1,20 @@ +model = dict( + type='Recognizer3D', + backbone=dict( + type='C2D', + depth=50, + pretrained='https://download.pytorch.org/models/resnet50-11ad3fa6.pth', + norm_eval=False), + cls_head=dict( + type='I3DHead', + num_classes=400, + in_channels=2048, + spatial_type='avg', + dropout_ratio=0.5, + init_std=0.01, + average_clips='prob'), + data_preprocessor=dict( + type='ActionDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + format_shape='NCTHW')) diff --git a/configs/recognition/c2d/README.md b/configs/recognition/c2d/README.md new file mode 100644 index 0000000000..22bfd6ed86 --- /dev/null +++ b/configs/recognition/c2d/README.md @@ -0,0 +1,80 @@ +# C2D + + + +[Non-local Neural Networks](https://arxiv.org/abs/1711.07971) + +## Abstract + + + +Both convolutional and recurrent operations are building blocks that process one local neighborhood at a time. In this paper, we present non-local operations as a generic family of building blocks for capturing long-range dependencies. Inspired by the classical non-local means method in computer vision, our non-local operation computes the response at a position as a weighted sum of the features at all positions. This building block can be plugged into many computer vision architectures. 
On the task of video classification, even without any bells and whistles, our non-local models can compete or outperform current competition winners on both Kinetics and Charades datasets. In static image recognition, our non-local models improve object detection/segmentation and pose estimation on the COCO suite of tasks.
+
+## Results and Models
+
+### Kinetics-400
+
+| frame sampling strategy | scheduler | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | reference top1 acc | reference top5 acc | testing protocol | gpu_mem(M) | FLOPs | params | config | ckpt | log |
+| :---------------------: | :-------: | :------------: | :--: | :-----------: | :------: | :------: | :------: | :---------------------: | :---------------------: | :----------------: | :--------: | :---: | :----: | :---------: | :-------: | :------: |
+| 8x8x1 | MultiStep | short-side 320 | 8 | ResNet50<br> | ImageNet | 73.16 | 90.88 | 67.2<br>[\[PySlowFast\]](https://github.com/facebookresearch/SlowFast/blob/main/MODEL_ZOO.md#kinetics-400-and-600) | 87.8<br>[\[PySlowFast\]](https://github.com/facebookresearch/SlowFast/blob/main/MODEL_ZOO.md#kinetics-400-and-600) | 10 clips x 3 crops | 21547 | 33G | 24.3M | [config](/configs/recognition/c2d/c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb/c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb_20221027-e0227b22.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb/c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb.log) |
+| 8x8x1 | MultiStep | short-side 320 | 8 | ResNet101<br> | ImageNet | 74.57 | 91.60 | x | x | 10 clips x 3 crops | 31836 | 63G | 43.3M | [config](/configs/recognition/c2d/c2d_r101-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r101-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb/c2d_r101-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb_20221027-557bd8bc.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r101-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb/c2d_r101-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb.log) |
+| 8x8x1 | MultiStep | short-side 320 | 8 | ResNet50<br>(TemporalPool) | ImageNet | 73.57 | 90.96 | 71.9<br>[\[Non-Local\]](https://github.com/facebookresearch/video-nonlocal-net#modifications-for-improving-speed) | 90.0<br>[\[Non-Local\]](https://github.com/facebookresearch/video-nonlocal-net#modifications-for-improving-speed) | 10 clips x 3 crops | 17006 | 19G | 24.3M | [config](/configs/recognition/c2d/c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb/c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb_20221027-3ca304fa.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb/c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb.log) |
+| 16x4x1 | MultiStep | short-side 320 | 8 | ResNet50<br>
(TemporalPool) | ImageNet | 74.54 | 91.76 | x | x | 10 clips x 3 crops | 33630 | 39G | 24.3M | [config](/configs/recognition/c2d/c2d_r50-in1k-pre_8xb32-16x4x1-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r50-in1k-pre_8xb32-16x4x1-100e_kinetics400-rgb/c2d_r50-in1k-pre_8xb32-16x4x1-100e_kinetics400-rgb_20221027-5f382a43.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r50-in1k-pre_8xb32-16x4x1-100e_kinetics400-rgb/c2d_r50-in1k-pre_8xb32-16x4x1-100e_kinetics400-rgb.log) | + +1. The values in columns named after "reference" are the results reported in the original repo, using the same model settings. +2. The **gpus** indicates the number of gpus we used to get the checkpoint. If you want to use a different number of gpus or videos per gpu, the best way is to set `--auto-scale-lr` when calling `tools/train.py`, this parameter will auto-scale the learning rate according to the actual batch size and the original batch size. +3. The validation set of Kinetics400 we used consists of 19796 videos. These videos are available at [Kinetics400-Validation](https://mycuhk-my.sharepoint.com/:u:/g/personal/1155136485_link_cuhk_edu_hk/EbXw2WX94J1Hunyt3MWNDJUBz-nHvQYhO9pvKqm6g39PMA?e=a9QldB). The corresponding [data list](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_val_list.txt) (each line is of the format 'video_id, num_frames, label_index') and the [label map](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_class2ind.txt) are also available. + +For more details on data preparation, you can refer to [preparing_kinetics](/tools/data/kinetics/README.md). + +## Train + +You can use the following command to train a model. + +```shell +python tools/train.py ${CONFIG_FILE} [optional arguments] +``` + +Example: train C2D model on Kinetics-400 dataset in a deterministic option with periodic validation. + +```shell +python tools/train.py configs/recognition/c2d/c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb.py \ + --cfg-options randomness.seed=0 randomness.deterministic=True +``` + +For more details, you can refer to the **Training** part in the [Training and Test Tutorial](/docs/en/user_guides/4_train_test.md). + +## Test + +You can use the following command to test a model. + +```shell +python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments] +``` + +Example: test C2D model on Kinetics-400 dataset and dump the result to a pkl file. + +```shell +python tools/test.py configs/recognition/c2d/c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb.py \ + checkpoints/SOME_CHECKPOINT.pth --dump result.pkl +``` + +For more details, you can refer to the **Test** part in the [Training and Test Tutorial](/docs/en/user_guides/4_train_test.md). 
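+
+As a rough cross-check of the parameter counts listed in the table, the recognizer can also be built from a config in Python. This is only a sketch, assuming the C2D code from this PR is installed; it uses the 8x8x1 TemporalPool config as an example.
+
+```python
+from mmengine import Config
+
+from mmaction.registry import MODELS
+from mmaction.utils import register_all_modules
+
+register_all_modules()
+cfg = Config.fromfile(
+    'configs/recognition/c2d/'
+    'c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb.py')
+model = MODELS.build(cfg.model)
+
+# The ResNet50 backbone (~23.5M) plus the I3DHead classifier (~0.8M) should
+# land close to the 24.3M reported above.
+n_params = sum(p.numel() for p in model.parameters())
+print(f'{n_params / 1e6:.1f}M parameters')
+```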
+ +## Citation + +```BibTeX +@article{XiaolongWang2017NonlocalNN, + title={Non-local Neural Networks}, + author={Xiaolong Wang and Ross Girshick and Abhinav Gupta and Kaiming He}, + journal={arXiv: Computer Vision and Pattern Recognition}, + year={2017} +} +``` diff --git a/configs/recognition/c2d/c2d_r101-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb.py b/configs/recognition/c2d/c2d_r101-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb.py new file mode 100644 index 0000000000..1445203526 --- /dev/null +++ b/configs/recognition/c2d/c2d_r101-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb.py @@ -0,0 +1,7 @@ +_base_ = ['c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb.py'] + +model = dict( + backbone=dict( + pretrained=('https://download.pytorch.org/' + 'models/resnet101-cd907fc2.pth'), + depth=101)) diff --git a/configs/recognition/c2d/c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb.py b/configs/recognition/c2d/c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb.py new file mode 100644 index 0000000000..13795ffc00 --- /dev/null +++ b/configs/recognition/c2d/c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb.py @@ -0,0 +1,106 @@ +_base_ = [ + '../../_base_/models/tsn_r50.py', '../../_base_/schedules/sgd_100e.py', + '../../_base_/default_runtime.py' +] + +# dataset settings +dataset_type = 'VideoDataset' +data_root = 'data/kinetics400/videos_train' +data_root_val = 'data/kinetics400/videos_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' + +# file_client_args = dict( +# io_backend='petrel', +# path_mapping=dict( +# {'data/kinetics400': 's3://openmmlab/datasets/action/Kinetics400'})) +file_client_args = dict(io_backend='disk') + +train_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='FormatShape', input_format='NCHW'), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=1, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='FormatShape', input_format='NCHW'), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=10, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 224)), + dict(type='ThreeCrop', crop_size=224), + dict(type='FormatShape', input_format='NCHW'), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=32, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=dict(video=data_root), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=32, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=dict(video=data_root_val), + pipeline=val_pipeline, + 
test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=dict(video=data_root_val), + pipeline=test_pipeline, + test_mode=True)) + +val_evaluator = dict(type='AccMetric') +test_evaluator = val_evaluator + +default_hooks = dict(checkpoint=dict(interval=3, max_keep_ckpts=3)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (32 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=256) diff --git a/configs/recognition/c2d/c2d_r50-in1k-pre_8xb32-16x4x1-100e_kinetics400-rgb.py b/configs/recognition/c2d/c2d_r50-in1k-pre_8xb32-16x4x1-100e_kinetics400-rgb.py new file mode 100644 index 0000000000..4247cd8d9c --- /dev/null +++ b/configs/recognition/c2d/c2d_r50-in1k-pre_8xb32-16x4x1-100e_kinetics400-rgb.py @@ -0,0 +1,106 @@ +_base_ = [ + '../../_base_/models/c2d_r50.py', '../../_base_/schedules/sgd_100e.py', + '../../_base_/default_runtime.py' +] + +# dataset settings +dataset_type = 'VideoDataset' +data_root = 'data/kinetics400/videos_train' +data_root_val = 'data/kinetics400/videos_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' + +# file_client_args = dict( +# io_backend='petrel', +# path_mapping=dict( +# {'data/kinetics400': 's3://openmmlab/datasets/action/Kinetics400'})) +file_client_args = dict(io_backend='disk') + +train_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict(type='SampleFrames', clip_len=16, frame_interval=4, num_clips=1), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict( + type='SampleFrames', + clip_len=16, + frame_interval=4, + num_clips=1, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict( + type='SampleFrames', + clip_len=16, + frame_interval=4, + num_clips=10, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 224)), + dict(type='ThreeCrop', crop_size=224), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=32, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=dict(video=data_root), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=32, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=dict(video=data_root_val), + pipeline=val_pipeline, + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', 
shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=dict(video=data_root_val), + pipeline=test_pipeline, + test_mode=True)) + +val_evaluator = dict(type='AccMetric') +test_evaluator = val_evaluator + +default_hooks = dict(checkpoint=dict(interval=3, max_keep_ckpts=3)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (32 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=256) diff --git a/configs/recognition/c2d/c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb.py b/configs/recognition/c2d/c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb.py new file mode 100644 index 0000000000..b2ca2c707e --- /dev/null +++ b/configs/recognition/c2d/c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb.py @@ -0,0 +1,106 @@ +_base_ = [ + '../../_base_/models/c2d_r50.py', '../../_base_/schedules/sgd_100e.py', + '../../_base_/default_runtime.py' +] + +# dataset settings +dataset_type = 'VideoDataset' +data_root = 'data/kinetics400/videos_train' +data_root_val = 'data/kinetics400/videos_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' + +# file_client_args = dict( +# io_backend='petrel', +# path_mapping=dict( +# {'data/kinetics400': 's3://openmmlab/datasets/action/Kinetics400'})) +file_client_args = dict(io_backend='disk') + +train_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=1, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=10, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 224)), + dict(type='ThreeCrop', crop_size=224), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=32, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=dict(video=data_root), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=32, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=dict(video=data_root_val), + pipeline=val_pipeline, + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=dict(video=data_root_val), + pipeline=test_pipeline, + 
test_mode=True)) + +val_evaluator = dict(type='AccMetric') +test_evaluator = val_evaluator + +default_hooks = dict(checkpoint=dict(interval=3, max_keep_ckpts=3)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (32 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=256) diff --git a/configs/recognition/c2d/metafile.yml b/configs/recognition/c2d/metafile.yml new file mode 100644 index 0000000000..6d6dd65488 --- /dev/null +++ b/configs/recognition/c2d/metafile.yml @@ -0,0 +1,99 @@ +Collections: +- Name: c2d + README: configs/recognition/c2d/README.md + Paper: + URL: https://arxiv.org/abs/1711.07971 + Title: 'Non-local Neural Networks' + +Models: + - Name: c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb + Config: configs/recognition/c2d/c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb.py + In Collection: C2D + Metadata: + Architecture: ResNet50 + Batch Size: 32 + Epochs: 100 + FLOPs: 33G + Parameters: 24.3M + Pretrained: ImageNet + Resolution: short-side 320 + Training Data: Kinetics-400 + Training Resources: 8 GPUs + Modality: RGB + Results: + - Dataset: Kinetics-400 + Task: Action Recognition + Metrics: + Top 1 Accuracy: 73.16 + Top 5 Accuracy: 90.88 + Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb/c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb.log + Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb/c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb_20221027-e0227b22.pth + + - Name: c2d_r101-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb + Config: configs/recognition/c2d/c2d_r101-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb.py + In Collection: C2D + Metadata: + Architecture: ResNet101 + Batch Size: 32 + Epochs: 100 + FLOPs: 63G + Parameters: 43.3M + Pretrained: ImageNet + Resolution: short-side 320 + Training Data: Kinetics-400 + Training Resources: 8 GPUs + Modality: RGB + Results: + - Dataset: Kinetics-400 + Task: Action Recognition + Metrics: + Top 1 Accuracy: 74.57 + Top 5 Accuracy: 91.60 + Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r101-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb/c2d_r101-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb.log + Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r101-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb/c2d_r101-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb_20221027-557bd8bc.pth + + - Name: c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb + Config: configs/recognition/c2d/c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb.py + In Collection: C2D + Metadata: + Architecture: ResNet50 + Batch Size: 32 + Epochs: 100 + FLOPs: 19G + Parameters: 24.3M + Pretrained: ImageNet + Resolution: short-side 320 + Training Data: Kinetics-400 + Training Resources: 8 GPUs + Modality: RGB + Results: + - Dataset: Kinetics-400 + Task: Action Recognition + Metrics: + Top 1 Accuracy: 73.57 + Top 5 Accuracy: 90.96 + Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb/c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb.log + Weights: 
https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb/c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb_20221027-3ca304fa.pth + + - Name: c2d_r50-in1k-pre_8xb32-16x4x1-100e_kinetics400-rgb + Config: configs/recognition/c2d/c2d_r50-in1k-pre_8xb32-16x4x1-100e_kinetics400-rgb + In Collection: C2D + Metadata: + Architecture: ResNet50 + Batch Size: 32 + Epochs: 100 + FLOPs: 39G + Parameters: 24.3M + Pretrained: ImageNet + Resolution: short-side 320 + Training Data: Kinetics-400 + Training Resources: 8 GPUs + Modality: RGB + Results: + - Dataset: Kinetics-400 + Task: Action Recognition + Metrics: + Top 1 Accuracy: 74.54 + Top 5 Accuracy: 91.76 + Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r50-in1k-pre_8xb32-16x4x1-100e_kinetics400-rgb/c2d_r50-in1k-pre_8xb32-16x4x1-100e_kinetics400-rgb.log + Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r50-in1k-pre_8xb32-16x4x1-100e_kinetics400-rgb/c2d_r50-in1k-pre_8xb32-16x4x1-100e_kinetics400-rgb_20221027-5f382a43.pth diff --git a/mmaction/models/backbones/__init__.py b/mmaction/models/backbones/__init__.py index 373185ee4a..30301b2b28 100644 --- a/mmaction/models/backbones/__init__.py +++ b/mmaction/models/backbones/__init__.py @@ -1,5 +1,6 @@ # Copyright (c) OpenMMLab. All rights reserved. from .agcn import AGCN +from .c2d import C2D from .c3d import C3D from .mobilenet_v2 import MobileNetV2 from .mobilenet_v2_tsm import MobileNetV2TSM @@ -20,7 +21,7 @@ from .x3d import X3D __all__ = [ - 'C3D', 'ResNet', 'ResNet3d', 'ResNetTSM', 'ResNet2Plus1d', + 'C2D', 'C3D', 'ResNet', 'ResNet3d', 'ResNetTSM', 'ResNet2Plus1d', 'ResNet3dSlowFast', 'ResNet3dSlowOnly', 'ResNet3dCSN', 'ResNetTIN', 'X3D', 'ResNet3dLayer', 'MobileNetV2TSM', 'MobileNetV2', 'TANet', 'TimeSformer', 'STGCN', 'AGCN', 'ResNetAudio', 'SwinTransformer3D', 'VisionTransformer' diff --git a/mmaction/models/backbones/c2d.py b/mmaction/models/backbones/c2d.py new file mode 100644 index 0000000000..9f86bddb6b --- /dev/null +++ b/mmaction/models/backbones/c2d.py @@ -0,0 +1,89 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Tuple, Union + +import torch +import torch.nn as nn +from mmcv.cnn import ConvModule + +from mmaction.models.backbones.resnet import ResNet +from mmaction.registry import MODELS + + +@MODELS.register_module() +class C2D(ResNet): + """C2D backbone. + + Compared to ResNet-50, a temporal-pool is added after the first + bottleneck. Detailed structure is kept same as "video-nonlocal-net" repo. + Please refer to https://github.com/facebookresearch/video-nonlocal-net/blob + /main/scripts/run_c2d_baseline_400k.sh. + Please note that there are some improvements compared to "Non-local Neural + Networks" paper (https://arxiv.org/abs/1711.07971). + Differences are noted at https://github.com/facebookresearch/video-nonlocal + -net#modifications-for-improving-speed. 
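For reference, a short shape walk-through of the pooling scheme described above (illustrative only; the kernel and stride values mirror the `maxpool3d_1`/`maxpool3d_2` layers defined in `_make_stem_layer` below):

```python
import torch
import torch.nn as nn

# Dummy (N, C, T, H, W) feature map as it would look after the 2D stem conv.
x = torch.randn(1, 64, 8, 112, 112)

# Spatial max-pool applied right after the stem: H and W shrink, T is kept.
spatial_pool = nn.MaxPool3d(kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 0, 0))
print(spatial_pool(x).shape)   # torch.Size([1, 64, 8, 55, 55])

# Temporal max-pool applied after the first residual stage: T is halved.
temporal_pool = nn.MaxPool3d(kernel_size=(2, 1, 1), stride=(2, 1, 1), padding=(0, 0, 0))
print(temporal_pool(x).shape)  # torch.Size([1, 64, 4, 112, 112])
```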
+ """ + + def _make_stem_layer(self) -> None: + """Construct the stem layers consists of a conv+norm+act module and a + pooling layer.""" + self.conv1 = ConvModule( + self.in_channels, + 64, + kernel_size=7, + stride=2, + padding=3, + bias=False, + conv_cfg=self.conv_cfg, + norm_cfg=self.norm_cfg, + act_cfg=self.act_cfg) + self.maxpool3d_1 = nn.MaxPool3d( + kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 0, 0)) + self.maxpool3d_2 = nn.MaxPool3d( + kernel_size=(2, 1, 1), stride=(2, 1, 1), padding=(0, 0, 0)) + + def forward(self, x: torch.Tensor) \ + -> Union[torch.Tensor, Tuple[torch.Tensor]]: + """Defines the computation performed at every call. + + Args: + x (torch.Tensor): The input data. + + Returns: + Union[torch.Tensor or Tuple[torch.Tensor]]: The feature of the + input samples extracted by the backbone. + """ + + batches = x.shape[0] + + def _convert_to_2d(x: torch.Tensor) -> torch.Tensor: + """(N, C, T, H, W) -> (N x T, C, H, W)""" + x = x.permute((0, 2, 1, 3, 4)) + x = x.reshape(-1, x.shape[2], x.shape[3], x.shape[4]) + return x + + def _convert_to_3d(x: torch.Tensor) -> torch.Tensor: + """(N x T, C, H, W) -> (N, C, T, H, W)""" + x = x.reshape(batches, -1, x.shape[1], x.shape[2], x.shape[3]) + x = x.permute((0, 2, 1, 3, 4)) + return x + + x = _convert_to_2d(x) + x = self.conv1(x) + x = _convert_to_3d(x) + x = self.maxpool3d_1(x) + x = _convert_to_2d(x) + outs = [] + for i, layer_name in enumerate(self.res_layers): + res_layer = getattr(self, layer_name) + x = res_layer(x) + if i == 0: + x = _convert_to_3d(x) + x = self.maxpool3d_2(x) + x = _convert_to_2d(x) + if i in self.out_indices: + x = _convert_to_3d(x) + outs.append(x) + if len(outs) == 1: + return outs[0] + + return tuple(outs) diff --git a/model-index.yml b/model-index.yml index 426d148f98..d1503952ae 100644 --- a/model-index.yml +++ b/model-index.yml @@ -14,6 +14,7 @@ Import: - configs/recognition/x3d/metafile.yml - configs/recognition/trn/metafile.yml - configs/recognition/swin/metafile.yml +- configs/recognition/c2d/metafile.yml - configs/detection/ava/metafile.yml - configs/detection/acrn/metafile.yml - configs/skeleton/stgcn/metafile.yml diff --git a/tests/models/backbones/test_c2d.py b/tests/models/backbones/test_c2d.py new file mode 100644 index 0000000000..b874672ea3 --- /dev/null +++ b/tests/models/backbones/test_c2d.py @@ -0,0 +1,24 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch + +from mmaction.models import C2D +from ..base import generate_backbone_demo_inputs + + +def test_c2d_backbone(): + """Test c2d backbone.""" + input_shape = (1, 3, 8, 64, 64) + imgs = generate_backbone_demo_inputs(input_shape) + + # c2d inference test + c2d_r50 = C2D(depth=50) + c2d_r50.init_weights() + c2d_r50.train() + feat = c2d_r50(imgs) + assert feat.shape == torch.Size([1, 2048, 4, 2, 2]) + + c2d_r101 = C2D(depth=101) + c2d_r101.init_weights() + c2d_r101.train() + feat = c2d_r101(imgs) + assert feat.shape == torch.Size([1, 2048, 4, 2, 2]) diff --git a/tests/models/recognizers/test_recognizer3d.py b/tests/models/recognizers/test_recognizer3d.py index 3b3d6f5df3..65f1b21e02 100644 --- a/tests/models/recognizers/test_recognizer3d.py +++ b/tests/models/recognizers/test_recognizer3d.py @@ -125,3 +125,12 @@ def test_swin(): config.model['backbone']['pretrained'] = None input_shape = (1, 3, 4, 64, 64) # M C T H W train_test_step(config, input_shape=input_shape) + + +def test_c2d(): + register_all_modules() + config = get_recognizer_cfg( + 'c2d/c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb.py') + config.model['backbone']['pretrained'] = None + input_shape = (1, 3, 8, 64, 64) # M C T H W + train_test_step(config, input_shape=input_shape) From 7ba5783552bce60dca6fa13cdd95f3e287aeeb6d Mon Sep 17 00:00:00 2001 From: wxDai Date: Tue, 1 Nov 2022 10:32:25 +0800 Subject: [PATCH 10/57] [Fix] Fix tools and mim error (#2028) --- configs/recognition/c2d/metafile.yml | 10 +++++----- tools/analysis_tools/report_accuracy.py | 6 +----- tools/slurm_train.sh | 5 ++--- 3 files changed, 8 insertions(+), 13 deletions(-) diff --git a/configs/recognition/c2d/metafile.yml b/configs/recognition/c2d/metafile.yml index 6d6dd65488..b629e5d55d 100644 --- a/configs/recognition/c2d/metafile.yml +++ b/configs/recognition/c2d/metafile.yml @@ -1,9 +1,9 @@ Collections: -- Name: c2d - README: configs/recognition/c2d/README.md - Paper: - URL: https://arxiv.org/abs/1711.07971 - Title: 'Non-local Neural Networks' + - Name: C2D + README: configs/recognition/c2d/README.md + Paper: + URL: https://arxiv.org/abs/1711.07971 + Title: 'Non-local Neural Networks' Models: - Name: c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb diff --git a/tools/analysis_tools/report_accuracy.py b/tools/analysis_tools/report_accuracy.py index 767eee4810..3516c4b06e 100644 --- a/tools/analysis_tools/report_accuracy.py +++ b/tools/analysis_tools/report_accuracy.py @@ -34,8 +34,7 @@ def parse_args(): def main(): args = parse_args() assert len(args.preds) == len(args.coefficients) - data_sample_list = args.preds - data_sample_list = [load(f) for f in data_sample_list] + data_sample_list = [load(f) for f in args.preds] score_list = [] for data_samples in data_sample_list: scores = [ @@ -54,9 +53,6 @@ def apply_softmax(scores): score_list = [apply_softmax(scores) for scores in score_list] weighted_scores = get_weighted_score(score_list, args.coefficients) - # data = open(args.datalist).readlines() - # labels = [int(x.strip().split()[-1]) for x in data] - mean_class_acc = mean_class_accuracy(weighted_scores, labels) top_1_acc, top_5_acc = top_k_accuracy(weighted_scores, labels, (1, 5)) print(f'Mean Class Accuracy: {mean_class_acc:.04f}') diff --git a/tools/slurm_train.sh b/tools/slurm_train.sh index e3058ce4a7..2cff8aae19 100755 --- a/tools/slurm_train.sh +++ b/tools/slurm_train.sh @@ -6,12 +6,11 @@ set -x PARTITION=$1 JOB_NAME=$2 CONFIG=$3 -WORK_DIR=$4 GPUS=${GPUS:-8} GPUS_PER_NODE=${GPUS_PER_NODE:-8} CPUS_PER_TASK=${CPUS_PER_TASK:-5} 
SRUN_ARGS=${SRUN_ARGS:-""} -PY_ARGS=${@:5} # Any arguments from the forth one are captured by this +PY_ARGS=${@:4} # Any arguments from the forth one are captured by this PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ srun -p ${PARTITION} \ @@ -22,4 +21,4 @@ srun -p ${PARTITION} \ --cpus-per-task=${CPUS_PER_TASK} \ --kill-on-bad-exit=1 \ ${SRUN_ARGS} \ - python -u tools/train.py ${CONFIG} --work-dir=${WORK_DIR} --launcher="slurm" ${PY_ARGS} + python -u tools/train.py ${CONFIG} --launcher="slurm" ${PY_ARGS} From 416efc2cb3293c49591e045e859df9af4c653dd5 Mon Sep 17 00:00:00 2001 From: Zhan Tong Date: Mon, 31 Oct 2022 19:33:04 -0700 Subject: [PATCH 11/57] [Docs] Update VideoMAE in README.md (#2029) Co-authored-by: KaiHoo --- README.md | 1 + configs/recognition/videomae/README.md | 14 ++++++-------- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 9cc40cb16c..bd17f2b860 100644 --- a/README.md +++ b/README.md @@ -71,6 +71,7 @@ The 1.x branch works with **PyTorch 1.6+**. ## What's New - (2022-10-11) We support **Video Swin Transformer** on Kinetics400 and additionally train a Swin-L model on Kinetics700 to extract video features for downstream tasks. +- (2022-10-25) We support **VideoMAE** on Kinetics400. - (2022-10-28) We support **C2D** on Kinetics400, achieve 73.57% Top-1 accuracy (higher than 71.8% in the [paper](https://arxiv.org/abs/1711.07971)). **Release**: v1.0.0rc1 was released in 14/10/2022. Please refer to [changelog.md](docs/en/notes/changelog.md) for details and release history. diff --git a/configs/recognition/videomae/README.md b/configs/recognition/videomae/README.md index 0172b19ca8..65b353aff1 100644 --- a/configs/recognition/videomae/README.md +++ b/configs/recognition/videomae/README.md @@ -8,7 +8,7 @@ -Pre-training video transformers on extra large-scale datasets is generally required to achieve premier performance on relatively small datasets. In this paper, we show that video masked autoencoders (VideoMAE) are data-efficient learners for self-supervised video pre-training (SSVP). We are inspired by the recent ImageMAE and propose customized video tube masking with an extremely high ratio. This simple design makes video reconstruction a more challenging self-supervision task, thus encouraging extracting more effective video representations during this pre-training process. We obtain three important findings on SSVP: (1) An extremely high proportion of masking ratio (i.e., 90% to 95%) still yields favorable performance of VideoMAE. The temporally redundant video content enables a higher masking ratio than that of images. (2) VideoMAE achieves impressive results on very small datasets (i.e., around 3k-4k videos) without using any extra data. (3) VideoMAE shows that data quality is more important than data quantity for SSVP. Domain shift between pre-training and target datasets is an important issue. Notably, our VideoMAE with the vanilla ViT can achieve 85.8% on Kinetics-400, 75.3% on Something-Something V2, 90.8% on UCF101, and 61.1% on HMDB51, without using any extra data. +Pre-training video transformers on extra large-scale datasets is generally required to achieve premier performance on relatively small datasets. In this paper, we show that video masked autoencoders (VideoMAE) are data-efficient learners for self-supervised video pre-training (SSVP). We are inspired by the recent ImageMAE and propose customized video tube masking with an extremely high ratio. 
This simple design makes video reconstruction a more challenging self-supervision task, thus encouraging extracting more effective video representations during this pre-training process. We obtain three important findings on SSVP: (1) An extremely high proportion of masking ratio (i.e., 90% to 95%) still yields favorable performance of VideoMAE. The temporally redundant video content enables a higher masking ratio than that of images. (2) VideoMAE achieves impressive results on very small datasets (i.e., around 3k-4k videos) without using any extra data. (3) VideoMAE shows that data quality is more important than data quantity for SSVP. Domain shift between pre-training and target datasets is an important issue. Notably, our VideoMAE with the vanilla ViT can achieve 87.4% on Kinetics-400, 75.4% on Something-Something V2, 91.3% on UCF101, and 62.6% on HMDB51, without using any extra data. @@ -52,12 +52,10 @@ For more details, you can refer to the **Test** part in the [Training and Test T ## Citation ```BibTeX -@misc{feichtenhofer2020x3d, - title={X3D: Expanding Architectures for Efficient Video Recognition}, - author={Christoph Feichtenhofer}, - year={2020}, - eprint={2004.04730}, - archivePrefix={arXiv}, - primaryClass={cs.CV} +@inproceedings{tong2022videomae, + title={Video{MAE}: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training}, + author={Zhan Tong and Yibing Song and Jue Wang and Limin Wang}, + booktitle={Advances in Neural Information Processing Systems}, + year={2022} } ``` From 17e1921a37c431c741c36af60ad4a65b016a45b9 Mon Sep 17 00:00:00 2001 From: cir7 <33249023+cir7@users.noreply.github.com> Date: Tue, 1 Nov 2022 10:43:45 +0800 Subject: [PATCH 12/57] [fix] fix imgaug wrapper for dev1.x (#2024) --- mmaction/datasets/transforms/wrappers.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/mmaction/datasets/transforms/wrappers.py b/mmaction/datasets/transforms/wrappers.py index 1c0ce5f770..124088bf2d 100644 --- a/mmaction/datasets/transforms/wrappers.py +++ b/mmaction/datasets/transforms/wrappers.py @@ -294,10 +294,11 @@ def imgaug_builder(self, cfg): raise TypeError( f'type must be a str or valid type, but got {type(obj_type)}') - if 'children' in args: - args['children'] = [ - self.imgaug_builder(child) for child in args['children'] - ] + for aug_list_key in ['children', 'then_list', 'else_list']: + if aug_list_key in args: + args[aug_list_key] = [ + self.imgaug_builder(child) for child in args[aug_list_key] + ] return obj_cls(**args) From ed914c0d51e92785937d72d22cf74ef9c64b3118 Mon Sep 17 00:00:00 2001 From: wxDai Date: Thu, 3 Nov 2022 15:49:46 +0800 Subject: [PATCH 13/57] [Improvement] Improve docstring coverage (#1910) Co-authored-by: cir7 <33249023+cir7@users.noreply.github.com> Co-authored-by: Kai Hu Co-authored-by: Yining Li --- mmaction/datasets/activitynet_dataset.py | 1 + mmaction/datasets/transforms/loading.py | 29 +++++ mmaction/datasets/transforms/pose_loading.py | 39 ++++++- mmaction/datasets/transforms/processing.py | 24 ++++ mmaction/datasets/transforms/wrappers.py | 18 +++ mmaction/evaluation/functional/ava_utils.py | 5 +- mmaction/evaluation/metrics/acc_metric.py | 1 + mmaction/models/backbones/resnet.py | 1 + mmaction/models/backbones/resnet3d.py | 4 + mmaction/models/backbones/resnet3d_csn.py | 1 + mmaction/models/backbones/resnet_tsm.py | 5 + mmaction/models/backbones/stgcn.py | 79 ++++++------- mmaction/models/backbones/tanet.py | 2 +- mmaction/models/backbones/timesformer.py | 1 + 
mmaction/models/backbones/x3d.py | 1 + mmaction/models/common/sub_batchnorm3d.py | 3 + mmaction/models/common/transformer.py | 6 +- mmaction/models/losses/base.py | 1 + mmaction/models/losses/ohem_hinge_loss.py | 3 + mmaction/models/necks/tpn.py | 21 ++-- .../models/roi_heads/shared_heads/fbo_head.py | 8 +- mmaction/models/utils/graph.py | 106 +++++++++++------- mmaction/structures/action_data_sample.py | 12 ++ mmaction/structures/bbox/bbox_target.py | 18 +-- mmaction/structures/bbox/transforms.py | 13 +-- mmaction/utils/collect_env.py | 1 + mmaction/utils/misc.py | 8 +- mmaction/utils/setup_env.py | 10 +- tests/models/backbones/test_stgcn.py | 39 ------- 29 files changed, 291 insertions(+), 169 deletions(-) diff --git a/mmaction/datasets/activitynet_dataset.py b/mmaction/datasets/activitynet_dataset.py index e87b1d994a..4c0cd29f1c 100644 --- a/mmaction/datasets/activitynet_dataset.py +++ b/mmaction/datasets/activitynet_dataset.py @@ -79,6 +79,7 @@ def __init__(self, **kwargs) def load_data_list(self) -> List[dict]: + """Load annotation file to get video information.""" check_file_exist(self.ann_file) data_list = [] anno_database = mmengine.load(self.ann_file) diff --git a/mmaction/datasets/transforms/loading.py b/mmaction/datasets/transforms/loading.py index 8bcdcd2db1..ceb761d638 100644 --- a/mmaction/datasets/transforms/loading.py +++ b/mmaction/datasets/transforms/loading.py @@ -28,6 +28,7 @@ def __init__(self, **kwargs): self.kwargs = kwargs def init_hvu_info(self, categories, category_nums): + """Initialize hvu information.""" assert len(categories) == len(category_nums) self.categories = categories self.category_nums = category_nums @@ -414,6 +415,7 @@ def __init__(self, clip_len, frame_interval=2, test_mode=False): super().__init__(clip_len, frame_interval, test_mode=test_mode) def _get_clips(self, center_index, skip_offsets, shot_info): + """Get clip offsets.""" start = center_index - (self.clip_len // 2) * self.frame_interval end = center_index + ((self.clip_len + 1) // 2) * self.frame_interval frame_inds = list(range(start, end, self.frame_interval)) @@ -423,6 +425,12 @@ def _get_clips(self, center_index, skip_offsets, shot_info): return frame_inds def transform(self, results): + """Perform the SampleFrames loading. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ fps = results['fps'] timestamp = results['timestamp'] timestamp_start = results['timestamp_start'] @@ -620,6 +628,12 @@ def __init__(self, io_backend='disk', mode='accurate', **kwargs): assert mode in ['accurate', 'efficient'] def transform(self, results): + """Perform the PIMS initialization. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ try: import pims except ImportError: @@ -657,6 +671,13 @@ class PIMSDecode(BaseTransform): """ def transform(self, results): + """Perform the PIMS decoding. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. 
+ """ + container = results['video_reader'] if results['frame_inds'].ndim != 1: @@ -1266,10 +1287,12 @@ def __init__(self, pad_method='zero'): @staticmethod def _zero_pad(shape): + """Zero padding method.""" return np.zeros(shape, dtype=np.float32) @staticmethod def _random_pad(shape): + """Random padding method.""" # spectrogram is normalized into a distribution of 0~1 return np.random.rand(shape).astype(np.float32) @@ -1369,6 +1392,12 @@ def __init__(self, clip_len): self.clip_len = clip_len def transform(self, results): + """Perform the building of pseudo clips. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ # the input should be one single image assert len(results['imgs']) == 1 im = results['imgs'][0] diff --git a/mmaction/datasets/transforms/pose_loading.py b/mmaction/datasets/transforms/pose_loading.py index b73c078f41..58748eacb6 100644 --- a/mmaction/datasets/transforms/pose_loading.py +++ b/mmaction/datasets/transforms/pose_loading.py @@ -21,15 +21,16 @@ class UniformSampleFrames(BaseTransform): random seed is set during testing, to make the sampling results deterministic. - Required keys are "total_frames", "start_index" , added or modified keys - are "frame_inds", "clip_len", "frame_interval" and "num_clips". + Required keys are ``'total_frames'``, ``'start_index'`` , added or + modified keys are ``'frame_inds'``, ``'clip_len'``, + ``'frame_interval'`` and ``'num_clips'``. Args: clip_len (int): Frames of each sampled output clip. - num_clips (int): Number of clips to be sampled. Default: 1. + num_clips (int): Number of clips to be sampled. Defaults to 1. test_mode (bool): Store True when building test or validation dataset. - Default: False. - seed (int): The random seed used during test time. Default: 255. + Defaults to False. + seed (int): The random seed used during test time. Defaults to 255. """ def __init__(self, clip_len, num_clips=1, test_mode=False, seed=255): @@ -113,6 +114,12 @@ def _get_test_clips(self, num_frames, clip_len): return inds def transform(self, results): + """Perform the SampleFrames loading. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ num_frames = results['total_frames'] if self.test_mode: @@ -149,7 +156,12 @@ class PoseDecode(BaseTransform): """ def transform(self, results): + """Perform the pose decoding. + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ if 'total_frames' not in results: results['total_frames'] = results['keypoint'].shape[1] @@ -231,7 +243,12 @@ def __init__(self, self.file_client = None def transform(self, results): + """Perform the kinetics pose decoding. + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ assert 'filename' in results filename = results.pop('filename') @@ -588,6 +605,12 @@ def gen_an_aug(self, results): return imgs def transform(self, results): + """Generate pseudo heatmaps based on joint coordinates and confidence. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ if not self.double: results['imgs'] = np.stack(self.gen_an_aug(results)) else: @@ -635,6 +658,12 @@ def __init__(self, clip_len, num_clips=1): self.num_clips = num_clips def transform(self, results): + """Sample frames from the video. 
+ + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ num_frames = results['total_frames'] start_index = results['start_index'] diff --git a/mmaction/datasets/transforms/processing.py b/mmaction/datasets/transforms/processing.py index 8a8a9c7d18..6ea381030f 100644 --- a/mmaction/datasets/transforms/processing.py +++ b/mmaction/datasets/transforms/processing.py @@ -98,6 +98,12 @@ def __init__(self, assert self.padding >= 0 def transform(self, results): + """Convert the coordinates of keypoints to make it more compact. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ img_shape = results['img_shape'] h, w = img_shape kp = results['keypoint'] @@ -170,6 +176,12 @@ class Fuse(BaseTransform): """ def transform(self, results): + """Fuse lazy operations. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ if 'lazy' not in results: raise ValueError('No lazy operation detected') lazyop = results['lazy'] @@ -223,10 +235,12 @@ def __init__(self, size, lazy=False): @staticmethod def _crop_kps(kps, crop_bbox): + """Static method for cropping keypoint.""" return kps - crop_bbox[:2] @staticmethod def _crop_imgs(imgs, crop_bbox): + """Static method for cropping images.""" x1, y1, x2, y2 = crop_bbox return [img[y1:y2, x1:x2] for img in imgs] @@ -733,6 +747,7 @@ def __init__(self, self.lazy = lazy def _resize_imgs(self, imgs, new_w, new_h): + """Static method for resizing keypoint.""" return [ mmcv.imresize( img, (new_w, new_h), interpolation=self.interpolation) @@ -741,6 +756,7 @@ def _resize_imgs(self, imgs, new_w, new_h): @staticmethod def _resize_kps(kps, scale_factor): + """Static method for resizing keypoint.""" return kps * scale_factor @staticmethod @@ -915,6 +931,7 @@ def __init__(self, self.lazy = lazy def _flip_imgs(self, imgs, modality): + """Utility function for flipping images.""" _ = [mmcv.imflip_(img, self.direction) for img in imgs] lt = len(imgs) if modality == 'Flow': @@ -924,6 +941,7 @@ def _flip_imgs(self, imgs, modality): return imgs def _flip_kps(self, kps, kpscores, img_width): + """Utility function for flipping keypoint.""" kp_x = kps[..., 0] kp_x[kp_x != 0] = img_width - kp_x[kp_x != 0] new_order = list(range(kps.shape[2])) @@ -1075,6 +1093,12 @@ def __init__(self, brightness=0.5, contrast=0.5, saturation=0.5, hue=0.1): self.fn_idx = np.random.permutation(4) def transform(self, results): + """Perform ColorJitter. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ imgs = results['imgs'] num_clips, clip_len = 1, len(imgs) diff --git a/mmaction/datasets/transforms/wrappers.py b/mmaction/datasets/transforms/wrappers.py index 124088bf2d..80d28c7628 100644 --- a/mmaction/datasets/transforms/wrappers.py +++ b/mmaction/datasets/transforms/wrappers.py @@ -32,6 +32,12 @@ def __init__(self, op, **kwargs): self.trans = trans(**kwargs) def transform(self, results): + """Perform Torchvision augmentations. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ assert 'imgs' in results imgs = [x.transpose(2, 0, 1) for x in results['imgs']] @@ -77,6 +83,12 @@ def __init__(self, op, **kwargs): self.op = op def transform(self, results): + """Perform PytorchVideoTrans augmentations. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. 
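The docstrings added throughout this patch describe one shared contract: each transform receives the `results` dict produced by the previous step, updates it, and returns it for the next transform in the pipeline. A minimal sketch of that contract using the `UniformSampleFrames` transform documented above (the frame counts are illustrative):

```python
from mmaction.datasets.transforms.pose_loading import UniformSampleFrames

# Deterministically sample one 48-frame clip from a 300-frame skeleton sequence.
sampler = UniformSampleFrames(clip_len=48, num_clips=1, test_mode=True)
results = dict(total_frames=300, start_index=0)
results = sampler(results)  # BaseTransform.__call__ dispatches to transform()
print(results['frame_inds'].shape, results['clip_len'])  # (48,) 48
```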
+ """ assert 'imgs' in results assert 'gt_bboxes' not in results,\ @@ -307,6 +319,12 @@ def __repr__(self): return repr_str def transform(self, results): + """Perform Imgaug augmentations. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ assert results['modality'] == 'RGB', 'Imgaug only support RGB images.' in_type = results['imgs'][0].dtype diff --git a/mmaction/evaluation/functional/ava_utils.py b/mmaction/evaluation/functional/ava_utils.py index 49bf7c94ed..cb739a4a9b 100644 --- a/mmaction/evaluation/functional/ava_utils.py +++ b/mmaction/evaluation/functional/ava_utils.py @@ -14,6 +14,7 @@ def det2csv(results, custom_classes): + """Convert detection results to csv file.""" csv_results = [] for idx in range(len(results)): video_id = results[idx]['video_id'] @@ -35,6 +36,7 @@ def det2csv(results, custom_classes): # results is organized by class def results2csv(results, out_file, custom_classes=None): + """Convert detection results to csv file.""" csv_results = det2csv(results, custom_classes) # save space for float @@ -50,6 +52,7 @@ def to_str(item): def print_time(message, start): + """Print processing time.""" print('==> %g seconds to %s' % (time.time() - start, message), flush=True) @@ -162,7 +165,7 @@ def ava_eval(result_file, exclude_file, verbose=True, custom_classes=None): - + """Perform ava evaluation.""" assert result_type in ['mAP'] start = time.time() diff --git a/mmaction/evaluation/metrics/acc_metric.py b/mmaction/evaluation/metrics/acc_metric.py index a25e7de95b..488e28aa14 100644 --- a/mmaction/evaluation/metrics/acc_metric.py +++ b/mmaction/evaluation/metrics/acc_metric.py @@ -132,6 +132,7 @@ def compute_metrics(self, results: list) -> dict: @staticmethod def label2array(num, label): + """Convert multi-label to array.""" arr = np.zeros(num, dtype=np.float32) arr[label] = 1. 
return arr diff --git a/mmaction/models/backbones/resnet.py b/mmaction/models/backbones/resnet.py index cbdbc50f2e..0ebf6d61b0 100644 --- a/mmaction/models/backbones/resnet.py +++ b/mmaction/models/backbones/resnet.py @@ -590,6 +590,7 @@ def _freeze_stages(self) -> None: param.requires_grad = False def _partial_bn(self) -> None: + """Freezing BatchNorm2D except the first one.""" logger = MMLogger.get_current_instance() logger.info('Freezing BatchNorm2D except the first one.') count_bn = 0 diff --git a/mmaction/models/backbones/resnet3d.py b/mmaction/models/backbones/resnet3d.py index cce5db98d7..50435c3064 100644 --- a/mmaction/models/backbones/resnet3d.py +++ b/mmaction/models/backbones/resnet3d.py @@ -763,6 +763,7 @@ def _inflate_weights(self, logger: MMLogger) -> None: f': {remaining_names}') def inflate_weights(self, logger: MMLogger) -> None: + """Inflate weights.""" self._inflate_weights(self, logger) def _make_stem_layer(self) -> None: @@ -844,6 +845,7 @@ def _init_weights(self, pretrained: Optional[str] = None) -> None: raise TypeError('pretrained must be a str or None') def init_weights(self, pretrained: Optional[str] = None) -> None: + """Initialize weights.""" self._init_weights(self, pretrained) def forward(self, x: Tensor) -> Union[Tensor, Tuple[Tensor]]: @@ -1004,6 +1006,7 @@ def __init__(self, self.add_module(self.layer_name, res_layer) def inflate_weights(self, logger: MMLogger) -> None: + """Inflate weights.""" self._inflate_weights(self, logger) def _freeze_stages(self) -> None: @@ -1016,6 +1019,7 @@ def _freeze_stages(self) -> None: param.requires_grad = False def init_weights(self, pretrained: Optional[str] = None) -> None: + """Initialize weights.""" self._init_weights(self, pretrained) def forward(self, x: Tensor) -> Tensor: diff --git a/mmaction/models/backbones/resnet3d_csn.py b/mmaction/models/backbones/resnet3d_csn.py index 349eaa423d..77c5263360 100644 --- a/mmaction/models/backbones/resnet3d_csn.py +++ b/mmaction/models/backbones/resnet3d_csn.py @@ -146,6 +146,7 @@ def __init__(self, **kwargs) def train(self, mode=True): + """Set the optimization status when training.""" super(ResNet3d, self).train(mode) self._freeze_stages() if mode and self.norm_eval: diff --git a/mmaction/models/backbones/resnet_tsm.py b/mmaction/models/backbones/resnet_tsm.py index c9725f8d73..1397384a97 100644 --- a/mmaction/models/backbones/resnet_tsm.py +++ b/mmaction/models/backbones/resnet_tsm.py @@ -28,6 +28,7 @@ def __init__(self, block, num_segments, non_local_cfg=dict()): self.num_segments = num_segments def forward(self, x): + """Defines the computation performed at every call.""" x = self.block(x) n, c, h, w = x.size() @@ -168,6 +169,7 @@ def __init__(self, self.init_structure() def init_structure(self): + """Initialize structure for tsm.""" if self.is_shift: self.make_temporal_shift() if len(self.non_local_cfg) != 0: @@ -264,6 +266,7 @@ def __init__(self, net, num_segments): kernel_size=(3, 1, 1), stride=(2, 1, 1), padding=(1, 0, 0)) def forward(self, x): + """Defines the computation performed at every call.""" # [N, C, H, W] n, c, h, w = x.size() # [N // num_segments, C, num_segments, H, W] @@ -278,6 +281,7 @@ def forward(self, x): self.layer2 = TemporalPool(self.layer2, self.num_segments) def make_non_local(self): + """Wrap resnet layer into non local wrapper.""" # This part is for ResNet50 for i in range(self.num_stages): non_local_stage = self.non_local_stages[i] @@ -294,4 +298,5 @@ def make_non_local(self): self.non_local_cfg) def init_weights(self): + """Initialize 
weights.""" pass diff --git a/mmaction/models/backbones/stgcn.py b/mmaction/models/backbones/stgcn.py index 59918674df..8aae64676c 100644 --- a/mmaction/models/backbones/stgcn.py +++ b/mmaction/models/backbones/stgcn.py @@ -7,23 +7,11 @@ from mmengine.model.weight_init import constant_init, kaiming_init, normal_init from mmengine.runner import load_checkpoint from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm -from torch import Tensor from mmaction.registry import MODELS -from mmaction.utils import ConfigType from ..utils import Graph -def zero(x: Tensor) -> int: - """return zero.""" - return 0 - - -def identity(x: Tensor) -> Tensor: - """return input itself.""" - return x - - class STGCNBlock(nn.Module): """Applies a spatial temporal graph convolution over an input graph sequence. @@ -63,11 +51,9 @@ def __init__(self, nn.Dropout(dropout, inplace=True)) if not residual: - self.residual = zero - + self.residual = lambda x: 0 elif (in_channels == out_channels) and (stride == 1): - self.residual = identity - + self.residual = lambda x: x else: self.residual = nn.Sequential( nn.Conv2d( @@ -78,27 +64,27 @@ def __init__(self, self.relu = nn.ReLU(inplace=True) - def forward(self, x: Tensor, adj_mat: Tensor) -> tuple: + def forward(self, x: torch.Tensor, adj_mat: torch.Tensor) -> tuple: """Defines the computation performed at every call. Args: - x (Tensor): Input graph sequence in + x (torch.Tensor): Input graph sequence in :math:`(N, in_channels, T_{in}, V)` format. - adj_mat (Tensor): Input graph adjacency matrix in :math:`(K, V, V)` - format. + adj_mat (torch.Tensor): Input graph adjacency matrix in + :math:`(K, V, V)` format. Returns: tuple: A tuple of output graph sequence and graph adjacency matrix. - - x (Tensor): Output graph sequence in + - x (torch.Tensor): Output graph sequence in :math:`(N, out_channels, T_{out}, V)` format. - - adj_mat (Tensor): graph adjacency matrix for output data in - :math:`(K, V, V)` format. + - adj_mat (torch.Tensor): graph adjacency matrix for + output data in :math:`(K, V, V)` format. where - :math:`N` is a batch size, - :math:`K` is the spatial kernel size, as :math:`K == kernel_size[1] - `, + :math:`N` is the batch size, + :math:`K` is the spatial kernel size, as + :math:`K == kernel_size[1]`, :math:`T_{in}/T_{out}` is a length of input/output sequence, :math:`V` is the number of graph nodes. """ @@ -115,8 +101,8 @@ class ConvTemporalGraphical(nn.Module): Args: in_channels (int): Number of channels in the input sequence data. out_channels (int): Number of channels produced by the convolution. - kernel_size (int): Size of the graph convolving kernel. - t_kernel_size (int): Size of the temporal convolving kernel. + kernel_size (int): Size of the graph convolution kernel. + t_kernel_size (int): Size of the temporal convolution kernel. t_stride (int, optional): Stride of the temporal convolution. Default: 1. t_padding (int, optional): Temporal zero-padding added to both sides @@ -148,13 +134,13 @@ def __init__(self, dilation=(t_dilation, 1), bias=bias) - def forward(self, x: Tensor, adj_mat: Tensor) -> tuple: + def forward(self, x: torch.Tensor, adj_mat: torch.Tensor) -> tuple: """Defines the computation performed at every call. Args: - x (Tensor): Input graph sequence in + x (torch.Tensor): Input graph sequence in :math:`(N, in_channels, T_{in}, V)` format - adj_mat (Tensor): Input graph adjacency matrix in + adj_mat (torch.Tensor): Input graph adjacency matrix in :math:`(K, V, V)` format. 
Returns: @@ -178,24 +164,24 @@ def forward(self, x: Tensor, adj_mat: Tensor) -> tuple: n, kc, t, v = x.size() x = x.view(n, self.kernel_size, kc // self.kernel_size, t, v) - x = torch.einsum('nkctv,kvw->nctw', (x, adj_mat)) - x = x.contiguous() + x = torch.einsum('nkctv,kvw->nctw', (x, adj_mat)).contiguous() return x, adj_mat @MODELS.register_module() class STGCN(nn.Module): - """Backbone of Spatial temporal graph convolutional networks. + """Backbone of spatial temporal graph convolutional networks. Args: - in_channels (int): Number of channels in the input data. - graph_cfg (dict or ConfigDict): The arguments for building the graph. - edge_importance_weighting (bool): If ``True``, adds a learnable - importance weighting to the edges of the graph. Default: True. - data_bn (bool): If 'True', adds data normalization to the inputs. - Default: True. - pretrained (str or None): Name of pretrained model. + in_channels (int): Number of channels of the input data. + graph_cfg (dict): The arguments for building the graph. + edge_importance_weighting (bool): If ``True``, add a learnable + importance weighting to the edges of the graph. Defaults to True. + data_bn (bool): If ``True``, adds data normalization to the inputs. + Defaults to True. + pretrained (str, optional): Path of pretrained model. + **kwargs: Keyword parameters passed to graph convolution units. Shape: - Input: :math:`(N, in_channels, T_{in}, V_{in}, M_{in})` @@ -208,7 +194,7 @@ class STGCN(nn.Module): def __init__(self, in_channels: int, - graph_cfg: ConfigType, + graph_cfg: dict, edge_importance_weighting: bool = True, data_bn: bool = True, pretrained: str = None, @@ -226,7 +212,7 @@ def __init__(self, temporal_kernel_size = 9 kernel_size = (temporal_kernel_size, spatial_kernel_size) self.data_bn = nn.BatchNorm1d(in_channels * - A.size(1)) if data_bn else identity + A.size(1)) if data_bn else nn.Identity() kwargs0 = {k: v for k, v in kwargs.items() if k != 'dropout'} self.st_gcn_networks = nn.ModuleList(( @@ -274,13 +260,14 @@ def init_weights(self) -> None: else: raise TypeError('pretrained must be a str or None') - def forward(self, x: Tensor) -> Tensor: + def forward(self, x: torch.Tensor) -> torch.Tensor: """Defines the computation performed at every call. + Args: - x (Tensor): The input data. + x (torch.Tensor): The input data. Returns: - Tensor: The output of the module. + torch.Tensor: The output of the module. 
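For reference, a minimal usage sketch of the refactored backbone documented above (the layout, strategy, and tensor shape are illustrative; the exact output shape depends on the `st_gcn_networks` stage configuration, which this hunk does not show):

```python
import torch
from mmaction.models import STGCN

# COCO-style skeletons: 17 joints, 3 coordinate channels per joint.
model = STGCN(in_channels=3, graph_cfg=dict(layout='coco', strategy='spatial'))
model.init_weights()

# (N, C, T, V, M): 2 clips, 3 channels, 30 frames, 17 joints, 2 persons per clip.
skeletons = torch.randn(2, 3, 30, 17, 2)
feats = model(skeletons)
print(feats.shape)
```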
""" # data normalization x = x.float() diff --git a/mmaction/models/backbones/tanet.py b/mmaction/models/backbones/tanet.py index 43038af89f..45127a7f36 100644 --- a/mmaction/models/backbones/tanet.py +++ b/mmaction/models/backbones/tanet.py @@ -45,7 +45,6 @@ def __init__(self, block: nn.Module, num_segments: int, def forward(self, x: torch.Tensor) -> torch.Tensor: """Defines the computation performed at every call.""" - assert isinstance(self.block, Bottleneck) def _inner_forward(x): @@ -105,6 +104,7 @@ def __init__(self, self.make_tam_modeling() def init_weights(self): + """Initialize weights.""" pass def make_tam_modeling(self): diff --git a/mmaction/models/backbones/timesformer.py b/mmaction/models/backbones/timesformer.py index 7fb165445d..618b381295 100644 --- a/mmaction/models/backbones/timesformer.py +++ b/mmaction/models/backbones/timesformer.py @@ -54,6 +54,7 @@ def __init__(self, self.init_weights() def init_weights(self): + """Initialize weights.""" # Lecun norm from ClassyVision kaiming_init(self.projection, mode='fan_in', nonlinearity='linear') diff --git a/mmaction/models/backbones/x3d.py b/mmaction/models/backbones/x3d.py index aca0d9c47a..23f962ab7e 100644 --- a/mmaction/models/backbones/x3d.py +++ b/mmaction/models/backbones/x3d.py @@ -27,6 +27,7 @@ def __init__(self, channels, reduction): @staticmethod def _round_width(width, multiplier, min_width=8, divisor=8): + """Round width of filters based on width multiplier.""" width *= multiplier min_width = min_width or divisor width_out = max(min_width, diff --git a/mmaction/models/common/sub_batchnorm3d.py b/mmaction/models/common/sub_batchnorm3d.py index 1707fd545f..8dde5b6526 100644 --- a/mmaction/models/common/sub_batchnorm3d.py +++ b/mmaction/models/common/sub_batchnorm3d.py @@ -32,6 +32,7 @@ def __init__(self, num_features, **cfg): self.init_weights(cfg) def init_weights(self, cfg): + """Initialize weights.""" if cfg.get('affine', True): self.weight = torch.nn.Parameter(torch.ones(self.num_features)) self.bias = torch.nn.Parameter(torch.zeros(self.num_features)) @@ -40,6 +41,7 @@ def init_weights(self, cfg): self.affine = False def _get_aggregated_mean_std(self, means, stds, n): + """Calculate aggregated mean and std.""" mean = means.view(n, -1).sum(0) / n std = stds.view(n, -1).sum(0) / n + ( (means.view(n, -1) - mean)**2).view(n, -1).sum(0) / n @@ -62,6 +64,7 @@ def aggregate_stats(self): ) def forward(self, x): + """Defines the computation performed at every call.""" if self.training: n, c, t, h, w = x.shape assert n % self.num_splits == 0 diff --git a/mmaction/models/common/transformer.py b/mmaction/models/common/transformer.py index 4cbedf1993..50524a5821 100644 --- a/mmaction/models/common/transformer.py +++ b/mmaction/models/common/transformer.py @@ -60,9 +60,11 @@ def __init__(self, self.init_weights() def init_weights(self): + """Initialize weights.""" constant_init(self.temporal_fc, val=0, bias=0) def forward(self, query, key=None, value=None, residual=None, **kwargs): + """Defines the computation performed at every call.""" assert residual is None, ( 'Always adding the shortcut in the forward function') @@ -136,10 +138,11 @@ def __init__(self, self.init_weights() def init_weights(self): - # init DividedSpatialAttentionWithNorm by default + """init DividedSpatialAttentionWithNorm by default.""" pass def forward(self, query, key=None, value=None, residual=None, **kwargs): + """Defines the computation performed at every call.""" assert residual is None, ( 'Always adding the shortcut in the forward function') @@ 
-214,5 +217,6 @@ def __init__(self, *args, norm_cfg=dict(type='LN'), **kwargs): self.norm = build_norm_layer(norm_cfg, self.embed_dims)[1] def forward(self, x, residual=None): + """Defines the computation performed at every call.""" assert residual is None, ('Cannot apply pre-norm with FFNWithNorm') return super().forward(self.norm(x), x) diff --git a/mmaction/models/losses/base.py b/mmaction/models/losses/base.py index 9e1df07d7d..4ab4a8ea5e 100644 --- a/mmaction/models/losses/base.py +++ b/mmaction/models/losses/base.py @@ -21,6 +21,7 @@ def __init__(self, loss_weight=1.0): @abstractmethod def _forward(self, *args, **kwargs): + """Forward function.""" pass def forward(self, *args, **kwargs): diff --git a/mmaction/models/losses/ohem_hinge_loss.py b/mmaction/models/losses/ohem_hinge_loss.py index 8804a194ee..4a5c968186 100644 --- a/mmaction/models/losses/ohem_hinge_loss.py +++ b/mmaction/models/losses/ohem_hinge_loss.py @@ -53,6 +53,9 @@ def forward(ctx, pred, labels, is_positive, ohem_ratio, group_size): @staticmethod def backward(ctx, grad_output): + """Defines a formula for differentiating the operation with backward + mode automatic differentiation.""" + labels = ctx.labels slopes = ctx.slopes diff --git a/mmaction/models/necks/tpn.py b/mmaction/models/necks/tpn.py index 27e47a0eec..b3cdc92ff9 100644 --- a/mmaction/models/necks/tpn.py +++ b/mmaction/models/necks/tpn.py @@ -6,7 +6,6 @@ import torch.nn as nn from mmcv.cnn import ConvModule from mmengine.model.weight_init import constant_init, normal_init, xavier_init -from torch import Tensor from mmaction.registry import MODELS from mmaction.utils import ConfigType, OptConfigType, SampleList @@ -74,7 +73,8 @@ def __init__( self.pool = nn.MaxPool3d( downsample_scale, downsample_scale, (0, 0, 0), ceil_mode=True) - def forward(self, x: Tensor) -> Tensor: + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Defines the computation performed at every call.""" if self.downsample_position == 'before': x = self.pool(x) x = self.conv(x) @@ -140,7 +140,8 @@ def __init__( norm_cfg=dict(type='BN3d', requires_grad=True), act_cfg=dict(type='ReLU', inplace=True)) - def forward(self, x: Tuple[Tensor]) -> Tensor: + def forward(self, x: Tuple[torch.Tensor]) -> torch.Tensor: + """Defines the computation performed at every call.""" out = [self.downsamples[i](feature) for i, feature in enumerate(x)] out = torch.cat(out, 1) out = self.fusion_conv(out) @@ -187,7 +188,8 @@ def __init__(self, in_channels: Tuple[int], out_channels: int) -> None: act_cfg=dict(type='ReLU', inplace=True))) self.spatial_modulation.append(op) - def forward(self, x: Tuple[Tensor]) -> list: + def forward(self, x: Tuple[torch.Tensor]) -> list: + """Defines the computation performed at every call.""" out = [] for i, _ in enumerate(x): if isinstance(self.spatial_modulation[i], nn.ModuleList): @@ -248,7 +250,8 @@ def init_weights(self) -> None: if isinstance(m, nn.BatchNorm3d): constant_init(m, 1) - def loss(self, x: Tensor, data_samples: Optional[SampleList]) -> dict: + def loss(self, x: torch.Tensor, + data_samples: Optional[SampleList]) -> dict: """Calculate auxiliary loss.""" x = self(x) labels = [x.gt_labels.item for x in data_samples] @@ -261,7 +264,7 @@ def loss(self, x: Tensor, data_samples: Optional[SampleList]) -> dict: losses['loss_aux'] = self.loss_weight * self.loss_cls(x, labels) return losses - def forward(self, x: Tensor) -> Tensor: + def forward(self, x: torch.Tensor) -> torch.Tensor: """Auxiliary head forward function.""" x = self.conv(x) x = 
self.avg_pool(x).squeeze(-1).squeeze(-1).squeeze(-1) @@ -302,7 +305,8 @@ def __init__(self, (downsample_scale, 1, 1), (0, 0, 0), ceil_mode=True) - def forward(self, x: Tensor) -> Tensor: + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Defines the computation performed at every call.""" x = self.conv(x) x = self.pool(x) return x @@ -428,8 +432,9 @@ def init_weights(self) -> None: self.aux_head.init_weights() def forward(self, - x: Tuple[Tensor], + x: Tuple[torch.Tensor], data_samples: Optional[SampleList] = None) -> tuple: + """Defines the computation performed at every call.""" loss_aux = dict() # Calculate auxiliary loss if `self.aux_head` diff --git a/mmaction/models/roi_heads/shared_heads/fbo_head.py b/mmaction/models/roi_heads/shared_heads/fbo_head.py index 67a2e44476..aeb9c28514 100644 --- a/mmaction/models/roi_heads/shared_heads/fbo_head.py +++ b/mmaction/models/roi_heads/shared_heads/fbo_head.py @@ -131,6 +131,7 @@ def init_weights(self, pretrained=None): raise TypeError('pretrained must be a str or None') def forward(self, st_feat, lt_feat): + """Defines the computation performed at every call.""" n, c = st_feat.size(0), self.latent_channels num_st_feat, num_lt_feat = self.num_st_feat, self.num_lt_feat @@ -249,6 +250,8 @@ def __init__(self, self.non_local_layers.append(layer_name) def init_weights(self, pretrained=None): + """Initiate the parameters either from existing checkpoint or from + scratch.""" if isinstance(pretrained, str): logger = MMLogger.get_current_instance() load_checkpoint(self, pretrained, strict=False, logger=logger) @@ -262,6 +265,7 @@ def init_weights(self, pretrained=None): raise TypeError('pretrained must be a str or None') def forward(self, st_feat, lt_feat): + """Defines the computation performed at every call.""" # prepare st_feat st_feat = self.st_feat_conv(st_feat) if self.st_feat_dropout_ratio > 0: @@ -309,10 +313,11 @@ def __init__(self, **kwargs): self.max_pool = nn.AdaptiveMaxPool3d((1, None, None)) def init_weights(self, pretrained=None): - # FBOMax has no parameters to be initialized. + """FBOMax has no parameters to be initialized.""" pass def forward(self, st_feat, lt_feat): + """Defines the computation performed at every call.""" out = self.max_pool(lt_feat) return out @@ -385,6 +390,7 @@ def sample_lfb(self, rois, img_metas): return lt_feat.unsqueeze(-1).unsqueeze(-1) def forward(self, x, rois, img_metas, **kwargs): + """Defines the computation performed at every call.""" # [N, C, 1, 1, 1] st_feat = self.temporal_pool(x) st_feat = self.spatial_pool(st_feat) diff --git a/mmaction/models/utils/graph.py b/mmaction/models/utils/graph.py index e0fce39cb1..4aaac07d03 100644 --- a/mmaction/models/utils/graph.py +++ b/mmaction/models/utils/graph.py @@ -1,10 +1,25 @@ # Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Tuple + import numpy as np -def get_hop_distance(num_node, edge, max_hop=1): +def get_hop_distance(num_node: int, + edges: List[Tuple[int, int]], + max_hop: int = 1) -> np.ndarray: + """Get n-hop distance matrix by edges. + + Args: + num_node (int): The number of nodes of the graph. + edges (list[tuple[int, int]]): The edges of the graph. + max_hop (int): The maximal distance between two connected nodes. + Defaults to 1. + + Returns: + hop_dis (np.ndarray): The n-hop distance matrix. 
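A small worked example of the helper documented above, on an illustrative three-joint chain `0 - 1 - 2`:

```python
from mmaction.models.utils.graph import get_hop_distance

# Allow up to two hops between joints.
hop_dis = get_hop_distance(num_node=3, edges=[(0, 1), (1, 2)], max_hop=2)
print(hop_dis)
# [[0. 1. 2.]
#  [1. 0. 1.]
#  [2. 1. 0.]]
# Pairs farther apart than max_hop stay at inf.
```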
+ """ adj_mat = np.zeros((num_node, num_node)) - for i, j in edge: + for i, j in edges: adj_mat[i, j] = 1 adj_mat[j, i] = 1 @@ -19,7 +34,15 @@ def get_hop_distance(num_node, edge, max_hop=1): return hop_dis -def normalize_digraph(adj_matrix): +def normalize_digraph(adj_matrix: np.ndarray) -> np.ndarray: + """Normalize the digraph. + + Args: + adj_matrix (np.ndarray): The adjacency matrix. + + Returns: + norm_matrix (np.ndarray): The normalized adjacency matrix. + """ Dl = np.sum(adj_matrix, 0) num_nodes = adj_matrix.shape[0] Dn = np.zeros((num_nodes, num_nodes)) @@ -30,47 +53,56 @@ def normalize_digraph(adj_matrix): return norm_matrix -def edge2mat(link, num_node): +def edge2mat(edges: List[Tuple[int, int]], num_node: int) -> np.ndarray: + """Get adjacency matrix from edges. + + Args: + edges (list[tuple[int, int]]): The edges of the graph. + num_node (int): The number of nodes of the graph. + + Returns: + np.ndarray: The adjacency matrix. + """ A = np.zeros((num_node, num_node)) - for i, j in link: + for i, j in edges: A[j, i] = 1 return A class Graph: - """The Graph to model the skeletons extracted by the openpose. + """The Graph to model the different layout of skeletons. Args: - layout (str): must be one of the following candidates - - openpose: 18 or 25 joints. For more information, please refer to: - https://github.com/CMU-Perceptual-Computing-Lab/openpose#output - - ntu-rgb+d: Is consists of 25 joints. For more information, please - refer to https://github.com/shahroudy/NTURGB-D - - strategy (str): must be one of the follow candidates - - uniform: Uniform Labeling - - distance: Distance Partitioning - - spatial: Spatial Configuration - For more information, please refer to the section 'Partition - Strategies' in our paper (https://arxiv.org/abs/1801.07455). - - max_hop (int): the maximal distance between two connected nodes. - Default: 1 + layout (str): Must be one of the following candidates + - openpose: 18 or 25 joints. For more information, please refer to: + https://github.com/CMU-Perceptual-Computing-Lab/openpose#output + - ntu-rgb+d: 25 joints. For more information, please refer to: + https://github.com/shahroudy/NTURGB-D + - coco: 17 joints. For more information, please refer to: + https://cocodataset.org/ + + strategy (str): Must be one of the follow candidates + - uniform: Uniform Labeling + - distance: Distance Partitioning + - spatial: Spatial Configuration + For more information, please refer to the section 'Partition + Strategies' in the paper (https://arxiv.org/abs/1801.07455). + + max_hop (int): The maximal distance between two connected nodes. + Defaults to 1. dilation (int): controls the spacing between the kernel points. - Default: 1 + Defaults to 1. 
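A companion sketch for the `Graph` class documented above, showing the adjacency tensor produced by the spatial partitioning strategy (layout and strategy values are illustrative):

```python
from mmaction.models.utils.graph import Graph

# COCO layout has 17 joints; 'spatial' partitioning with the default max_hop=1
# yields three subsets (root / centripetal / centrifugal).
graph = Graph(layout='coco', strategy='spatial')
print(graph.A.shape)  # (3, 17, 17)
```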
""" def __init__(self, - layout='openpose-18', - strategy='uniform', - max_hop=1, - dilation=1): + layout: str = 'openpose-18', + strategy: str = 'uniform', + max_hop: int = 1, + dilation: int = 1) -> None: self.max_hop = max_hop self.dilation = dilation - assert layout in [ - 'openpose-18', 'openpose-25', 'ntu-rgb+d', 'ntu_edge', 'coco' - ] + assert layout in ['openpose-18', 'openpose-25', 'ntu-rgb+d', 'coco'] assert strategy in ['uniform', 'distance', 'spatial', 'agcn'] self.get_edge(layout) self.hop_dis = get_hop_distance( @@ -80,7 +112,7 @@ def __init__(self, def __str__(self): return self.A - def get_edge(self, layout): + def get_edge(self, layout: str) -> None: """This method returns the edge pairs of the layout.""" if layout == 'openpose-18': @@ -117,17 +149,6 @@ def get_edge(self, layout): self.neighbor_link = neighbor_link self.edge = self_link + neighbor_link self.center = 21 - 1 - elif layout == 'ntu_edge': - self.num_node = 24 - self_link = [(i, i) for i in range(self.num_node)] - neighbor_1base = [(1, 2), (3, 2), (4, 3), (5, 2), (6, 5), (7, 6), - (8, 7), (9, 2), (10, 9), (11, 10), (12, 11), - (13, 1), (14, 13), (15, 14), (16, 15), (17, 1), - (18, 17), (19, 18), (20, 19), (21, 22), (22, 8), - (23, 24), (24, 12)] - neighbor_link = [(i - 1, j - 1) for (i, j) in neighbor_1base] - self.edge = self_link + neighbor_link - self.center = 2 elif layout == 'coco': self.num_node = 17 self_link = [(i, i) for i in range(self.num_node)] @@ -141,7 +162,7 @@ def get_edge(self, layout): else: raise ValueError(f'{layout} is not supported.') - def get_adjacency(self, strategy): + def get_adjacency(self, strategy: str): """This method returns the adjacency matrix according to strategy.""" valid_hop = range(0, self.max_hop + 1, self.dilation) @@ -185,7 +206,6 @@ def get_adjacency(self, strategy): A = np.stack(A) self.A = A elif strategy == 'agcn': - A = [] link_mat = edge2mat(self.self_link, self.num_node) In = normalize_digraph(edge2mat(self.neighbor_link, self.num_node)) outward = [(j, i) for (i, j) in self.neighbor_link] diff --git a/mmaction/structures/action_data_sample.py b/mmaction/structures/action_data_sample.py index 7de628d993..ab0eef440e 100644 --- a/mmaction/structures/action_data_sample.py +++ b/mmaction/structures/action_data_sample.py @@ -24,48 +24,60 @@ def set_gt_labels(self, value: Union[int, @property def gt_labels(self): + """Property of `gt_labels`""" return self._gt_labels @gt_labels.setter def gt_labels(self, value): + """Setter of `gt_labels`""" self.set_field(value, '_gt_labels', LabelData) @gt_labels.deleter def gt_labels(self): + """Deleter of `gt_labels`""" del self._gt_labels @property def pred_scores(self): + """Property of `pred_scores`""" return self._pred_scores @pred_scores.setter def pred_scores(self, value): + """Setter of `pred_scores`""" self.set_field(value, '_pred_scores', LabelData) @pred_scores.deleter def pred_scores(self): + """Deleter of `pred_scores`""" del self._pred_scores @property def proposals(self): + """Property of `proposals`""" return self._proposals @proposals.setter def proposals(self, value): + """Setter of `proposals`""" self.set_field(value, '_proposals', dtype=InstanceData) @proposals.deleter def proposals(self): + """Deleter of `proposals`""" del self._proposals @property def gt_instances(self): + """Property of `gt_instances`""" return self._gt_instances @gt_instances.setter def gt_instances(self, value): + """Setter of `gt_instances`""" self.set_field(value, '_gt_instances', dtype=InstanceData) @gt_instances.deleter def 
gt_instances(self): + """Deleter of `gt_instances`""" del self._gt_instances diff --git a/mmaction/structures/bbox/bbox_target.py b/mmaction/structures/bbox/bbox_target.py index 71ac21466f..024fc9b25f 100644 --- a/mmaction/structures/bbox/bbox_target.py +++ b/mmaction/structures/bbox/bbox_target.py @@ -1,20 +1,22 @@ # Copyright (c) OpenMMLab. All rights reserved. -from typing import List +from typing import List, Union +import mmengine import torch import torch.nn.functional as F -from torch import Tensor -def bbox_target(pos_bboxes_list: List[Tensor], neg_bboxes_list: List[Tensor], - gt_labels: List[Tensor], cfg: dict) -> tuple: +def bbox_target(pos_bboxes_list: List[torch.Tensor], + neg_bboxes_list: List[torch.Tensor], + gt_labels: List[torch.Tensor], + cfg: Union[dict, mmengine.ConfigDict]) -> tuple: """Generate classification targets for bboxes. Args: - pos_bboxes_list (List[Tensor]): Positive bboxes list. - neg_bboxes_list (List[Tensor]): Negative bboxes list. - gt_labels (List[Tensor]): Groundtruth classification label list. - cfg (dict): RCNN config. + pos_bboxes_list (List[torch.Tensor]): Positive bboxes list. + neg_bboxes_list (List[torch.Tensor]): Negative bboxes list. + gt_labels (List[torch.Tensor]): Groundtruth classification label list. + cfg (dict | mmengine.ConfigDict): RCNN config. Returns: tuple: Label and label_weight for bboxes. diff --git a/mmaction/structures/bbox/transforms.py b/mmaction/structures/bbox/transforms.py index 394cacd7a2..1269cf36dd 100644 --- a/mmaction/structures/bbox/transforms.py +++ b/mmaction/structures/bbox/transforms.py @@ -1,10 +1,10 @@ # Copyright (c) OpenMMLab. All rights reserved. import numpy as np -from torch import Tensor +import torch -def bbox2result(bboxes: Tensor, - labels: Tensor, +def bbox2result(bboxes: torch.Tensor, + labels: torch.Tensor, num_classes: int, thr: float = 0.01) -> list: """Convert detection results to a list of numpy arrays. @@ -12,16 +12,15 @@ def bbox2result(bboxes: Tensor, This identifies single-label classification (as opposed to multi-label) through the thr parameter which is set to a negative value. - Currently, the way to set this is to set - `test_cfg.rcnn.action_thr=-1.0` ToDo: The ideal way would be for this to be automatically set when the + Currently, the way to set this is to set ``test_cfg.rcnn.action_thr=-1.0`` model cfg uses multilabel=False, however this could be a breaking change and is left as a future exercise. NB - this should not interfere with the evaluation in any case. Args: - bboxes (Tensor): shape ``(n, 4)``. - labels (Tensor): shape ``(n, num_classes)``. + bboxes (torch.Tensor): shape ``(n, 4)``. + labels (torch.Tensor): shape ``(n, num_classes)``. num_classes (int): class number, including background class. thr (float): The score threshold used when converting predictions to detection results. 
If a single negative value, uses single-label diff --git a/mmaction/utils/collect_env.py b/mmaction/utils/collect_env.py index 51f336ce58..37599b4bf8 100644 --- a/mmaction/utils/collect_env.py +++ b/mmaction/utils/collect_env.py @@ -6,6 +6,7 @@ def collect_env(): + """Collect the information of the running environments.""" env_info = collect_basic_env() env_info['MMAction2'] = ( mmaction.__version__ + '+' + get_git_hash(digits=7)) diff --git a/mmaction/utils/misc.py b/mmaction/utils/misc.py index 374d62dcf3..3c34df3f68 100644 --- a/mmaction/utils/misc.py +++ b/mmaction/utils/misc.py @@ -10,25 +10,25 @@ import numpy as np -def get_random_string(length: int = 15): +def get_random_string(length: int = 15) -> str: """Get random string with letters and digits. Args: - length (int): Length of random string. Default: 15. + length (int): Length of random string. Defaults to 15. """ return ''.join( random.choice(string.ascii_letters + string.digits) for _ in range(length)) -def get_thread_id(): +def get_thread_id() -> int: """Get current thread id.""" # use ctype to find thread id thread_id = ctypes.CDLL('libc.so.6').syscall(186) return thread_id -def get_shm_dir(): +def get_shm_dir() -> str: """Get shm dir for temporary usage.""" return '/dev/shm' diff --git a/mmaction/utils/setup_env.py b/mmaction/utils/setup_env.py index 672bc21fae..6ae3872fb6 100644 --- a/mmaction/utils/setup_env.py +++ b/mmaction/utils/setup_env.py @@ -9,13 +9,13 @@ def register_all_modules(init_default_scope: bool = True) -> None: """Register all modules in mmaction into the registries. Args: - init_default_scope (bool): Whether initialize the mmaction default scope. - If True, the global default scope will be set to `mmaction`, and all - registries will build modules from mmaction's registry node. To - understand more about the registry, please refer to + init_default_scope (bool): Whether initialize the mmaction default + scope. If True, the global default scope will be set to `mmaction`, + and all registries will build modules from mmaction's registry + node. To understand more about the registry, please refer to https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/registry.md Defaults to True. 
- """ # noqa + """ import mmaction.datasets # noqa: F401,F403 import mmaction.engine # noqa: F401,F403 import mmaction.evaluation # noqa: F401,F403 diff --git a/tests/models/backbones/test_stgcn.py b/tests/models/backbones/test_stgcn.py index cf12540296..fdb28bff01 100644 --- a/tests/models/backbones/test_stgcn.py +++ b/tests/models/backbones/test_stgcn.py @@ -46,19 +46,6 @@ def test_stgcn_backbone(): feat = stgcn(skeletons) assert feat.shape == torch.Size([2, 256, 75, 25]) - # test ntu_edge layout, spatial strategy - input_shape = (1, 3, 300, 24, 2) - skeletons = generate_backbone_demo_inputs(input_shape) - - stgcn = STGCN( - in_channels=3, - edge_importance_weighting=True, - graph_cfg=dict(layout='ntu_edge', strategy='spatial')) - stgcn.init_weights() - stgcn.train() - feat = stgcn(skeletons) - assert feat.shape == torch.Size([2, 256, 75, 24]) - # test coco layout, uniform strategy input_shape = (1, 3, 300, 17, 2) skeletons = generate_backbone_demo_inputs(input_shape) @@ -98,19 +85,6 @@ def test_stgcn_backbone(): feat = stgcn(skeletons) assert feat.shape == torch.Size([2, 256, 75, 25]) - # test ntu_edge layout, uniform strategy - input_shape = (1, 3, 300, 24, 2) - skeletons = generate_backbone_demo_inputs(input_shape) - - stgcn = STGCN( - in_channels=3, - edge_importance_weighting=True, - graph_cfg=dict(layout='ntu_edge', strategy='uniform')) - stgcn.init_weights() - stgcn.train() - feat = stgcn(skeletons) - assert feat.shape == torch.Size([2, 256, 75, 24]) - # test coco layout, distance strategy input_shape = (1, 3, 300, 17, 2) skeletons = generate_backbone_demo_inputs(input_shape) @@ -149,16 +123,3 @@ def test_stgcn_backbone(): stgcn.train() feat = stgcn(skeletons) assert feat.shape == torch.Size([2, 256, 75, 25]) - - # test ntu_edge layout, distance strategy - input_shape = (1, 3, 300, 24, 2) - skeletons = generate_backbone_demo_inputs(input_shape) - - stgcn = STGCN( - in_channels=3, - edge_importance_weighting=True, - graph_cfg=dict(layout='ntu_edge', strategy='distance')) - stgcn.init_weights() - stgcn.train() - feat = stgcn(skeletons) - assert feat.shape == torch.Size([2, 256, 75, 24]) From d312484eb8d7adb5c603a3039cbf90246277e13e Mon Sep 17 00:00:00 2001 From: wxDai Date: Fri, 4 Nov 2022 15:14:13 +0800 Subject: [PATCH 14/57] [Docs] Add videoMAE to MODEL_ZOO (#2036) --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index bd17f2b860..e25ea7be14 100644 --- a/README.md +++ b/README.md @@ -112,6 +112,7 @@ Please refer to [install.md](https://mmaction2.readthedocs.io/en/1.x/get_started TANet (ArXiv'2020) TimeSformer (ICML'2021) VideoSwin (CVPR'2022) + VideoMAE (NeurIPS'2022) Action Localization From c3fe6d867e99fd18c3ae3e4e6174311060c255f2 Mon Sep 17 00:00:00 2001 From: wxDai Date: Wed, 9 Nov 2022 20:18:28 +0800 Subject: [PATCH 15/57] [Docs] Refactor `4_train_test.md` (#2038) --- docs/en/user_guides/4_train_test.md | 126 ++++++++++++++-------------- tools/dist_test.sh | 8 +- tools/dist_train.sh | 8 +- tools/test.py | 13 --- 4 files changed, 74 insertions(+), 81 deletions(-) diff --git a/docs/en/user_guides/4_train_test.md b/docs/en/user_guides/4_train_test.md index 4aa90fc3bb..4eb5db3418 100644 --- a/docs/en/user_guides/4_train_test.md +++ b/docs/en/user_guides/4_train_test.md @@ -12,50 +12,52 @@ Here is the full usage of the script: python tools/train.py ${CONFIG_FILE} [ARGS] ``` +````{note} By default, MMAction2 prefers GPU to CPU. 
If you want to train a model on CPU, please empty `CUDA_VISIBLE_DEVICES` or set it to -1 to make GPU invisible to the program. ```bash CUDA_VISIBLE_DEVICES=-1 python tools/train.py ${CONFIG_FILE} [ARGS] ``` +```` | ARGS | Description | | ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `CONFIG_FILE` | The path to the config file. | | `--work-dir WORK_DIR` | The target folder to save logs and checkpoints. Defaults to a folder with the same name of the config file under `./work_dirs`. | -| `--resume [RESUME]` | Resume training. If specify a path, resume from it, while if not specify, try to auto resume from the latest checkpoint. | +| `--resume [RESUME]` | Resume training. If a path is specified, resume from it, while if not specified, try to auto resume from the latest checkpoint. | | `--amp` | Enable automatic-mixed-precision training. | | `--no-validate` | **Not suggested**. Disable checkpoint evaluation during training. | | `--auto-scale-lr` | Auto scale the learning rate according to the actual batch size and the original batch size. | | `--cfg-options CFG_OPTIONS` | Override some settings in the used config, the key-value pair in xxx=yyy format will be merged into the config file. If the value to be overwritten is a list, it should be of the form of either `key="[a,b]"` or `key=a,b`. The argument also allows nested list/tuple values, e.g. `key="[(a,b),(c,d)]"`. Note that the quotation marks are necessary and that no white space is allowed. | -| `--launcher {none,pytorch,slurm,mpi}` | Options for job launcher. | +| `--launcher {none,pytorch,slurm,mpi}` | Options for job launcher. Defaults to `none`. | ### Training with multiple GPUs We provide a shell script to start a multi-GPUs task with `torch.distributed.launch`. ```shell -bash ./tools/dist_train.sh ${CONFIG_FILE} ${GPU_NUM} [PY_ARGS] +bash tools/dist_train.sh ${CONFIG} ${GPUS} [PY_ARGS] ``` -| ARGS | Description | -| ------------- | ---------------------------------------------------------------------------------- | -| `CONFIG_FILE` | The path to the config file. | -| `GPU_NUM` | The number of GPUs to be used. | -| `[PYARGS]` | The other optional arguments of `tools/train.py`, see [here](#train-with-your-pc). | +| ARGS | Description | +| ---------- | ---------------------------------------------------------------------------------- | +| `CONFIG` | The path to the config file. | +| `GPUS` | The number of GPUs to be used. | +| `[PYARGS]` | The other optional arguments of `tools/train.py`, see [here](#train-with-your-pc). | You can also specify extra arguments of the launcher by environment variables. For example, change the -communication port of the launcher to 29666 by the below command: +communication port of the launcher to 29666 by the following command: ```shell -PORT=29666 bash ./tools/dist_train.sh ${CONFIG_FILE} ${GPU_NUM} [PY_ARGS] +PORT=29666 bash tools/dist_train.sh ${CONFIG} ${GPUS} [PY_ARGS] ``` If you want to startup multiple training jobs and use different GPUs, you can launch them by specifying different port and visible devices. 
```shell -CUDA_VISIBLE_DEVICES=0,1,2,3 PORT=29500 bash ./tools/dist_train.sh ${CONFIG_FILE1} 4 [PY_ARGS] -CUDA_VISIBLE_DEVICES=4,5,6,7 GPUS=29501 bash ./tools/dist_train.sh ${CONFIG_FILE2} 4 [PY_ARGS] +CUDA_VISIBLE_DEVICES=0,1,2,3 PORT=29500 bash tools/dist_train.sh ${CONFIG} 4 [PY_ARGS] +CUDA_VISIBLE_DEVICES=4,5,6,7 PORT=29501 bash tools/dist_train.sh ${CONFIG} 4 [PY_ARGS] ``` ### Training with multiple machines @@ -76,14 +78,14 @@ On the second machine: NNODES=2 NODE_RANK=1 PORT=$MASTER_PORT MASTER_ADDR=$MASTER_ADDR bash tools/dist_train.sh $CONFIG $GPUS ``` -Comparing with multi-GPUs in a single machine, you need to specify some extra environment variables: +The following extra environment variables need to be specified to train or test models with multiple machines: -| ENV_VARS | Description | -| ------------- | ---------------------------------------------------------------------------- | -| `NNODES` | The total number of machines. | -| `NODE_RANK` | The index of the local machine. | -| `PORT` | The communication port, it should be the same in all machines. | -| `MASTER_ADDR` | The IP address of the master machine, it should be the same in all machines. | +| ENV_VARS | Description | +| ------------- | ----------------------------------------------------------------------------------------------------- | +| `NNODES` | The total number of machines. Defaults to 1. | +| `NODE_RANK` | The index of the local machine. Defaults to 0. | +| `PORT` | The communication port, it should be the same in all machines. Defaults to 29500. | +| `MASTER_ADDR` | The IP address of the master machine, it should be the same in all machines. Defaults to `127.0.0.1`. | Usually it is slow if you do not have high speed networking like InfiniBand. @@ -92,25 +94,24 @@ Usually it is slow if you do not have high speed networking like InfiniBand. If you run MMAction2 on a cluster managed with [slurm](https://slurm.schedmd.com/), you can use the script `slurm_train.sh`. ```shell -[ENV_VARS] ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} ${CONFIG_FILE} ${WORK_DIR} [PY_ARGS] +[ENV_VARS] bash tools/slurm_train.sh ${PARTITION} ${JOB_NAME} ${CONFIG} [PY_ARGS] ``` Here are the arguments description of the script. -| ARGS | Description | -| ------------- | ---------------------------------------------------------------------------------- | -| `PARTITION` | The partition to use in your cluster. | -| `JOB_NAME` | The name of your job, you can name it as you like. | -| `CONFIG_FILE` | The path to the config file. | -| `WORK_DIR` | The target folder to save logs and checkpoints. | -| `[PYARGS]` | The other optional arguments of `tools/train.py`, see [here](#train-with-your-pc). | +| ARGS | Description | +| ----------- | ---------------------------------------------------------------------------------- | +| `PARTITION` | The partition to use in your cluster. | +| `JOB_NAME` | The name of your job, you can name it as you like. | +| `CONFIG` | The path to the config file. | +| `[PYARGS]` | The other optional arguments of `tools/train.py`, see [here](#train-with-your-pc). | Here are the environment variables can be used to configure the slurm job. | ENV_VARS | Description | | --------------- | ---------------------------------------------------------------------------------------------------------- | | `GPUS` | The number of GPUs to be used. Defaults to 8. | -| `GPUS_PER_NODE` | The number of GPUs to be allocated per node.. | +| `GPUS_PER_NODE` | The number of GPUs to be allocated per node. Defaults to 8. 
| | `CPUS_PER_TASK` | The number of CPUs to be allocated per task (Usually one GPU corresponds to one task). Defaults to 5. | | `SRUN_ARGS` | The other arguments of `srun`. Available options can be found [here](https://slurm.schedmd.com/srun.html). | @@ -126,54 +127,55 @@ Here is the full usage of the script: python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [ARGS] ``` +````{note} By default, MMAction2 prefers GPU to CPU. If you want to test a model on CPU, please empty `CUDA_VISIBLE_DEVICES` or set it to -1 to make GPU invisible to the program. ```bash CUDA_VISIBLE_DEVICES=-1 python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [ARGS] ``` +```` | ARGS | Description | | ------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `CONFIG_FILE` | The path to the config file. | | `CHECKPOINT_FILE` | The path to the checkpoint file (It can be a http link) | -| `--work-dir WORK_DIR` | The directory to save the file containing evaluation metrics. | -| `--out OUT` | The path to save the file containing evaluation metrics. | +| `--work-dir WORK_DIR` | The directory to save the file containing evaluation metrics. Defaults to a folder with the same name of the config file under `./work_dirs`. | | `--dump DUMP` | The path to dump all outputs of the model for offline evaluation. | | `--cfg-options CFG_OPTIONS` | Override some settings in the used config, the key-value pair in xxx=yyy format will be merged into the config file. If the value to be overwritten is a list, it should be of the form of either `key="[a,b]"` or `key=a,b`. The argument also allows nested list/tuple values, e.g. `key="[(a,b),(c,d)]"`. Note that the quotation marks are necessary and that no white space is allowed. | | `--show-dir SHOW_DIR` | The directory to save the result visualization images. | | `--show` | Visualize the prediction result in a window. | -| `--interval INTERVAL` | The interval of samples to visualize. | -| `--wait-time WAIT_TIME` | The display time of every window (in seconds). Defaults to 1. | -| `--launcher {none,pytorch,slurm,mpi}` | Options for job launcher. | +| `--interval INTERVAL` | The interval of samples to visualize. Defaults to 1. | +| `--wait-time WAIT_TIME` | The display time of every window (in seconds). Defaults to 2. | +| `--launcher {none,pytorch,slurm,mpi}` | Options for job launcher. Defaults to `none`. | ### Test with multiple GPUs We provide a shell script to start a multi-GPUs task with `torch.distributed.launch`. ```shell -bash ./tools/dist_test.sh ${CONFIG_FILE} ${CHECKPOINT_FILE} ${GPU_NUM} [PY_ARGS] +bash tools/dist_test.sh ${CONFIG} ${CHECKPOINT} ${GPUS} [PY_ARGS] ``` -| ARGS | Description | -| ----------------- | -------------------------------------------------------------------------------- | -| `CONFIG_FILE` | The path to the config file. | -| `CHECKPOINT_FILE` | The path to the checkpoint file (It can be a http link) | -| `GPU_NUM` | The number of GPUs to be used. | -| `[PYARGS]` | The other optional arguments of `tools/test.py`, see [here](#test-with-your-pc). | +| ARGS | Description | +| ------------ | -------------------------------------------------------------------------------- | +| `CONFIG` | The path to the config file. | +| `CHECKPOINT` | The path to the checkpoint file (It can be a http link) | +| `GPUS` | The number of GPUs to be used. 
| +| `[PYARGS]` | The other optional arguments of `tools/test.py`, see [here](#test-with-your-pc). | You can also specify extra arguments of the launcher by environment variables. For example, change the -communication port of the launcher to 29666 by the below command: +communication port of the launcher to 29666 by the following command: ```shell -PORT=29666 bash ./tools/dist_test.sh ${CONFIG_FILE} ${CHECKPOINT_FILE} ${GPU_NUM} [PY_ARGS] +PORT=29666 bash tools/dist_test.sh ${CONFIG} ${CHECKPOINT} ${GPUS} [PY_ARGS] ``` If you want to startup multiple test jobs and use different GPUs, you can launch them by specifying different port and visible devices. ```shell -CUDA_VISIBLE_DEVICES=0,1,2,3 PORT=29500 bash ./tools/dist_test.sh ${CONFIG_FILE1} ${CHECKPOINT_FILE} 4 [PY_ARGS] -CUDA_VISIBLE_DEVICES=4,5,6,7 GPUS=29501 bash ./tools/dist_test.sh ${CONFIG_FILE2} ${CHECKPOINT_FILE} 4 [PY_ARGS] +CUDA_VISIBLE_DEVICES=0,1,2,3 PORT=29500 bash tools/dist_test.sh ${CONFIG} ${CHECKPOINT} 4 [PY_ARGS] +CUDA_VISIBLE_DEVICES=4,5,6,7 PORT=29501 bash tools/dist_test.sh ${CONFIG} ${CHECKPOINT} 4 [PY_ARGS] ``` ### Test with multiple machines @@ -185,23 +187,23 @@ If you launch a test job with multiple machines connected with ethernet, you can On the first machine: ```shell -NNODES=2 NODE_RANK=0 PORT=$MASTER_PORT MASTER_ADDR=$MASTER_ADDR bash tools/dist_test.sh $CONFIG $CHECKPOINT_FILE $GPUS +NNODES=2 NODE_RANK=0 PORT=$MASTER_PORT MASTER_ADDR=$MASTER_ADDR bash tools/dist_test.sh $CONFIG $CHECKPOINT $GPUS ``` On the second machine: ```shell -NNODES=2 NODE_RANK=1 PORT=$MASTER_PORT MASTER_ADDR=$MASTER_ADDR bash tools/dist_test.sh $CONFIG $CHECKPOINT_FILE $GPUS +NNODES=2 NODE_RANK=1 PORT=$MASTER_PORT MASTER_ADDR=$MASTER_ADDR bash tools/dist_test.sh $CONFIG $CHECKPOINT $GPUS ``` -Comparing with multi-GPUs in a single machine, you need to specify some extra environment variables: +Compared with multi-GPUs in a single machine, you need to specify some extra environment variables: -| ENV_VARS | Description | -| ------------- | ---------------------------------------------------------------------------- | -| `NNODES` | The total number of machines. | -| `NODE_RANK` | The index of the local machine. | -| `PORT` | The communication port, it should be the same in all machines. | -| `MASTER_ADDR` | The IP address of the master machine, it should be the same in all machines. | +| ENV_VARS | Description | +| ------------- | ----------------------------------------------------------------------------------------------------- | +| `NNODES` | The total number of machines. Defaults to 1. | +| `NODE_RANK` | The index of the local machine. Defaults to 0. | +| `PORT` | The communication port, it should be the same in all machines. Defaults to 29500. | +| `MASTER_ADDR` | The IP address of the master machine, it should be the same in all machines. Defaults to `127.0.0.1`. | Usually it is slow if you do not have high speed networking like InfiniBand. @@ -210,24 +212,24 @@ Usually it is slow if you do not have high speed networking like InfiniBand. If you run MMAction2 on a cluster managed with [slurm](https://slurm.schedmd.com/), you can use the script `slurm_test.sh`. ```shell -[ENV_VARS] ./tools/slurm_test.sh ${PARTITION} ${JOB_NAME} ${CONFIG_FILE} ${CHECKPOINT_FILE} [PY_ARGS] +[ENV_VARS] bash tools/slurm_test.sh ${PARTITION} ${JOB_NAME} ${CONFIG} ${CHECKPOINT} [PY_ARGS] ``` Here are the arguments description of the script. 
-| ARGS | Description | -| ----------------- | -------------------------------------------------------------------------------- | -| `PARTITION` | The partition to use in your cluster. | -| `JOB_NAME` | The name of your job, you can name it as you like. | -| `CONFIG_FILE` | The path to the config file. | -| `CHECKPOINT_FILE` | The path to the checkpoint file (It can be a http link) | -| `[PYARGS]` | The other optional arguments of `tools/test.py`, see [here](#test-with-your-pc). | +| ARGS | Description | +| ------------ | -------------------------------------------------------------------------------- | +| `PARTITION` | The partition to use in your cluster. | +| `JOB_NAME` | The name of your job, you can name it as you like. | +| `CONFIG` | The path to the config file. | +| `CHECKPOINT` | The path to the checkpoint file (It can be a http link) | +| `[PYARGS]` | The other optional arguments of `tools/test.py`, see [here](#test-with-your-pc). | Here are the environment variables can be used to configure the slurm job. | ENV_VARS | Description | | --------------- | ---------------------------------------------------------------------------------------------------------- | | `GPUS` | The number of GPUs to be used. Defaults to 8. | -| `GPUS_PER_NODE` | The number of GPUs to be allocated per node. | +| `GPUS_PER_NODE` | The number of GPUs to be allocated per node. Defaults to 8. | | `CPUS_PER_TASK` | The number of CPUs to be allocated per task (Usually one GPU corresponds to one task). Defaults to 5. | | `SRUN_ARGS` | The other arguments of `srun`. Available options can be found [here](https://slurm.schedmd.com/srun.html). | diff --git a/tools/dist_test.sh b/tools/dist_test.sh index 4e90525c09..9366526022 100755 --- a/tools/dist_test.sh +++ b/tools/dist_test.sh @@ -1,12 +1,14 @@ #!/usr/bin/env bash -NNODES=${NNODES:-1} -NODE_RANK=${NODE_RANK:-0} -MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} +set -x + CONFIG=$1 CHECKPOINT=$2 GPUS=$3 +NNODES=${NNODES:-1} +NODE_RANK=${NODE_RANK:-0} PORT=${PORT:-29500} +MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ # Arguments starting from the forth one are captured by ${@:4} diff --git a/tools/dist_train.sh b/tools/dist_train.sh index 8944199038..941cb0d59a 100755 --- a/tools/dist_train.sh +++ b/tools/dist_train.sh @@ -1,11 +1,13 @@ #!/usr/bin/env bash -NNODES=${NNODES:-1} -NODE_RANK=${NODE_RANK:-0} -MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} +set -x + CONFIG=$1 GPUS=$2 +NNODES=${NNODES:-1} +NODE_RANK=${NODE_RANK:-0} PORT=${PORT:-29500} +MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"} PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \ python -m torch.distributed.launch --nnodes=$NNODES --node_rank=$NODE_RANK --master_addr=$MASTER_ADDR \ diff --git a/tools/test.py b/tools/test.py index cefee76eea..17fac09b2f 100644 --- a/tools/test.py +++ b/tools/test.py @@ -3,9 +3,7 @@ import os import os.path as osp -import mmengine from mmengine.config import Config, DictAction -from mmengine.hooks import Hook from mmengine.runner import Runner from mmaction.utils import register_all_modules @@ -19,7 +17,6 @@ def parse_args(): parser.add_argument( '--work-dir', help='the directory to save the file containing evaluation metrics') - parser.add_argument('--out', help='the file to save metric results.') parser.add_argument( '--dump', type=str, @@ -118,16 +115,6 @@ def main(): # build the runner from config runner = Runner.from_cfg(cfg) - if args.out: - - class SaveMetricHook(Hook): - - def after_test_epoch(self, _, metrics=None): - if metrics is not None: - 
mmengine.dump(metrics, args.out) - - runner.register_hook(SaveMetricHook(), 'LOWEST') - # start testing runner.test() From 6297e75a0e3e0d5be182b81cd1ba10f7311c8a64 Mon Sep 17 00:00:00 2001 From: wxDai Date: Tue, 15 Nov 2022 13:45:50 +0800 Subject: [PATCH 16/57] [Improvement] Add `SyncBuffersHook` and `Seed` (#2044) --- configs/_base_/default_runtime.py | 3 ++- docs/en/user_guides/4_train_test.md | 3 +++ tools/train.py | 16 ++++++++++++++++ 3 files changed, 21 insertions(+), 1 deletion(-) diff --git a/configs/_base_/default_runtime.py b/configs/_base_/default_runtime.py index 92fb66701a..3a64a0750f 100644 --- a/configs/_base_/default_runtime.py +++ b/configs/_base_/default_runtime.py @@ -6,7 +6,8 @@ logger=dict(type='LoggerHook', interval=20, ignore_last=False), param_scheduler=dict(type='ParamSchedulerHook'), checkpoint=dict(type='CheckpointHook', interval=1, save_best='auto'), - sampler_seed=dict(type='DistSamplerSeedHook')) + sampler_seed=dict(type='DistSamplerSeedHook'), + sync_buffers=dict(type='SyncBuffersHook')) env_cfg = dict( cudnn_benchmark=False, diff --git a/docs/en/user_guides/4_train_test.md b/docs/en/user_guides/4_train_test.md index 4eb5db3418..a67448fde3 100644 --- a/docs/en/user_guides/4_train_test.md +++ b/docs/en/user_guides/4_train_test.md @@ -28,6 +28,9 @@ CUDA_VISIBLE_DEVICES=-1 python tools/train.py ${CONFIG_FILE} [ARGS] | `--amp` | Enable automatic-mixed-precision training. | | `--no-validate` | **Not suggested**. Disable checkpoint evaluation during training. | | `--auto-scale-lr` | Auto scale the learning rate according to the actual batch size and the original batch size. | +| `--seed` | Random seed. | +| `--diff-rank-seed` | Whether or not set different seeds for different ranks. | +| `--deterministic` | Whether to set deterministic options for CUDNN backend. | | `--cfg-options CFG_OPTIONS` | Override some settings in the used config, the key-value pair in xxx=yyy format will be merged into the config file. If the value to be overwritten is a list, it should be of the form of either `key="[a,b]"` or `key=a,b`. The argument also allows nested list/tuple values, e.g. `key="[(a,b),(c,d)]"`. Note that the quotation marks are necessary and that no white space is allowed. | | `--launcher {none,pytorch,slurm,mpi}` | Options for job launcher. Defaults to `none`. 
| diff --git a/tools/train.py b/tools/train.py index ac45698412..e424a7a634 100644 --- a/tools/train.py +++ b/tools/train.py @@ -34,6 +34,15 @@ def parse_args(): action='store_true', help='whether to auto scale the learning rate according to the ' 'actual batch size and the original batch size.') + parser.add_argument('--seed', type=int, default=None, help='random seed') + parser.add_argument( + '--diff-rank-seed', + action='store_true', + help='whether or not set different seeds for different ranks') + parser.add_argument( + '--deterministic', + action='store_true', + help='whether to set deterministic options for CUDNN backend.') parser.add_argument( '--cfg-options', nargs='+', @@ -96,6 +105,13 @@ def merge_args(cfg, args): if args.auto_scale_lr: cfg.auto_scale_lr.enable = True + # set random seeds + if cfg.get('randomness', None) is None: + cfg.randomness = dict( + seed=args.seed, + diff_rank_seed=args.diff_rank_seed, + deterministic=args.deterministic) + if args.cfg_options is not None: cfg.merge_from_dict(args.cfg_options) From f7aae9d9c18088b1ab04324d7a355de3c2c7baa0 Mon Sep 17 00:00:00 2001 From: wxDai Date: Wed, 16 Nov 2022 11:15:57 +0800 Subject: [PATCH 17/57] [Docs] Refactor `1_config.md` (#2040) --- docs/en/user_guides/1_config.md | 523 +++++++++++++++++--------------- 1 file changed, 276 insertions(+), 247 deletions(-) diff --git a/docs/en/user_guides/1_config.md b/docs/en/user_guides/1_config.md index bd47c3c882..308ec70f17 100644 --- a/docs/en/user_guides/1_config.md +++ b/docs/en/user_guides/1_config.md @@ -9,9 +9,9 @@ you may run `python tools/analysis_tools/print_config.py /PATH/TO/CONFIG` to see - [Modify config through script arguments](#modify-config-through-script-arguments) - [Config File Structure](#config-file-structure) - [Config File Naming Convention](#config-file-naming-convention) - - [Config System for Action localization](#config-system-for-action-localization) - [Config System for Action Recognition](#config-system-for-action-recognition) - [Config System for Spatio-Temporal Action Detection](#config-system-for-spatio-temporal-action-detection) + - [Config System for Action localization](#config-system-for-action-localization) @@ -26,30 +26,29 @@ When submitting jobs using `tools/train.py` or `tools/test.py`, you may specify - Update keys inside a list of configs. - Some config dicts are composed as a list in your config. For example, the training pipeline `data.train.pipeline` is normally a list + Some config dicts are composed as a list in your config. For example, the training pipeline `train_pipeline` is normally a list e.g. `[dict(type='SampleFrames'), ...]`. If you want to change `'SampleFrames'` to `'DenseSampleFrames'` in the pipeline, you may specify `--cfg-options train_pipeline.0.type=DenseSampleFrames`. - Update values of list/tuples. If the value to be updated is a list or a tuple. For example, the config file normally sets `model.data_preprocessor.mean=[123.675, 116.28, 103.53]`. If you want to - change this key, you may specify `--cfg-options model.data_preprocessor.mean="[128,128,128]"`. Note that the quotation mark " is necessary to - support list/tuple data types. + change this key, you may specify `--cfg-options model.data_preprocessor.mean="[128,128,128]"`. Note that the quotation mark " is necessary to support list/tuple data types. ## Config File Structure -There are 3 basic component types under `config/_base_`, models, schedules, default_runtime. 
+There are 3 basic component types under `configs/_base_`, models, schedules, default_runtime. Many methods could be easily constructed with one of each like TSN, I3D, SlowOnly, etc. The configs that are composed by components from `_base_` are called _primitive_. For all configs under the same folder, it is recommended to have only **one** _primitive_ config. All other configs should inherit from the _primitive_ config. In this way, the maximum of inheritance level is 3. For easy understanding, we recommend contributors to inherit from exiting methods. -For example, if some modification is made base on TSN, users may first inherit the basic TSN structure by specifying `_base_ = ../tsn/tsn_r50_8xb32-1x1x3-100e_kinetics400-rgb.py`, then modify the necessary fields in the config files. +For example, if some modification is made based on TSN, users may first inherit the basic TSN structure by specifying `_base_ = ../tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb.py`, then modify the necessary fields in the config files. If you are building an entirely new method that does not share the structure with any of the existing methods, you may create a folder under `configs/TASK`. -Please refer to [mmcv](https://mmcv.readthedocs.io/en/latest/understand_mmcv/config.html) for detailed documentation. +Please refer to [mmengine](https://mmengine.readthedocs.io/en/latest/tutorials/config.html) for detailed documentation. ## Config File Naming Convention @@ -62,18 +61,19 @@ We follow the style below to name config files. Contributors are advised to foll `{xxx}` is required field and `[yyy]` is optional. - `{algorithm info}`: - - `{model}`: model type, e.g. `tsn`, `i3d`, etc. - - `[model setting]`: specific setting for some models. + - `{model}`: model type, e.g. `tsn`, `i3d`, `swin`, `vit`, etc. + - `[model setting]`: specific setting for some models, e.g. `base`, `p16`, `w877`, etc. - `{module info}`: - - `[pretained info]`: pretrained information, e.g. `kinetics400-pretrained`, etc. - - `{backbone}`: backbone type and pretrained information, e.g. `r50` (ResNet-50), etc. -- `training info`: + - `[pretained info]`: pretrained information, e.g. `kinetics400-pretrained`, `in1k-pre`, etc. + - `{backbone}`: backbone type. e.g. `r50` (ResNet-50), etc. + - `[backbone setting]`: specific setting for some backbones, e.g. `nl-dot-product`, `bnfrozen`, `nopool`, etc. +- `{training info}`: - `{gpu x batch_per_gpu]}`: GPUs and samples per GPU. - - `{pipeline setting}`: frame sample setting in `{clip_len}x{frame_interval}x{num_clips}` format. - - `{schedule}`: training schedule, e.g. `20e` means 20 epochs. -- `data info`: + - `{pipeline setting}`: frame sample setting, e.g. `dense`, `{clip_len}x{frame_interval}x{num_clips}`, `u48`, etc. + - `{schedule}`: training schedule, e.g. `coslr-20e`. +- `{data info}`: - `{dataset}`: dataset name, e.g. `kinetics400`, `mmit`, etc. - - `{modality}`: frame modality, e.g. `rgb`, `flow`, etc. + - `{modality}`: data modality, e.g. `rgb`, `flow`, `keypoint-2d`, etc. ### Config System for Action Recognition @@ -89,7 +89,7 @@ which is convenient to conduct various experiments. ```python # model settings model = dict( # Config of the model - type='Recognizer2D', # Type of the recognizer + type='Recognizer2D', # Class name of the recognizer backbone=dict( # Dict for backbone type='ResNet', # Name of the backbone pretrained='torchvision://resnet50', # The url/site of the pretrained model @@ -103,15 +103,15 @@ which is convenient to conduct various experiments. 
consensus=dict(type='AvgConsensus', dim=1), # Config of consensus module dropout_ratio=0.4, # Probability in dropout layer init_std=0.01, # Std value for linear layer initiation - average_clips=None), + average_clips='prob'), # Method to average multiple clip results data_preprocessor=dict( # Dict for data preprocessor type='ActionDataPreprocessor', # Name of data preprocessor mean=[123.675, 116.28, 103.53], # Mean values of different channels to normalize std=[58.395, 57.12, 57.375], # Std values of different channels to normalize format_shape='NCHW'), # Final image shape format - # model training and testing settings - train_cfg=None, # Config of training hyperparameters for TSN - test_cfg=None) # Config for testing hyperparameters for TSN. + # model training and testing settings + train_cfg=None, # Config of training hyperparameters for TSN + test_cfg=None) # Config for testing hyperparameters for TSN. # dataset settings dataset_type = 'RawframeDataset' # Type of dataset for training, validation and testing @@ -121,7 +121,7 @@ which is convenient to conduct various experiments. ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt' # Path to the annotation file for validation ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt' # Path to the annotation file for testing - train_pipeline = [ # List of training pipeline steps + train_pipeline = [ # Training data processing pipeline dict( # Config of SampleFrames type='SampleFrames', # Sample frames pipeline, sampling frames from video clip_len=1, # Frames of each sampled output clip @@ -148,10 +148,9 @@ which is convenient to conduct various experiments. dict( # Config of FormatShape type='FormatShape', # Format shape pipeline, Format final image shape to the given input_format input_format='NCHW'), # Final image shape format - dict( # Config of PackActionInputs - type='PackActionInputs') # Pack input data + dict(type='PackActionInputs') # Config of PackActionInputs ] - val_pipeline = [ # List of validation pipeline steps + val_pipeline = [ # Validation data processing pipeline dict( # Config of SampleFrames type='SampleFrames', # Sample frames pipeline, sampling frames from video clip_len=1, # Frames of each sampled output clip @@ -169,16 +168,12 @@ which is convenient to conduct various experiments. dict( # Config of Flip type='Flip', # Flip pipeline flip_ratio=0), # Probability of implementing flip - dict( # Config of Normalize - type='Normalize', # Normalize pipeline - **img_norm_cfg), # Config of image normalization dict( # Config of FormatShape type='FormatShape', # Format shape pipeline, Format final image shape to the given input_format input_format='NCHW'), # Final image shape format - dict( # Config of PackActionInputs - type='PackActionInputs') # Pack input data + dict(type='PackActionInputs') # Config of PackActionInputs ] - test_pipeline = [ # List of testing pipeline steps + test_pipeline = [ # Testing data processing pipeline dict( # Config of SampleFrames type='SampleFrames', # Sample frames pipeline, sampling frames from video clip_len=1, # Frames of each sampled output clip @@ -196,112 +191,118 @@ which is convenient to conduct various experiments. 
dict( # Config of Flip type='Flip', # Flip pipeline flip_ratio=0), # Probability of implementing flip - dict( # Config of Normalize - type='Normalize', # Normalize pipeline - **img_norm_cfg), # Config of image normalization dict( # Config of FormatShape type='FormatShape', # Format shape pipeline, Format final image shape to the given input_format input_format='NCHW'), # Final image shape format - dict( # Config of PackActionInputs - type='PackActionInputs') # Pack input data + dict(type='PackActionInputs') # Config of PackActionInputs ] train_dataloader = dict( # Config of train dataloader batch_size=32, # Batch size of each single GPU during training num_workers=8, # Workers to pre-fetch data for each single GPU during training - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - ann_file=ann_file_train, - data_prefix=dict(video=data_root), - pipeline=train_pipeline)) + persistent_workers=True, # If `True`, the dataloader will not shut down the worker processes after an epoch end, which can accelerate training speed + sampler=dict( + type='DefaultSampler', # DefaultSampler which supports both distributed and non-distributed training. Refer to https://github.com/open-mmlab/mmengine/blob/main/mmengine/dataset/sampler.py + shuffle=True), # Randomly shuffle the training data in each epoch + dataset=dict( # Config of train dataset + type=dataset_type, + ann_file=ann_file_train, # Path of annotation file + data_prefix=dict(img=data_root), # Prefix of frame path + pipeline=train_pipeline)) val_dataloader = dict( # Config of validation dataloader - batch_size=1, # Batch size of each single GPU during evaluation - num_workers=8, # Workers to pre-fetch data for each single GPU during evaluation - persistent_workers=True, # Maintain the workers `Dataset` instances alive - sampler=dict(type='DefaultSampler', shuffle=False), - dataset=dict( + batch_size=1, # Batch size of each single GPU during validation + num_workers=8, # Workers to pre-fetch data for each single GPU during validation + persistent_workers=True, # If `True`, the dataloader will not shut down the worker processes after an epoch end + sampler=dict( + type='DefaultSampler', + shuffle=False), # Not shuffle during validation and testing + dataset=dict( # Config of validation dataset type=dataset_type, - ann_file=ann_file_val, - data_prefix=dict(video=data_root_val), + ann_file=ann_file_val, # Path of annotation file + data_prefix=dict(img=data_root_val), # Prefix of frame path pipeline=val_pipeline, test_mode=True)) test_dataloader = dict( # Config of test dataloader batch_size=32, # Batch size of each single GPU during testing num_workers=8, # Workers to pre-fetch data for each single GPU during testing - persistent_workers=True, # Maintain the workers `Dataset` instances alive - sampler=dict(type='DefaultSampler', shuffle=False), - dataset=dict( + persistent_workers=True, # If `True`, the dataloader will not shut down the worker processes after an epoch end + sampler=dict( + type='DefaultSampler', + shuffle=False), # Not shuffle during validation and testing + dataset=dict( # Config of test dataset type=dataset_type, - ann_file=ann_file_val, - data_prefix=dict(video=data_root_val), + ann_file=ann_file_val, # Path of annotation file + data_prefix=dict(img=data_root_val), # Prefix of frame path pipeline=test_pipeline, test_mode=True)) - val_evaluator = dict(type='AccMetric') # The evaluator object used for computing metrics for validation - test_evaluator = 
dict(type='AccMetric') # The evaluator object used for computing metrics for test steps + + # evaluation settings + val_evaluator = dict(type='AccMetric') # Config of validation evaluator + test_evaluator = val_evaluator # Config of testing evaluator train_cfg = dict( # Config of training loop - type='EpochBasedTrainLoop', # name of training loop - max_epochs=100, # Total training epochs - val_begin=1, # The epoch that begins validating - val_interval=1) # Validation interval - val_cfg = dict( # Config of validating loop - type='ValLoop') # name of validating loop + type='EpochBasedTrainLoop', # Name of training loop + max_epochs=100, # Total training epochs + val_begin=1, # The epoch that begins validating + val_interval=1) # Validation interval + val_cfg = dict( # Config of validation loop + type='ValLoop') # Name of validation loop test_cfg = dict( # Config of testing loop - type='TestLoop') # name of testing loop + type='TestLoop') # Name of testing loop + # learning policy - param_scheduler = [dict( # Parameter scheduler for updating optimizer parameters, support dict or list - type='MultiStepLR', # Decays the parameter once the number of epoch reach milestone - begin=0, # Step at which to start updating the parameters - end=100, # Step at which to stop updating the parameters - by_epoch=True, # Whether the scheduled parameters are updated by epochs - milestones=[40, 80], # Steps to decay the learning rate - gamma=0.1) # Multiplicative factor of parameter value decay - ] + param_scheduler = [ # Parameter scheduler for updating optimizer parameters, support dict or list + dict(type='MultiStepLR', # Decays the learning rate once the number of epoch reaches one of the milestones + begin=0, # Step at which to start updating the learning rate + end=100, # Step at which to stop updating the learning rate + by_epoch=True, # Whether the scheduled learning rate is updated by epochs + milestones=[40, 80], # Steps to decay the learning rate + gamma=0.1)] # Multiplicative factor of learning rate decay + # optimizer - optim_wrapper = dict( # Common interface for updating parameters - optimizer=dict( # Optimizer used to update model parameters - type='SGD', # Type of optimizer - lr=0.01, # learning rate - momentum=0.9, # momentum factor - weight_decay=0.0001), # Weight decay of SGD - clip_grad=dict(max_norm=40, norm_type=2)) # Use gradient clip + optim_wrapper = dict( # Config of optimizer wrapper + type='OptimWrapper', # Name of optimizer wrapper, switch to AmpOptimWrapper to enable mixed precision training + optimizer=dict( # Config of optimizer. Support all kinds of optimizers in PyTorch. Refer to https://pytorch.org/docs/stable/optim.html#algorithms + type='SGD', # Name of optimizer + lr=0.01, # Learning rate + momentum=0.9, # Momentum factor + weight_decay=0.0001), # Weight decay + clip_grad=dict(max_norm=40, norm_type=2)) # Config of gradient clip # runtime settings - default_scope = 'mmaction' # Scope of current task used to reset the current registry - default_hooks = dict( # Hooks to execute default actions like updating model parameters and saving checkpoints. + default_scope = 'mmaction' # The default registry scope to find modules. Refer to https://mmengine.readthedocs.io/en/latest/tutorials/registry.html + default_hooks = dict( # Hooks to execute default actions like updating model parameters and saving checkpoints. 
runtime_info=dict(type='RuntimeInfoHook'), # The hook to updates runtime information into message hub timer=dict(type='IterTimerHook'), # The logger used to record time spent during iteration logger=dict( - type='LoggerHook', # The logger used to record the training/validation/testing phase - interval=20, # Interval to print the log - ignore_last=False), # Ignore the log of last iterations in each epoch + type='LoggerHook', # The logger used to record logs during training/validation/testing phase + interval=20, # Interval to print the log + ignore_last=False), # Ignore the log of last iterations in each epoch param_scheduler=dict(type='ParamSchedulerHook'), # The hook to update some hyper-parameters in optimizer checkpoint=dict( - type='CheckpointHook', # The hook to save checkpoints periodically - interval=3, # The saving period - save_best='auto', # Specified metric to mearsure the best checkpoint during evaluation - max_keep_ckpts=3), # The maximum checkpoints to keep - sampler_seed=dict(type='DistSamplerSeedHook')) # Data-loading sampler for distributed training - env_cfg = dict( # Dict for setting environment - cudnn_benchmark=False, + type='CheckpointHook', # The hook to save checkpoints periodically + interval=3, # The saving period + save_best='auto', # Specified metric to mearsure the best checkpoint during evaluation + max_keep_ckpts=3), # The maximum checkpoints to keep + sampler_seed=dict(type='DistSamplerSeedHook'), # Data-loading sampler for distributed training + sync_buffers=dict(type='SyncBuffersHook')) # Synchronize model buffers at the end of each epoch + env_cfg = dict( # Dict for setting environment + cudnn_benchmark=False, # Whether to enable cudnn benchmark mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), # Parameters to setup multiprocessing - dist_cfg=dict(backend='nccl')) # Parameters to setup distributed training, the port can also be set + dist_cfg=dict(backend='nccl')) # Parameters to setup distributed environment, the port can also be set log_processor = dict( - type='LogProcessor', # Log processor used to format log information - window_size=20, # Default smooth interval - by_epoch=True) # Whether to format logs with epoch stype - vis_backends = [ # Visual backend config list - dict(type='LocalVisBackend')] # Local visualization backend - visualizer = dict( - type='ActionVisualizer', # Universal Visualizer for classification task - vis_backends=[dict(type='LocalVisBackend')]) # Local visualization backend + type='LogProcessor', # Log processor used to format log information + window_size=20, # Default smooth interval + by_epoch=True) # Whether to format logs with epoch type + vis_backends = [ # List of visualization backends + dict(type='LocalVisBackend')] # Local visualization backend + visualizer = dict( # Config of visualizer + type='ActionVisualizer', # Name of visualizer + vis_backends=vis_backends) log_level = 'INFO' # The level of logging - resume = False # Resume from a checkpoint - load_from = None # load checkpoint as a pre-trained model from a given path. If resume == True, resume training from the checkpoint, otherwise load checkpoint without resuming - work_dir = './work_dirs/tsn_r50_8xb32-1x1x3-100e_kinetics400-rgb/' # Directory to save the model checkpoints and logs for the current experiments + load_from = None # Load model checkpoint as a pre-trained model from a given path. This will not resume training. + resume = False # Whether to resume from the checkpoint defined in `load_from`. 
If `load_from` is None, it will resume the latest checkpoint in the `work_dir`. ``` ### Config System for Spatio-Temporal Action Detection @@ -317,7 +318,8 @@ We incorporate modular design into our config system, which is convenient to con ```python # model setting model = dict( # Config of the model - type='FastRCNN', # Type of the detector + type='FastRCNN', # Class name of the detector + _scope_='mmdet', # The scope of current config backbone=dict( # Dict for backbone type='ResNet3dSlowOnly', # Name of the backbone depth=50, # Depth of ResNet model @@ -361,11 +363,8 @@ We incorporate modular design into our config system, which is convenient to con pos_fraction=1, # Positive bbox fraction of the sampler neg_pos_ub=-1, # Upper bound of the ratio of num negative to num positive add_gt_as_proposals=True), # Add gt bboxes as proposals - pos_weight=1.0, # Loss weight of positive examples - debug=False)), # Debug mode - test_cfg=dict( # Testing config of FastRCNN - rcnn=dict( # Dict for rcnn testing config - action_thr=0.002))) # The threshold of an action + pos_weight=1.0)), # Loss weight of positive examples + test_cfg=dict(rcnn=None)) # Testing config of FastRCNN # dataset settings dataset_type = 'AVADataset' # Type of dataset for training, validation and testing @@ -383,8 +382,7 @@ We incorporate modular design into our config system, which is convenient to con proposal_file_train = f'{anno_root}/ava_dense_proposals_train.FAIR.recall_93.9.pkl' # Path to the human detection proposals for training examples proposal_file_val = f'{anno_root}/ava_dense_proposals_val.FAIR.recall_93.9.pkl' # Path to the human detection proposals for validation examples - # - train_pipeline = [ # List of training pipeline steps + train_pipeline = [ # Training data processing pipeline dict( # Config of SampleFrames type='AVASampleFrames', # Sample frames pipeline, sampling frames from video clip_len=4, # Frames of each sampled output clip @@ -407,11 +405,11 @@ We incorporate modular design into our config system, which is convenient to con dict(type='PackActionInputs') # Pack input data ] - val_pipeline = [ # List of validation pipeline steps + val_pipeline = [ # Validation data processing pipeline dict( # Config of SampleFrames type='AVASampleFrames', # Sample frames pipeline, sampling frames from video clip_len=4, # Frames of each sampled output clip - frame_interval=16) # Temporal interval of adjacent sampled frames + frame_interval=16), # Temporal interval of adjacent sampled frames dict( # Config of RawFrameDecode type='RawFrameDecode'), # Load and decode Frames pipeline, picking raw frames with given indices dict( # Config of Resize @@ -427,91 +425,114 @@ We incorporate modular design into our config system, which is convenient to con train_dataloader = dict( # Config of train dataloader batch_size=32, # Batch size of each single GPU during training num_workers=8, # Workers to pre-fetch data for each single GPU during training - persistent_workers=True, # Maintain the workers `Dataset` instances alive - sampler=dict(type='DefaultSampler', shuffle=True), # Shuffle the dataset - dataset=dict( - type=dataset_type, - ann_file=ann_file_train, - data_prefix=dict(video=data_root), - pipeline=train_pipeline)) + persistent_workers=True, # If `True`, the dataloader will not shut down the worker processes after an epoch end, which can accelerate training speed + sampler=dict( + type='DefaultSampler', # DefaultSampler which supports both distributed and non-distributed training. 
Refer to https://github.com/open-mmlab/mmengine/blob/main/mmengine/dataset/sampler.py + shuffle=True), # Randomly shuffle the training data in each epoch + dataset=dict( # Config of train dataset + type=dataset_type, + ann_file=ann_file_train, # Path of annotation file + exclude_file=exclude_file_train, # Path of exclude annotation file + label_file=label_file, # Path of label file + data_prefix=dict(img=data_root), # Prefix of frame path + proposal_file=proposal_file_train, # Path of human detection proposals + pipeline=train_pipeline)) val_dataloader = dict( # Config of validation dataloader batch_size=1, # Batch size of each single GPU during evaluation num_workers=8, # Workers to pre-fetch data for each single GPU during evaluation - persistent_workers=True, # Maintain the workers `Dataset` instances alive - sampler=dict(type='DefaultSampler', shuffle=False), # Dont Shuffle the dataset - dataset=dict( + persistent_workers=True, # If `True`, the dataloader will not shut down the worker processes after an epoch end + sampler=dict( + type='DefaultSampler', + shuffle=False), # Not shuffle during validation and testing + dataset=dict( # Config of validation dataset type=dataset_type, - ann_file=ann_file_val, - data_prefix=dict(video=data_root_val), + ann_file=ann_file_val, # Path of annotation file + exclude_file=exclude_file_val, # Path of exclude annotation file + label_file=label_file, # Path of label file + data_prefix=dict(img=data_root_val), # Prefix of frame path + proposal_file=proposal_file_val, # Path of human detection proposals pipeline=val_pipeline, test_mode=True)) - test_dataloader = val_dataloader # Set test_dataloader as val_dataloader + test_dataloader = val_dataloader # Config of testing dataloader + # evaluation settings - val_evaluator = dict( - type='AVAMetric', # The evaluator object used for computing metrics for validation - ann_file=ann_file_val, - label_file=label_file, - exclude_file=exclude_file_val) - test_evaluator = val_evaluator # Set test_evaluator as val_evaluator + val_evaluator = dict( # Config of validation evaluator + type='AVAMetric', + ann_file=ann_file_val, + label_file=label_file, + exclude_file=exclude_file_val) + test_evaluator = val_evaluator # Config of testing evaluator + + train_cfg = dict( # Config of training loop + type='EpochBasedTrainLoop', # Name of training loop + max_epochs=20, # Total training epochs + val_begin=1, # The epoch that begins validating + val_interval=1) # Validation interval + val_cfg = dict( # Config of validation loop + type='ValLoop') # Name of validation loop + test_cfg = dict( # Config of testing loop + type='TestLoop') # Name of testing loop + # learning policy param_scheduler = [ # Parameter scheduler for updating optimizer parameters, support dict or list dict(type='LinearLR', # Decays the learning rate of each parameter group by linearly changing small multiplicative factor - start_factor=0.1, # The number we multiply parameter value in the first epoch - by_epoch=True, # Whether the scheduled parameters are updated by epochs - begin=0, # Step at which to start updating the parameters - end=5), # Step at which to stop updating the parameters - dict(type='MultiStepLR', # Decays the parameter once the number of epoch reach milestone - begin=0, # Step at which to start updating the parameters - end=20, # Step at which to stop updating the parameters - by_epoch=True, # Whether the scheduled parameters are updated by epochs + start_factor=0.1, # The number we multiply learning rate in the first epoch + by_epoch=True, 
# Whether the scheduled learning rate is updated by epochs + begin=0, # Step at which to start updating the learning rate + end=5), # Step at which to stop updating the learning rate + dict(type='MultiStepLR', # Decays the learning rate once the number of epoch reaches one of the milestones + begin=0, # Step at which to start updating the learning rate + end=20, # Step at which to stop updating the learning rate + by_epoch=True, # Whether the scheduled learning rate is updated by epochs milestones=[10, 15], # Steps to decay the learning rate - gamma=0.1)] # Multiplicative factor of parameter value decay + gamma=0.1)] # Multiplicative factor of learning rate decay + # optimizer - optim_wrapper = dict( # Common interface for updating parameters - optimizer=dict( # Optimizer used to update model parameters - type='SGD', # Type of optimizer - lr=0.2, # learning rate - momentum=0.9, # momentum factor - weight_decay=0.0001), # Weight decay of SGD - clip_grad=dict(max_norm=40, norm_type=2)) # Use gradient clip + optim_wrapper = dict( # Config of optimizer wrapper + type='OptimWrapper', # Name of optimizer wrapper, switch to AmpOptimWrapper to enable mixed precision training + optimizer=dict( # Config of optimizer. Support all kinds of optimizers in PyTorch. Refer to https://pytorch.org/docs/stable/optim.html#algorithms + type='SGD', # Name of optimizer + lr=0.2, # Learning rate + momentum=0.9, # Momentum factor + weight_decay=0.0001), # Weight decay + clip_grad=dict(max_norm=40, norm_type=2)) # Config of gradient clip # runtime settings - default_scope = 'mmaction' # Scope of current task used to reset the current registry - default_hooks = dict( # Hooks to execute default actions like updating model parameters and saving checkpoints. + default_scope = 'mmaction' # The default registry scope to find modules. Refer to https://mmengine.readthedocs.io/en/latest/tutorials/registry.html + default_hooks = dict( # Hooks to execute default actions like updating model parameters and saving checkpoints. 
runtime_info=dict(type='RuntimeInfoHook'), # The hook to updates runtime information into message hub timer=dict(type='IterTimerHook'), # The logger used to record time spent during iteration logger=dict( - type='LoggerHook', # The logger used to record the training/validation/testing phase - interval=20, # Interval to print the log - ignore_last=False), # Ignore the log of last iterations in each epoch + type='LoggerHook', # The logger used to record logs during training/validation/testing phase + interval=20, # Interval to print the log + ignore_last=False), # Ignore the log of last iterations in each epoch param_scheduler=dict(type='ParamSchedulerHook'), # The hook to update some hyper-parameters in optimizer checkpoint=dict( - type='CheckpointHook', # The hook to save checkpoints periodically - interval=3, # The saving period - save_best='auto', # Specified metric to mearsure the best checkpoint during evaluation - max_keep_ckpts=3), # The maximum checkpoints to keep - sampler_seed=dict(type='DistSamplerSeedHook')) # Data-loading sampler for distributed training - env_cfg = dict( # Dict for setting environment - cudnn_benchmark=False, + type='CheckpointHook', # The hook to save checkpoints periodically + interval=3, # The saving period + save_best='auto', # Specified metric to mearsure the best checkpoint during evaluation + max_keep_ckpts=3), # The maximum checkpoints to keep + sampler_seed=dict(type='DistSamplerSeedHook'), # Data-loading sampler for distributed training + sync_buffers=dict(type='SyncBuffersHook')) # Synchronize model buffers at the end of each epoch + env_cfg = dict( # Dict for setting environment + cudnn_benchmark=False, # Whether to enable cudnn benchmark mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), # Parameters to setup multiprocessing - dist_cfg=dict(backend='nccl')) # Parameters to setup distributed training, the port can also be set + dist_cfg=dict(backend='nccl')) # Parameters to setup distributed environment, the port can also be set + log_processor = dict( - type='LogProcessor', # Log processor used to format log information - window_size=20, # Default smooth interval - by_epoch=True) # Whether to format logs with epoch stype - vis_backends = [ # Visual backend config list - dict(type='LocalVisBackend')] # Local visualization backend - visualizer = dict( - type='ActionVisualizer', # Universal Visualizer for classification task - vis_backends=[dict(type='LocalVisBackend')]) # Local visualization backend + type='LogProcessor', # Log processor used to format log information + window_size=20, # Default smooth interval + by_epoch=True) # Whether to format logs with epoch type + vis_backends = [ # List of visualization backends + dict(type='LocalVisBackend')] # Local visualization backend + visualizer = dict( # Config of visualizer + type='ActionVisualizer', # Name of visualizer + vis_backends=vis_backends) log_level = 'INFO' # The level of logging - load_from = ('https://download.openmmlab.com/mmaction/recognition/slowonly/' # load models as a pre-trained model from a given path. This will not resume training - 'slowonly_r50_4x16x1_256e_kinetics400_rgb/' - 'slowonly_r50_4x16x1_256e_kinetics400_rgb_20200704-a69556c6.pth') - resume = False # Resume from a checkpoint - load_from = None # load checkpoint as a pre-trained model from a given path. 
If resume == True, resume training from the checkpoint, otherwise load checkpoint without resuming - work_dir = ('./work_dirs/ava/' # Directory to save the model checkpoints and logs for the current experiments - 'slowonly_kinetics_pretrained_r50_4x16x1_20e_ava_rgb') + load_from = ('https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/' + 'slowonly_imagenet-pretrained-r50_8xb16-4x16x1-steplr-150e_kinetics400-rgb/' + 'slowonly_imagenet-pretrained-r50_8xb16-4x16x1-steplr-150e_kinetics400-rgb_20220901-e7b65fad.pth') # Load model checkpoint as a pre-trained model from a given path. This will not resume training. + resume = False # Whether to resume from the checkpoint defined in `load_from`. If `load_from` is None, it will resume the latest checkpoint in the `work_dir`. ``` ### Config System for Action localization @@ -528,7 +549,7 @@ which is convenient to conduct various experiments. ```python # model settings model = dict( # Config of the model - type='BMN', # Type of the localizer + type='BMN', # Class name of the localizer temporal_dim=100, # Total frames selected for each video boundary_ratio=0.5, # Ratio for determining video boundaries num_samples=32, # Number of samples for each proposal @@ -547,14 +568,14 @@ which is convenient to conduct various experiments. ann_file_val = 'data/ActivityNet/anet_anno_val.json' # Path to the annotation file for validation ann_file_test = 'data/ActivityNet/anet_anno_test.json' # Path to the annotation file for testing - train_pipeline = [ # List of training pipeline steps + train_pipeline = [ # Training data processing pipeline dict(type='LoadLocalizationFeature'), # Load localization feature pipeline dict(type='GenerateLocalizationLabels'), # Generate localization labels pipeline dict( type='PackLocalizationInputs', # Pack localization data keys=('gt_bbox'), # Keys of input meta_keys=('video_name'))] # Meta keys of input - val_pipeline = [ # List of validation pipeline steps + val_pipeline = [ # Validation data processing pipeline dict(type='LoadLocalizationFeature'), # Load localization feature pipeline dict(type='GenerateLocalizationLabels'), # Generate localization labels pipeline dict( @@ -562,7 +583,7 @@ which is convenient to conduct various experiments. keys=('gt_bbox'), # Keys of input meta_keys=('video_name', 'duration_second', 'duration_frame', 'annotations', 'feature_frame'))] # Meta keys of input - test_pipeline = [ # List of testing pipeline steps + test_pipeline = [ # Testing data processing pipeline dict(type='LoadLocalizationFeature'), # Load localization feature pipeline dict( type='PackLocalizationInputs', # Pack localization data @@ -572,105 +593,113 @@ which is convenient to conduct various experiments. train_dataloader = dict( # Config of train dataloader batch_size=8, # Batch size of each single GPU during training num_workers=8, # Workers to pre-fetch data for each single GPU during training - persistent_workers=True, # Maintain the workers `Dataset` instances alive - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, - ann_file=ann_file_train, - data_prefix=dict(video=data_root), - pipeline=train_pipeline)) + persistent_workers=True, # If `True`, the dataloader will not shut down the worker processes after an epoch end, which can accelerate training speed + sampler=dict( + type='DefaultSampler', # DefaultSampler which supports both distributed and non-distributed training. 
Refer to https://github.com/open-mmlab/mmengine/blob/main/mmengine/dataset/sampler.py + shuffle=True), # Randomly shuffle the training data in each epoch + dataset=dict( # Config of train dataset + type=dataset_type, + ann_file=ann_file_train, # Path of annotation file + data_prefix=dict(video=data_root), # Prefix of video path + pipeline=train_pipeline)) val_dataloader = dict( # Config of validation dataloader batch_size=1, # Batch size of each single GPU during evaluation num_workers=8, # Workers to pre-fetch data for each single GPU during evaluation - persistent_workers=True, # Maintain the workers `Dataset` instances alive - sampler=dict(type='DefaultSampler', shuffle=False), - dataset=dict( + persistent_workers=True, # If `True`, the dataloader will not shut down the worker processes after an epoch end + sampler=dict( + type='DefaultSampler', + shuffle=False), # Not shuffle during validation and testing + dataset=dict( # Config of validation dataset type=dataset_type, - ann_file=ann_file_val, - data_prefix=dict(video=data_root_val), + ann_file=ann_file_val, # Path of annotation file + data_prefix=dict(video=data_root_val), # Prefix of video path pipeline=val_pipeline, test_mode=True)) test_dataloader = dict( # Config of test dataloader batch_size=1, # Batch size of each single GPU during testing num_workers=8, # Workers to pre-fetch data for each single GPU during testing - persistent_workers=True, # Maintain the workers `Dataset` instances alive - sampler=dict(type='DefaultSampler', shuffle=False), - dataset=dict( + persistent_workers=True, # If `True`, the dataloader will not shut down the worker processes after an epoch end + sampler=dict( + type='DefaultSampler', + shuffle=False), # Not shuffle during validation and testing + dataset=dict( # Config of test dataset type=dataset_type, - ann_file=ann_file_val, - data_prefix=dict(video=data_root_val), + ann_file=ann_file_val, # Path of annotation file + data_prefix=dict(video=data_root_val), # Prefix of video path pipeline=test_pipeline, test_mode=True)) - # evaluator settings + + # evaluation settings + work_dir = './work_dirs/bmn_400x100_2x8_9e_activitynet_feature/' # Directory to save the model checkpoints and logs for the current experiments val_evaluator = dict( - type='ANetMetric', # The evaluator object used for computing metrics for validation - metric_type='AR@AN', # Metrics to be performed + type='ANetMetric', + metric_type='AR@AN', dump_config=dict( # Config of localization output - out=f'{work_dir}/results.json', # Path to output file - output_format='json')) # File format of output file + out=f'{work_dir}/results.json', # Path to the output file + output_format='json')) # File format of the output file test_evaluator = val_evaluator # Set test_evaluator as val_evaluator max_epochs = 9 # Total epochs to train the model train_cfg = dict( # Config of training loop - type='EpochBasedTrainLoop', # name of training loop - max_epochs=max_epochs, # Total training epochs - val_begin=1, # The epoch that begins validating - val_interval=1) # Validation interval - val_cfg = dict( # Config of validating loop - type='ValLoop') # name of validating loop + type='EpochBasedTrainLoop', # Name of training loop + max_epochs=max_epochs, # Total training epochs + val_begin=1, # The epoch that begins validating + val_interval=1) # Validation interval + val_cfg = dict( # Config of validation loop + type='ValLoop') # Name of validating loop test_cfg = dict( # Config of testing loop - type='TestLoop') # name of testing loop + type='TestLoop') # 
Name of testing loop # learning policy - param_scheduler = [dict( # Parameter scheduler for updating optimizer parameters, support dict or list - type='MultiStepLR', # Decays the parameter once the number of epoch reach milestone - begin=0, # Step at which to start updating the parameters - end=max_epochs, # Step at which to stop updating the parameters - by_epoch=True, # Whether the scheduled parameters are updated by epochs + param_scheduler = [ # Parameter scheduler for updating optimizer parameters, support dict or list + dict(type='MultiStepLR', # Decays the learning rate once the number of epoch reaches one of the milestones + begin=0, # Step at which to start updating the learning rate + end=max_epochs, # Step at which to stop updating the learning rate + by_epoch=True, # Whether the scheduled learning rate is updated by epochs milestones=[7, ], # Steps to decay the learning rate - gamma=0.1) # Multiplicative factor of parameter value decay - ] - # optimizer - optim_wrapper = dict( # Common interface for updating parameters - optimizer=dict( # Optimizer used to update model parameters - type='Adam', # Type of optimizer - lr=0.001, # learning rate - weight_decay=0.0001), # Weight decay of SGD - clip_grad=dict(max_norm=40, norm_type=2)) # Use gradient clip + gamma=0.1)] # Multiplicative factor of parameter value decay + # optimizer + optim_wrapper = dict( # Config of optimizer wrapper + type='OptimWrapper', # Name of optimizer wrapper, switch to AmpOptimWrapper to enable mixed precision training + optimizer=dict( # Config of optimizer. Support all kinds of optimizers in PyTorch. Refer to https://pytorch.org/docs/stable/optim.html#algorithms + type='Adam', # Name of optimizer + lr=0.001, # Learning rate + weight_decay=0.0001), # Weight decay + clip_grad=dict(max_norm=40, norm_type=2)) # Config of gradient clip # runtime settings - default_scope = 'mmaction' # Scope of current task used to reset the current registry - default_hooks = dict( # Hooks to execute default actions like updating model parameters and saving checkpoints. + default_scope = 'mmaction' # The default registry scope to find modules. Refer to https://mmengine.readthedocs.io/en/latest/tutorials/registry.html + default_hooks = dict( # Hooks to execute default actions like updating model parameters and saving checkpoints. 
runtime_info=dict(type='RuntimeInfoHook'), # The hook to updates runtime information into message hub timer=dict(type='IterTimerHook'), # The logger used to record time spent during iteration logger=dict( - type='LoggerHook', # The logger used to record the training/validation/testing phase - interval=20, # Interval to print the log - ignore_last=False), # Ignore the log of last iterations in each epoch + type='LoggerHook', # The logger used to record logs during training/validation/testing phase + interval=20, # Interval to print the log + ignore_last=False), # Ignore the log of last iterations in each epoch param_scheduler=dict(type='ParamSchedulerHook'), # The hook to update some hyper-parameters in optimizer checkpoint=dict( - type='CheckpointHook', # The hook to save checkpoints periodically - interval=3, # The saving period - save_best='auto', # Specified metric to mearsure the best checkpoint during evaluation - max_keep_ckpts=3), # The maximum checkpoints to keep - sampler_seed=dict(type='DistSamplerSeedHook')) # Data-loading sampler for distributed training - env_cfg = dict( # Dict for setting environment - cudnn_benchmark=False, + type='CheckpointHook', # The hook to save checkpoints periodically + interval=3, # The saving period + save_best='auto', # Specified metric to mearsure the best checkpoint during evaluation + max_keep_ckpts=3), # The maximum checkpoints to keep + sampler_seed=dict(type='DistSamplerSeedHook'), # Data-loading sampler for distributed training + sync_buffers=dict(type='SyncBuffersHook')) # Synchronize model buffers at the end of each epoch + env_cfg = dict( # Dict for setting environment + cudnn_benchmark=False, # Whether to enable cudnn benchmark mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0), # Parameters to setup multiprocessing - dist_cfg=dict(backend='nccl')) # Parameters to setup distributed training, the port can also be set + dist_cfg=dict(backend='nccl')) # Parameters to setup distributed environment, the port can also be set log_processor = dict( - type='LogProcessor', # Log processor used to format log information - window_size=20, # Default smooth interval - by_epoch=True) # Whether to format logs with epoch stype - vis_backends = [ # Visual backend config list - dict(type='LocalVisBackend')] # Local visualization backend - visualizer = dict( - type='ActionVisualizer', # Universal Visualizer for classification task - vis_backends=[dict(type='LocalVisBackend')]) # Local visualization backend + type='LogProcessor', # Log processor used to format log information + window_size=20, # Default smooth interval + by_epoch=True) # Whether to format logs with epoch type + vis_backends = [ # List of visualization backends + dict(type='LocalVisBackend')] # Local visualization backend + visualizer = dict( # Config of visualizer + type='ActionVisualizer', # Name of visualizer + vis_backends=vis_backends) log_level = 'INFO' # The level of logging - resume = False # Resume from a checkpoint - load_from = None # load checkpoint as a pre-trained model from a given path. If resume == True, resume training from the checkpoint, otherwise load checkpoint without resuming - work_dir = './work_dirs/bmn_400x100_2x8_9e_activitynet_feature/' # Directory to save the model checkpoints and logs for the current experiments + load_from = None # Load model checkpoint as a pre-trained model from a given path. This will not resume training. + resume = False # Whether to resume from the checkpoint defined in `load_from`. 
If `load_from` is None, it will resume the latest checkpoint in the `work_dir`. ``` From 5a7ab0c9bdfa2ce3a40aac8a0124b3f084402510 Mon Sep 17 00:00:00 2001 From: cir7 <33249023+cir7@users.noreply.github.com> Date: Fri, 18 Nov 2022 12:27:39 +0800 Subject: [PATCH 18/57] [Fix] Fix multi-view inference (#2045) * fix multiple crop inference * move convert_to_datasample to BaseHead * add ut for average_clips is None * modify tsn config --- configs/_base_/models/tsn_r50.py | 2 +- mmaction/datasets/transforms/formatting.py | 28 +++++++++------- mmaction/models/heads/base.py | 32 +++++++------------ mmaction/models/recognizers/base.py | 25 ++------------- tests/models/recognizers/test_recognizer3d.py | 13 +++++++- 5 files changed, 43 insertions(+), 57 deletions(-) diff --git a/configs/_base_/models/tsn_r50.py b/configs/_base_/models/tsn_r50.py index 171549b636..be1c185607 100644 --- a/configs/_base_/models/tsn_r50.py +++ b/configs/_base_/models/tsn_r50.py @@ -13,7 +13,7 @@ consensus=dict(type='AvgConsensus', dim=1), dropout_ratio=0.4, init_std=0.01, - average_clips=None), + average_clips='prob'), data_preprocessor=dict( type='ActionDataPreprocessor', mean=[123.675, 116.28, 103.53], diff --git a/mmaction/datasets/transforms/formatting.py b/mmaction/datasets/transforms/formatting.py index 84d0e899eb..d29e4d5278 100644 --- a/mmaction/datasets/transforms/formatting.py +++ b/mmaction/datasets/transforms/formatting.py @@ -169,12 +169,18 @@ def __repr__(self): class FormatShape(BaseTransform): """Format final imgs shape to the given input_format. - Required keys are ``imgs``, ``num_clips`` and ``clip_len``, - added or modified keys are ``imgs`` and ``input_shape``. + Required keys: + - imgs + - num_clips + - clip_len + + Modified Keys: + - img + - input_shape Args: input_format (str): Define the final imgs format. - collapse (bool): To collpase input_format N... to ... (NCTHW to CTHW, + collapse (bool): To collapse input_format N... to ... (NCTHW to CTHW, etc.) if N is 1. Should be set as True when training and testing detectors. Defaults to False. 
""" @@ -197,7 +203,7 @@ def transform(self, results: dict) -> dict: results['imgs'] = np.array(results['imgs']) imgs = results['imgs'] # [M x H x W x C] - # M = 1 * N_crops * N_clips * L + # M = 1 * N_crops * N_clips * T if self.collapse: assert results['num_clips'] == 1 @@ -206,11 +212,11 @@ def transform(self, results: dict) -> dict: clip_len = results['clip_len'] imgs = imgs.reshape((-1, num_clips, clip_len) + imgs.shape[1:]) - # N_crops x N_clips x L x H x W x C + # N_crops x N_clips x T x H x W x C imgs = np.transpose(imgs, (0, 1, 5, 2, 3, 4)) - # N_crops x N_clips x C x L x H x W + # N_crops x N_clips x C x T x H x W imgs = imgs.reshape((-1, ) + imgs.shape[2:]) - # M' x C x L x H x W + # M' x C x T x H x W # M' = N_crops x N_clips elif self.input_format == 'NCHW': imgs = np.transpose(imgs, (0, 3, 1, 2)) @@ -230,14 +236,14 @@ def transform(self, results: dict) -> dict: num_clips = results['num_clips'] clip_len = results['clip_len'] imgs = imgs.reshape((-1, num_clips, clip_len) + imgs.shape[1:]) - # N_crops x N_clips x L x H x W x C + # N_crops x N_clips x T x H x W x C imgs = np.transpose(imgs, (0, 1, 2, 5, 3, 4)) - # N_crops x N_clips x L x C x H x W + # N_crops x N_clips x T x C x H x W imgs = imgs.reshape((-1, imgs.shape[2] * imgs.shape[3]) + imgs.shape[4:]) # M' x C' x H x W # M' = N_crops x N_clips - # C' = L x C + # C' = T x C elif self.input_format == 'NPTCHW': num_proposals = results['num_proposals'] num_clips = results['num_clips'] @@ -245,7 +251,7 @@ def transform(self, results: dict) -> dict: imgs = imgs.reshape((num_proposals, num_clips * clip_len) + imgs.shape[1:]) # P x M x H x W x C - # M = N_clips x L + # M = N_clips x T imgs = np.transpose(imgs, (0, 1, 4, 2, 3)) # P x M x C x H x W diff --git a/mmaction/models/heads/base.py b/mmaction/models/heads/base.py index 25b1f16895..9c505801d7 100644 --- a/mmaction/models/heads/base.py +++ b/mmaction/models/heads/base.py @@ -166,12 +166,8 @@ def predict(self, feats: Union[Tensor, Tuple[Tensor]], data samples. Returns: - List[:obj:`LabelData`]: Recognition results wrapped - by :obj:`LabelData`. Each item usually contains - following keys. - - - item (Tensor): Classification scores, has a shape - (num_classes, ) + List[:obj:`ActionDataSample`]: Recognition results wrapped + by :obj:`ActionDataSample`. """ cls_scores = self(feats, **kwargs) return self.predict_by_feat(cls_scores, data_samples) @@ -189,21 +185,16 @@ def predict_by_feat(self, cls_scores: Tensor, information such as `gt_labels`. Returns: - List[:obj:`LabelData`]: Recognition results wrapped - by :obj:`LabelData`. Each item usually contains following - keys. - - - item (Tensor): Classification scores, has a shape - (num_classes, ) + List[:obj:`ActionDataSample`]: Recognition results wrapped + by :obj:`ActionDataSample`. """ num_segs = cls_scores.shape[0] // len(data_samples) cls_scores = self.average_clip(cls_scores, num_segs=num_segs) - predictions: LabelList = [] - for score in cls_scores: - label = LabelData(item=score) - predictions.append(label) - return predictions + for data_sample, score in zip(data_samples, cls_scores): + prediction = LabelData(item=score) + data_sample.pred_scores = prediction + return data_samples def average_clip(self, cls_scores: Tensor, num_segs: int = 1) -> Tensor: """Averaging class scores over multiple clips. @@ -225,13 +216,12 @@ class score. Only called in test mode. 
f'Currently supported ones are ' f'["score", "prob", None]') - if self.average_clips is None: - return cls_scores - batch_size = cls_scores.shape[0] cls_scores = cls_scores.view(batch_size // num_segs, num_segs, -1) - if self.average_clips == 'prob': + if self.average_clips is None: + return cls_scores + elif self.average_clips == 'prob': cls_scores = F.softmax(cls_scores, dim=2).mean(dim=1) elif self.average_clips == 'score': cls_scores = cls_scores.mean(dim=1) diff --git a/mmaction/models/recognizers/base.py b/mmaction/models/recognizers/base.py index 504741e381..e96740bcf9 100644 --- a/mmaction/models/recognizers/base.py +++ b/mmaction/models/recognizers/base.py @@ -7,9 +7,8 @@ from mmengine.model import BaseModel, merge_dict from mmaction.registry import MODELS -from mmaction.utils import (ConfigType, ForwardResults, InstanceList, - OptConfigType, OptMultiConfig, OptSampleList, - SampleList) +from mmaction.utils import (ConfigType, ForwardResults, OptConfigType, + OptMultiConfig, OptSampleList, SampleList) class BaseRecognizer(BaseModel, metaclass=ABCMeta): @@ -172,8 +171,6 @@ def predict(self, inputs: torch.Tensor, data_samples: SampleList, feats, predict_kwargs = self.extract_feat(inputs, test_mode=True) predictions = self.cls_head.predict(feats, data_samples, **predict_kwargs) - # convert to ActionDataSample. - predictions = self.convert_to_datasample(data_samples, predictions) return predictions def _forward(self, @@ -236,21 +233,3 @@ def forward(self, else: raise RuntimeError(f'Invalid mode "{mode}". ' 'Only supports loss, predict and tensor mode') - - def convert_to_datasample(self, inputs: SampleList, - data_samples: InstanceList) -> SampleList: - """Convert predictions to ``ActionDataSample``. - - Args: - inputs (List[``ActionDataSample``]): The input data. - data_samples (List[``LabelData``]): Recognition results wrapped - by ``LabelData``. - - Returns: - List[``ActionDataSample``]: Recognition results wrapped by - ``ActionDataSample``. 
- """ - - for data_sample, pred_instances in zip(inputs, data_samples): - data_sample.pred_scores = pred_instances - return inputs diff --git a/tests/models/recognizers/test_recognizer3d.py b/tests/models/recognizers/test_recognizer3d.py index 65f1b21e02..07a4969864 100644 --- a/tests/models/recognizers/test_recognizer3d.py +++ b/tests/models/recognizers/test_recognizer3d.py @@ -29,10 +29,21 @@ def train_test_step(cfg, input_shape): predictions = recognizer.test_step(data_batch) score = predictions[0].pred_scores.item assert len(predictions) == 1 - assert score.shape, torch.Size([num_classes]) + assert score.shape == torch.Size([num_classes]) assert torch.min(score) >= 0 assert torch.max(score) <= 1 + # test when average_clips is None + recognizer.cls_head.average_clips = None + num_views = 3 + input_shape = (num_views, *input_shape[1:]) + data_batch['inputs'] = [torch.randint(0, 256, input_shape)] + with torch.no_grad(): + predictions = recognizer.test_step(data_batch) + score = predictions[0].pred_scores.item + assert len(predictions) == 1 + assert score.shape == torch.Size([num_views, num_classes]) + return loss_vars, predictions From 739cf2014ab4838b630728edb791f301a0d58a39 Mon Sep 17 00:00:00 2001 From: Austin Welch Date: Thu, 24 Nov 2022 23:01:38 -0500 Subject: [PATCH 19/57] [Fix] switch mmaction2totorchserve from mmcv to mmengine (#2053) * switch to mmengine * fix precommit --- tools/deployment/mmaction2torchserve.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/tools/deployment/mmaction2torchserve.py b/tools/deployment/mmaction2torchserve.py index d491ac7b36..bb170c8c88 100644 --- a/tools/deployment/mmaction2torchserve.py +++ b/tools/deployment/mmaction2torchserve.py @@ -4,7 +4,8 @@ from pathlib import Path from tempfile import TemporaryDirectory -import mmcv +from mmengine.config import Config +from mmengine.utils import mkdir_or_exist try: from model_archiver.model_packaging import package_model @@ -38,9 +39,9 @@ def mmaction2torchserve( force (bool): If True, if there is an existing `{model_name}.mar` file under `output_folder` it will be overwritten. """ - mmcv.mkdir_or_exist(output_folder) + mkdir_or_exist(output_folder) - config = mmcv.Config.fromfile(config_file) + config = Config.fromfile(config_file) with TemporaryDirectory() as tmpdir: config.dump(f'{tmpdir}/config.py') From 59c40379582bac366219e6e191a1967b65128fea Mon Sep 17 00:00:00 2001 From: Yining Li Date: Fri, 25 Nov 2022 12:02:53 +0800 Subject: [PATCH 20/57] [Doc] refine some documents (#2058) --- .github/CONTRIBUTING.md | 69 ------------ docker/Dockerfile | 9 +- docs/en/get_started.md | 54 +++++++--- docs/en/migration.md | 156 ++++++++++++++-------------- docs/en/notes/contribution_guide.md | 24 ++--- docs/en/notes/faq.md | 24 ++--- docs/en/switch_language.md | 4 +- 7 files changed, 145 insertions(+), 195 deletions(-) delete mode 100644 .github/CONTRIBUTING.md diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md deleted file mode 100644 index 2bb82d9fa9..0000000000 --- a/.github/CONTRIBUTING.md +++ /dev/null @@ -1,69 +0,0 @@ -# Contributing to MMAction2 - -All kinds of contributions are welcome, including but not limited to the following. - -- Fixes (typo, bugs) -- New features and components - -## Workflow - -1. Fork and pull the latest mmaction2 -2. Checkout a new branch with a meaningful name (do not use master branch for PRs) -3. Commit your changes -4. 
Create a PR - -```{note} -- If you plan to add some new features that involve large changes, it is encouraged to open an issue for discussion first. -- If you are the author of some papers and would like to include your method to mmaction2, please contact us. We will much appreciate your contribution. -``` - -## Code style - -### Python - -We adopt [PEP8](https://www.python.org/dev/peps/pep-0008/) as the preferred code style. - -We use the following tools for linting and formatting: - -- [flake8](http://flake8.pycqa.org/en/latest/): linter -- [yapf](https://github.com/google/yapf): formatter -- [isort](https://github.com/timothycrosley/isort): sort imports - -Style configurations of yapf and isort can be found in [setup.cfg](../setup.cfg). - -We use [pre-commit hook](https://pre-commit.com/) that checks and formats for `flake8`, `yapf`, `isort`, `trailing whitespaces`, -fixes `end-of-files`, sorts `requirments.txt` automatically on every commit. -The config for a pre-commit hook is stored in [.pre-commit-config](../.pre-commit-config.yaml). - -After you clone the repository, you will need to install initialize pre-commit hook. - -``` -pip install -U pre-commit -``` - -From the repository folder - -``` -pre-commit install -``` - -If you are facing an issue when installing markdown lint, you may install ruby for markdown lint by -referring to [this repo](https://github.com/innerlee/setup) by following the usage and taking [`zzruby.sh`](https://github.com/innerlee/setup/blob/master/zzruby.sh) - -or by the following steps - -```shell -# install rvm -curl -L https://get.rvm.io | bash -s -- --autolibs=read-fail -rvm autolibs disable -# install ruby -rvm install 2.7.1 -``` - -After this on every commit check code linters and formatter will be enforced. - -> Before you create a PR, make sure that your code lints and is formatted by yapf. - -### C++ and CUDA - -We follow the [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html). diff --git a/docker/Dockerfile b/docker/Dockerfile index 506366f70a..45c82cfcb7 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -8,12 +8,16 @@ ENV TORCH_CUDA_ARCH_LIST="6.0 6.1 7.0+PTX" ENV TORCH_NVCC_FLAGS="-Xfatbin -compress-all" ENV CMAKE_PREFIX_PATH="$(dirname $(which conda))/../" +# fetch the key refer to https://forums.developer.nvidia.com/t/18-04-cuda-docker-image-is-broken/212892/9 +RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub 32 +RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub RUN apt-get update && apt-get install -y git ninja-build libglib2.0-0 libsm6 libxrender-dev libxext6 ffmpeg \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* -# Install mmcv-full -RUN pip install mmcv-full==latest -f https://download.openmmlab.com/mmcv/dist/cu101/torch1.6.0/index.html +# Install MMCV +RUN pip install openmim +RUN mim install mmengine "mmcv>=2.0rc1" # Install MMAction2 RUN conda clean --all @@ -21,5 +25,6 @@ RUN git clone https://github.com/open-mmlab/mmaction2.git /mmaction2 WORKDIR /mmaction2 RUN mkdir -p /mmaction2/data ENV FORCE_CUDA="1" +RUN git checkout 1.x RUN pip install cython --no-cache-dir RUN pip install --no-cache-dir -e . 
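Once an image has been built from the Dockerfile above (for example with `docker build -f ./docker/Dockerfile --rm -t mmaction2 .`), a quick import check is one way to confirm that MMEngine, MMCV and the editable MMAction2 install all resolve inside the container. The tag `mmaction2` is only an assumption here; use whatever tag you built with.

```shell
# Illustrative sanity check: run an import inside the freshly built image.
# `mmaction2` is an assumed image tag, not something fixed by the Dockerfile.
docker run --rm mmaction2 python -c "import mmengine, mmcv, mmaction; print(mmaction.__version__)"
```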
diff --git a/docs/en/get_started.md b/docs/en/get_started.md index 448221960b..59a8b231b7 100644 --- a/docs/en/get_started.md +++ b/docs/en/get_started.md @@ -87,42 +87,49 @@ pip install "mmaction2>=1.0rc0" To verify whether MMAction2 is installed correctly, we provide some sample codes to run an inference demo. -**Step 1.** We need to download config and checkpoint files. +**Step 1.** Download the config and checkpoint files. ```shell -mim download mmaction2 --config tsn_r50_8xb32-1x1x8-100e_kinetics400-rgb --dest . +mim download mmaction2 --config tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb --dest . ``` **Step 2.** Verify the inference demo. -Option (a). If you install mmaction2 from source, just run the following command: +Option (a). If you install mmaction2 from source, you can run the following command: ```shell # The demo.mp4 and label_map_k400.txt are both from Kinetics-400 -python demo/demo.py tsn_r50_8xb32-1x1x8-100e_kinetics400-rgb.py \ - tsn_r50_8xb32-1x1x8-100e_kinetics400-rgb_20220818-2692d16c.pth \ +python demo/demo.py tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb.py \ + tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb_20220906-2692d16c.pth \ demo/demo.mp4 tools/data/kinetics/label_map_k400.txt ``` You will see the top-5 labels with corresponding scores in your terminal. -Option (b). If you install mmaction2 as a python package, open you python interpreter and copy&paste the following codes. +Option (b). If you install mmaction2 as a python package, you can run the following codes in your python interpreter, which will do the similar verification: ```python +from operator import itemgetter from mmaction.apis import init_recognizer, inference_recognizer from mmaction.utils import register_all_modules -config_file = 'tsn_r50_8xb32-1x1x8-100e_kinetics400-rgb.py' -checkpoint_file = 'tsn_r50_8xb32-1x1x8-100e_kinetics400-rgb_20220818-2692d16c.pth' +config_file = 'tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb.py' +checkpoint_file = 'tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb_20220906-2692d16c.pth' video_file = 'demo/demo.mp4' label_file = 'tools/data/kinetics/label_map_k400.txt' register_all_modules() # register all modules and set mmaction2 as the default scope. model = init_recognizer(config_file, checkpoint_file, device='cpu') # or device='cuda:0' -results = inference_recognizer(model, video_file) +result = inference_recognizer(model, video_file) + +pred_scores = result.pred_scores.item.tolist() +score_tuples = tuple(zip(range(len(pred_scores)), pred_scores)) +score_sorted = sorted(score_tuples, key=itemgetter(1), reverse=True) +top5_label = score_sorted[:5] labels = open(label_file).readlines() labels = [x.strip() for x in labels] -results = [(labels[k[0]], k[1]) for k in results] +results = [(labels[k[0]], k[1]) for k in top5_label] + print('The top-5 labels with corresponding scores are:') for result in results: print(f'{result[0]}: ', result[1]) @@ -132,7 +139,7 @@ for result in results: ### CUDA versions -When installing PyTorch, you need to specify the version of CUDA. If you are +When installing PyTorch, you may need to specify the version of CUDA. If you are not clear on which to choose, follow our recommendations: - For Ampere-based NVIDIA GPUs, such as GeForce 30 series and NVIDIA A100, CUDA 11 is a must. @@ -151,12 +158,12 @@ version of cudatoolkit in `conda install` command. 
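If you are unsure which CUDA build of PyTorch actually ended up in your environment, a short check like the following (plain PyTorch, no MMAction2 API involved) makes the pairing explicit before you pick a matching mmcv wheel:

```python
# Print the PyTorch version, the CUDA version it was built against,
# and whether a GPU is currently visible to this environment.
import torch

print(torch.__version__)          # e.g. 1.10.1+cu113
print(torch.version.cuda)         # None for CPU-only builds
print(torch.cuda.is_available())  # True if the driver and GPU are usable
```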
### Install MMCV without MIM -MMCV contains C++ and CUDA extensions, thus depending on PyTorch in a complex +MMCV contains C++ and CUDA extensions, so it depends on PyTorch in a complex way. MIM solves such dependencies automatically and makes the installation easier. However, it is not a must. To install MMCV with pip instead of MIM, please follow -[MMCV installation guides](https://mmcv.readthedocs.io/en/dev-2.x/get_started/installation.html). +[MMCV installation guides](https://mmcv.readthedocs.io/en/2.x/get_started/installation.html). This requires manually specifying a find-url based on PyTorch version and its CUDA version. For example, the following command install mmcv built for PyTorch 1.10.x and CUDA 11.3. @@ -167,7 +174,24 @@ pip install 'mmcv>=2.0.0rc1' -f https://download.openmmlab.com/mmcv/dist/cu113/t ### Install on CPU-only platforms -MMAction2 can be built for CPU only environment. In CPU mode you can train, test or inference a model. +MMAction2 can be built for CPU-only environment. In CPU mode you can train, test or inference a model. Some functionalities are gone in this mode, usually GPU-compiled ops. But don't -worry, almost all models in MMAction2 don't depends on these ops. +worry, almost all models in MMAction2 don't depend on these ops. + +### Using MMAction2 with Docker + +We provide a [Dockerfile](https://github.com/open-mmlab/mmaction2/blob/1.x/docker/Dockerfile) +to build an image. Ensure that your [docker version](https://docs.docker.com/engine/install/) >=19.03. + +```shell +# build an image with PyTorch 1.6.0, CUDA 10.1, CUDNN 7. +# If you prefer other versions, just modified the Dockerfile +docker build -f ./docker/Dockerfile --rm -t mmaction2 . +``` + +Run it with + +```shell +docker run --gpus all --shm-size=8g -it -v {DATA_DIR}:/mmaction2/data mmaction2 +``` diff --git a/docs/en/migration.md b/docs/en/migration.md index dfe4fd60a4..2917455f80 100644 --- a/docs/en/migration.md +++ b/docs/en/migration.md @@ -1,33 +1,27 @@ # Migration from MMAction2 0.x -We introduce some modifications in MMAction2 1.x, and some of them are BC-breaking. To migrate your projects from MMAction2 0.x smoothly, please read this tutorial. +MMAction2 1.x introduced major refactorings and modifications including some BC-breaking changes. We provide this tutorial to help you migrate your projects from MMAction2 0.x smoothly. ## New dependencies -MMAction2 1.x depends on some new packages, you can prepare a new clean environment and install again -according to the [install tutorial](./get_started.md). Or install the below packages manually. +MMAction2 1.x depends on the following packages. You are recommended to prepare a new clean environment and install them according to [install tutorial](./get_started.md) -1. [MMEngine](https://github.com/open-mmlab/mmengine): MMEngine is the core the OpenMMLab 2.0 architecture, - and we splited many compentents unrelated to computer vision from MMCV to MMEngine. -2. [MMCV](https://github.com/open-mmlab/mmcv): The computer vision package of OpenMMLab. This is not a new - dependency, but you need to upgrade it to above `2.0.0rc0` version. +1. [MMEngine](https://github.com/open-mmlab/mmengine): MMEngine is a foundational library for training deep learning model introduced in OpenMMLab 2.0 architecture. +2. [MMCV](https://github.com/open-mmlab/mmcv): MMCV is a foundational library for computer vision. MMAction2 1.x requires `mmcv>=2.0.0rc0` which is more compact and efficient than `mmcv-full==1.x`. 
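As a rough sketch, both dependencies can be installed through MIM, mirroring what the Dockerfile in this series does; the version specifier below is illustrative, so defer to the install tutorial for the exact pins:

```shell
# Install the OpenMMLab 2.0 foundations used by MMAction2 1.x.
pip install -U openmim
mim install mmengine
mim install "mmcv>=2.0.0rc0"
```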
## Configuration files -In MMAction2 1.x, we refactored the structure of configuration files, and the original files are not usable. +In MMAction2 1.x, we refactored the structure of configuration files. The configuration files with the old style will be incompatible. - - -In this section, we will introduce all changes of the configuration files. And we assume you already have -ideas of the [config files](./user_guides/config.md). +In this section, we will introduce all changes of the configuration files. And we assume you are already familiar with the [config files](./user_guides/config.md). ### Model settings -No changes in `model.backbone`, `model.neck` and `model.head` fields. +No changes in `model.backbone` and `model.neck`. For `model.cls_head`, we move the `average_clips` inside it, which is originally set in `model.test_cfg`. ### Data settings -Changes in **`data`**: +#### Changes in **`data`** - The original `data` field is splited to `train_dataloader`, `val_dataloader` and `test_dataloader`. This allows us to configure them in fine-grained. For example, @@ -77,9 +71,9 @@ test_dataloader = val_dataloader -Changes in **`pipeline`**: +#### Changes in **`pipeline`** -- The original formatting transforms **`ToTensor`**, **`Collect`** are combined as `PackActionInputs` for action recognition task; and **`ToTensor`**, **`Collect`**, **`ToDataContainer`** are combined as `PackLocalizationInputs` +- The original formatting transforms **`ToTensor`**, **`Collect`** are combined as `PackActionInputs`. - We don't recommend to do **`Normalize`** in the dataset pipeline. Please remove it from pipelines and set it in the `model.data_preprocessor` field. @@ -140,14 +134,13 @@ train_pipeline = [
-Changes in **`evaluation`**: +#### Changes in **`evaluation`** -- The **`evaluation`** field is splited to `val_evaluator` and `test_evaluator`. And it won't supports `interval` and `save_best` arguments. - The `interval` is moved to `train_cfg.val_interval`, see [the schedule settings](./user_guides/1_config.md#schedule-settings) and the `save_best` - is moved to `default_hooks.checkpoint.save_best`, see [the runtime settings](./user_guides/1_config.md#runtime-settings). -- The 'mean_average_precision', 'mean_class_accuracy', 'mmit_mean_average_precision', 'top_k_accuracy' are combined as `AccMetric`, and use `metric_list`to specify to calculate which metric -- The `AVAMetric` is used to evaluate AVA Dataset -- The `BSNMetric` is used to evaluate BSN model +- The **`evaluation`** field is splited to `val_evaluator` and `test_evaluator`. And it won't support `interval` and `save_best` arguments. +- The `interval` is moved to `train_cfg.val_interval` and the `save_best` is moved to `default_hooks.checkpoint.save_best`. +- The 'mean_average_precision', 'mean_class_accuracy', 'mmit_mean_average_precision', 'top_k_accuracy' are combined as `AccMetric`, and you could use `metric_list` to specify which metric to calculate. +- The `AVAMetric` is used to evaluate AVA Dataset. +- The `ANetMetric` is used to evaluate ActivityNet Dataset. @@ -166,7 +159,9 @@ evaluation = dict(
```python -val_evaluator = dict(type='AccMetric') +val_evaluator = dict( + type='AccMetric', + metric_list=('top_k_accuracy', 'mean_class_accuracy')) test_evaluator = val_evaluator ``` @@ -176,11 +171,11 @@ test_evaluator = val_evaluator ### Schedule settings -Changes in **`optimizer`** and **`optimizer_config`**: +#### Changes in **`optimizer`** and **`optimizer_config`** -- Now we use `optim_wrapper` field to specify all configuration about the optimization process. And the - `optimizer` is a sub field of `optim_wrapper` now. -- `paramwise_cfg` is also a sub field of `optim_wrapper`, instead of `optimizer`. +- Now we use `optim_wrapper` field to configure the optimization process. And the + `optimizer` becomes a sub field of `optim_wrapper`. +- `paramwise_cfg` is also a sub field of `optim_wrapper` parallel to `optimizer`. - `optimizer_config` is removed now, and all configurations of it are moved to `optim_wrapper`. - `grad_clip` is renamed to `clip_grad`. @@ -222,14 +217,14 @@ optim_wrapper = dict(
-Changes in **`lr_config`**: +#### Changes in **`lr_config`** - The `lr_config` field is removed and we use new `param_scheduler` to replace it. - The `warmup` related arguments are removed, since we use schedulers combination to implement this functionality. The new schedulers combination mechanism is very flexible, and you can use it to design many kinds of learning -rate / momentum curves. See [the tutorial](TODO) for more details. +rate / momentum curves. @@ -270,7 +265,7 @@ param_scheduler = [
-Changes in **`runner`**: +#### Changes in **`runner`** Most configuration in the original `runner` field is moved to `train_cfg`, `val_cfg` and `test_cfg`, which configure the loop in training, validation and test. @@ -291,9 +286,9 @@ runner = dict(type='EpochBasedRunner', max_epochs=100) ```python # The `val_interval` is the original `evaluation.interval`. -train_cfg = dict(by_epoch=True, max_epochs=100, val_interval=1) -val_cfg = dict() # Use the default validation loop. -test_cfg = dict() # Use the default test loop. +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=100, val_begin=1, val_interval=1) +val_cfg = dict(type='ValLoop') # Use the default validation loop. +test_cfg = dict(type='TestLoop') # Use the default test loop. ``` @@ -301,17 +296,20 @@ test_cfg = dict() # Use the default test loop. In fact, in OpenMMLab 2.0, we introduced `Loop` to control the behaviors in training, validation and test. And -the functionalities of `Runner` are also changed. You can find more details in [the MMEngine tutorials](TODO). +the functionalities of `Runner` are also changed. You can find more details in the [MMEngine tutorials](https://mmengine.readthedocs.io/en/latest/tutorials/runner.html). ### Runtime settings -Changes in **`checkpoint_config`** and **`log_config`**: +#### Changes in **`checkpoint_config`** and **`log_config`** The `checkpoint_config` are moved to `default_hooks.checkpoint` and the `log_config` are moved to `default_hooks.logger`. And we move many hooks settings from the script code to the `default_hooks` field in the runtime configuration. ```python default_hooks = dict( + # update runtime information, e.g. current iter and lr. + runtime_info=dict(type='RuntimeInfoHook'), + # record the time of every iterations. timer=dict(type='IterTimerHook'), @@ -324,11 +322,11 @@ default_hooks = dict( # save checkpoint per epoch, and automatically save the best checkpoint. checkpoint=dict(type='CheckpointHook', interval=1, save_best='auto'), - # set sampler seed in distributed evrionment. + # set sampler seed in distributed environment. sampler_seed=dict(type='DistSamplerSeedHook'), - # validation results visualization, set True to enable it. - visualization=dict(type='VisualizationHook', enable=False), + # synchronize model buffers at the end of each epoch. + sync_buffers=dict(type='SyncBuffersHook') ) ``` @@ -371,7 +369,7 @@ visualizer = dict( -Changes in **`load_from`** and **`resume_from`**: +#### Changes in **`load_from`** and **`resume_from`** - The `resume_from` is removed. And we use `resume` and `load_from` to replace it. - If `resume=True` and `load_from` is not None, resume training from the checkpoint in `load_from`. @@ -379,8 +377,9 @@ Changes in **`load_from`** and **`resume_from`**: - If `resume=False` and `load_from` is not None, only load the checkpoint, not resume training. - If `resume=False` and `load_from` is None, do not load nor resume. -Changes in **`dist_params`**: The `dist_params` field is a sub field of `env_cfg` now. And there are some new -configurations in the `env_cfg`. +#### Changes in **`dist_params`** + +The `dist_params` field is a sub field of `env_cfg` now. And there are some new configurations in the `env_cfg`. ```python env_cfg = dict( @@ -395,10 +394,13 @@ env_cfg = dict( ) ``` -Changes in **`workflow`**: `workflow` related functionalities are removed. +#### Changes in **`workflow`** + +`Workflow` related functionalities are removed. -New field **`visualizer`**: The visualizer is a new design in OpenMMLab 2.0 architecture. 
We use a -visualizer instance in the runner to handle results & log visualization and save to different backends. +#### New field **`visualizer`** + +The visualizer is a new design in OpenMMLab 2.0 architecture. We use a visualizer instance in the runner to handle results & log visualization and save to different backends. ```python visualizer = dict( @@ -411,7 +413,9 @@ visualizer = dict( ) ``` -New field **`default_scope`**: The start point to search module for all registries. The `default_scope` in MMAction2 is `mmaction`. See [the registry tutorial](TODO) for more details. +#### New field **`default_scope`** + +The start point to search module for all registries. The `default_scope` in MMAction2 is `mmaction`. See [the registry tutorial](https://mmengine.readthedocs.io/en/latest/tutorials/registry.html) for more details. ## Packages @@ -426,7 +430,6 @@ The documentation can be found [here](mmaction.apis). | `train_model` | Removed, use `runner.train` to train. | | `multi_gpu_test` | Removed, use `runner.test` to test. | | `single_gpu_test` | Removed, use `runner.test` to test. | -| `show_result_pyplot` | Waiting for support. | | `set_random_seed` | Removed, use `mmengine.runner.set_random_seed`. | | `init_random_seed` | Removed, use `mmengine.dist.sync_random_seed`. | @@ -437,16 +440,17 @@ The `mmaction.core` package is renamed to [`mmaction.engine`](mmaction.engine). | Sub package | Changes | | :----------: | :-------------------------------------------------------------------------------------------------: | | `evaluation` | Removed, use the metrics in `mmaction.evaluation`. | -| `hook` | Moved to `mmaction.engine.hooks` | -| `optimizers` | Moved to `mmaction.engine.optimizers` | +| `hooks` | Moved to `mmaction.engine.hooks` | +| `optimizer` | Moved to `mmaction.engine.optimizers` | | `utils` | Removed, the distributed environment related functions can be found in the `mmengine.dist` package. | ### `mmaction.datasets` The documentation can be found [here](mmaction.datasets) -Changes in [`BaseDataset`](mmaction.datasets.Base): -| Method of Dataset | Changes | +#### Changes in [`BaseActionDataset`](mmaction.datasets.BaseActionDataset): + +| Method | Changes | | :--------------------: | :-------------------------------------------: | | `prepare_train_frames` | Replaced by `get_data_info` | | `preprare_test_frames` | Replaced by `get_data_info` | @@ -454,44 +458,40 @@ Changes in [`BaseDataset`](mmaction.datasets.Base): | `dump_results` | Removed, use `mmengine.evaluator.DumpResults` | | `load_annotations` | Replaced by `load_data_list` | -Now, you can write a new Dataset class inherited from \[BaseDataset\] and overwrite `load_data_list` only. To load more data information, you could overwrite `get_data_info` like `RawframeDataset` and `AVADataset`. -The `mmaction.datasets.pipelines` is renamed to `mmaction.datasets.transforms` and the `mmaction.datasets.pipelines.augmentations` is renamed to `mmaction.datasets.pipelines.augmentations.processing` +Now, you can write a new Dataset class inherited from `BaseActionDataset` and overwrite `load_data_list` only. To load more data information, you could overwrite `get_data_info` like `RawframeDataset` and `AVADataset`. +The `mmaction.datasets.pipelines` is renamed to `mmaction.datasets.transforms` and the `mmaction.datasets.pipelines.augmentations` is renamed to `mmaction.datasets.pipelines.processing`. ### `mmaction.models` The documentation can be found [here](mmaction.models). 
The interface of all **backbones**, **necks** and **losses** didn't change. -Changes in [`BaseRecognizer`](mmaction.models.BaseRecognizer): +#### Changes in [`BaseRecognizer`](mmaction.models.BaseRecognizer): -| Method of recognizer | Changes | -| :------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------: | -| `extract_feat` | No changes | -| `forward` | Now only accepts three arguments: `inputs`, `data_samples` and `mode`. See [the documentation](mmaction.models.BaseRecognizer) for more details. | -| `forward_train` | Replaced by `loss`. | -| `forward_test` | Replaced by `predict`. | -| `train_step` | The `optimizer` argument is replaced by `optim_wrapper` and it accepts [`OptimWrapper`](mmengine.optim.OptimWrapper). | -| `val_step` | The original `val_step` is the same as `train_step`, now it calls `predict`. | -| `test_step` | New method, and it's the same as `val_step`. | +| Method | Changes | +| :-------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| `extract_feat` | Enhanced method, which now supports output features of three stages (`backbone`, `neck`, `head`) and can handle different modes, such as `train_mode` and `test_mode`. | +| `forward` | Now only accepts three arguments: `inputs`, `data_samples` and `mode`. See [the documentation](mmaction.models.BaseRecognizer) for more details. | +| `forward_train` | Replaced by `loss`. | +| `forward_test` | Replaced by `predict`. | +| `train_step` | The `optimizer` argument is replaced by `optim_wrapper` and it accepts [`OptimWrapper`](mmengine.optim.OptimWrapper). | +| `val_step` | The original `val_step` is the same as `train_step`, now it calls `predict`. | +| `test_step` | New method, and it's the same as `val_step`. | -Changes in [heads](mmaction.models.heads) +#### Changes in [BaseHead](mmaction.models.BaseHead): -| Function | Changes | -| :-------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------: | -| `forward` | No changes | -| `loss` | It accepts `data_samples` instead of `labels` to calculate loss. The `data_samples` is a list of [ActionDataSample](mmaction.structures.ActionDataSample) | -| `predict` | New method, and it returns the recognition results | +| Method | Changes | +| :-------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | +| `forward` | No changes | +| `loss` | It accepts `feats` and `data_samples` instead of `cls_score` and `labels` to calculate loss. The `data_samples` is a list of [ActionDataSample](mmaction.structures.ActionDataSample). | +| `predict` | New method. It accepts `feats` and `data_samples` to predict classification scores. | ### `mmaction.utils` -| Function | Changes | -| :--------------------------: | :-----------------------------------------------------------: | -| `collect_env` | No changes | -| `get_root_logger` | Removed, use `mmengine.MMLogger.get_current_instance` | -| `load_json_log` | Waiting for support | -| `setup_multi_processes` | Removed, use `mmengine.utils.dl_utils.setup_multi_processes`. | -| `wrap_non_distributed_model` | Removed, we auto wrap the model in the runner. 
| -| `wrap_distributed_model` | Removed, we auto wrap the model in the runner. | -| `auto_select_device` | Removed, we auto select the device in the runner. | +| Function | Changes | +| :---------------------: | :-----------------------------------------------------------: | +| `collect_env` | No changes | +| `get_root_logger` | Removed, use `mmengine.MMLogger.get_current_instance` | +| `setup_multi_processes` | Removed, use `mmengine.utils.dl_utils.setup_multi_processes`. | ### Other changes diff --git a/docs/en/notes/contribution_guide.md b/docs/en/notes/contribution_guide.md index 3c81e7feb1..92548868d2 100644 --- a/docs/en/notes/contribution_guide.md +++ b/docs/en/notes/contribution_guide.md @@ -4,6 +4,7 @@ All kinds of contributions are welcome, including but not limited to the followi - Fixes (typo, bugs) - New features and components +- Add documentation or translate the documentation into other languages ## Workflow @@ -28,11 +29,13 @@ We use the following tools for linting and formatting: - [flake8](http://flake8.pycqa.org/en/latest/): linter - [yapf](https://github.com/google/yapf): formatter - [isort](https://github.com/timothycrosley/isort): sort imports +- [codespell](https://github.com/codespell-project/codespell): A Python utility to fix common misspellings in text files. +- [mdformat](https://github.com/executablebooks/mdformat): Mdformat is an opinionated Markdown formatter that can be used to enforce a consistent style in Markdown files. +- [docformatter](https://github.com/myint/docformatter): A formatter to format docstring. Style configurations of yapf and isort can be found in [setup.cfg](../../../setup.cfg). -We use [pre-commit hook](https://pre-commit.com/) that checks and formats for `flake8`, `yapf`, `isort`, `trailing whitespaces`, -fixes `end-of-files`, sorts `requirments.txt` automatically on every commit. +We use [pre-commit hook](https://pre-commit.com/) that checks and formats for `flake8`, `yapf`, `isort`, `trailing whitespaces`, `markdown files`, fixes `end-of-files`, sorts `requirments.txt` automatically on every commit. The config for a pre-commit hook is stored in [.pre-commit-config](../../../.pre-commit-config.yaml). After you clone the repository, you will need to install initialize pre-commit hook. @@ -43,26 +46,13 @@ pip install -U pre-commit From the repository folder -``` -pre-commit install -``` - -If you are facing an issue when installing markdown lint, you may install ruby for markdown lint by -referring to [this repo](https://github.com/innerlee/setup) by following the usage and taking [`zzruby.sh`](https://github.com/innerlee/setup/blob/master/zzruby.sh) - -or by the following steps - ```shell -# install rvm -curl -L https://get.rvm.io | bash -s -- --autolibs=read-fail -rvm autolibs disable -# install ruby -rvm install 2.7.1 +pre-commit install ``` After this on every commit check code linters and formatter will be enforced. -Before you create a PR, make sure that your code lints and is formatted by yapf. +> Before you create a PR, make sure that your code lints and is formatted by yapf. ### C++ and CUDA diff --git a/docs/en/notes/faq.md b/docs/en/notes/faq.md index 4fdcea4d2f..4f028d5b4c 100644 --- a/docs/en/notes/faq.md +++ b/docs/en/notes/faq.md @@ -12,14 +12,14 @@ We list some common issues faced by many users and their corresponding solutions - [Testing](#testing) Feel free to enrich the list if you find any frequent issues and have ways to help others to solve them. 
-If the contents here do not cover your issue, please create an issue using the [provided templates](/.github/ISSUE_TEMPLATE/error-report.md) and make sure you fill in all required information in the template. +If the contents here do not cover your issue, please create an issue using the [provided templates](/.github/ISSUE_TEMPLATE/error-report.md) and make sure to fill in all required information in the template. ## Installation - **"No module named 'mmcv.ops'"; "No module named 'mmcv.\_ext'"** 1. Uninstall existing mmcv in the environment using `pip uninstall mmcv` - 2. Install mmcv following the [installation instruction](https://mmcv.readthedocs.io/en/latest/#installation) + 2. Install mmcv following the [installation instruction](https://mmcv.readthedocs.io/en/2.x/get_started/installation.html#install-mmcv) - **"OSError: MoviePy Error: creation of None failed because of the following error"** @@ -30,7 +30,7 @@ If the contents here do not cover your issue, please create an issue using the [ - **"Why I got the error message 'Please install XXCODEBASE to use XXX' even if I have already installed XXCODEBASE?"** - You got that error message because our project failed to import a function or a class from XXCODEBASE. You can try to run the corresponding line to see what happens. One possible reason is, for some codebases in OpenMMLAB, you need to install mmcv-full before you install them. + You got that error message because our project failed to import a function or a class from XXCODEBASE. You can try to run the corresponding line to see what happens. One possible reason is, for some codebases in OpenMMLAB, you need to install mmcv and mmengine before you install them. You could follow this [tutorial](https://mmaction2.readthedocs.io/en/1.x/get_started.html#installation) to install them. ## Data @@ -40,19 +40,19 @@ If the contents here do not cover your issue, please create an issue using the [ If users encounter FileNotFound error for the first or last frame of the data, there is a need to check the files begin with offset 0 or 1, that is `xxx_00000.jpg` or `xxx_00001.jpg`, and then change the `start_index` value of data pipeline in configs. -- **How should we preprocess the videos in the dataset? Resizing them to a fix size(all videos with the same height-width ratio) like `340x256`(1) or resizing them so that the short edges of all videos are of the same length (256px or 320px)** +- **How should we preprocess the videos in the dataset? Resizing them to a fix size(all videos with the same height-width ratio) like `340x256` (1) or resizing them so that the short edges of all videos are of the same length (256px or 320px) (2)** - We have tried both preprocessing approaches and found (2) is a better solution in general, so we use (2) with short edge length 256px as the default preprocessing setting. We benchmarked these preprocessing approaches and you may find the results in [TSN Data Benchmark](https://github.com/open-mmlab/mmaction2/tree/dev-1.x/configs/recognition/tsn) and [SlowOnly Data Benchmark](https://github.com/open-mmlab/mmaction2/tree/dev-1.x/configs/recognition/tsn). + We have tried both preprocessing approaches and found (2) is a better solution in general, so we use (2) with short edge length 256px as the default preprocessing setting. 
We benchmarked these preprocessing approaches and you may find the results in [TSN Data Benchmark](https://github.com/open-mmlab/mmaction2/tree/master/configs/recognition/tsn) and [SlowOnly Data Benchmark](https://github.com/open-mmlab/mmaction2/tree/master/configs/recognition/slowonly). - **Mismatched data pipeline items lead to errors like `KeyError: 'total_frames'`** We have both pipeline for processing videos and frames. - **For videos**, We should decode them on the fly in the pipeline, so pairs like `DecordInit & DecordDecode`, `OpenCVInit & OpenCVDecode`, `PyAVInit & PyAVDecode` should be used for this case like [this example](https://github.com/open-mmlab/mmaction2/blob/dev-1.x/configs/recognition/slowfast/slowfast_r50_4x16x1_256e_8xb8_kinetics400_rgb.py#L13-L15). + **For videos**, We should decode them on the fly in the pipeline, so pairs like `DecordInit & DecordDecode`, `OpenCVInit & OpenCVDecode`, `PyAVInit & PyAVDecode` should be used for this case like [this example](https://github.com/open-mmlab/mmaction2/blob/1.x/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb.py#L14-L16). - **For Frames**, the image has been decoded offline, so pipeline item `RawFrameDecode` should be used for this case like [this example](https://github.com/open-mmlab/mmaction2/blob/023777cfd26bb175f85d78c455f6869673e0aa09/configs/recognition/slowonly/slowonly_r101_8x8x1_196e_kinetics400_rgb.py#L31). + **For Frames**, the image has been decoded offline, so pipeline item `RawFrameDecode` should be used for this case like [this example](https://github.com/open-mmlab/mmaction2/blob/1.x/configs/recognition/trn/trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv1-rgb.py#L17). - `KeyError: 'total_frames'` is caused by incorrectly using `RawFrameDecode` step for videos, since when the input is a video, it can not get the `total_frame` beforehand. + `KeyError: 'total_frames'` is caused by incorrectly using `RawFrameDecode` step for videos, since when the input is a video, it can not get the `total_frames` beforehand. ## Training @@ -65,10 +65,10 @@ If the contents here do not cover your issue, please create an issue using the [ - **How to fix stages of backbone when finetuning a model?** - You can refer to [`def _freeze_stages()`](https://github.com/open-mmlab/mmaction2/blob/0149a0e8c1e0380955db61680c0006626fd008e9/mmaction/models/backbones/x3d.py#L458) and [`frozen_stages`](https://github.com/open-mmlab/mmaction2/blob/0149a0e8c1e0380955db61680c0006626fd008e9/mmaction/models/backbones/x3d.py#L183-L184), - reminding to set `find_unused_parameters = True` in config files for distributed training or testing. + You can refer to [`def _freeze_stages()`](https://github.com/open-mmlab/mmaction2/blob/1.x/mmaction/models/backbones/resnet3d.py#L791) and [`frozen_stages`](https://github.com/open-mmlab/mmaction2/blob/1.x/mmaction/models/backbones/resnet3d.py#L369-L370). + Reminding to set `find_unused_parameters = True` in config files for distributed training or testing. - Actually, users can set `frozen_stages` to freeze stages in backbones except C3D model, since all backbones inheriting from `ResNet` and `ResNet3D` support the inner function `_freeze_stages()`. + Actually, users can set `frozen_stages` to freeze stages in backbones except C3D model, since almost all backbones inheriting from `ResNet` and `ResNet3D` support the inner function `_freeze_stages()`. 
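+
+  A minimal sketch of the corresponding config override (the backbone is assumed
+  to be a `frozen_stages`-aware one such as `ResNet3d`; the value below is only
+  illustrative):
+
+  ```python
+  model = dict(backbone=dict(frozen_stages=2))  # freeze the early backbone stages
+  # needed for distributed training/testing once part of the model is frozen
+  find_unused_parameters = True
+  ```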
- **How to set memcached setting in config files?** @@ -95,7 +95,7 @@ If the contents here do not cover your issue, please create an issue using the [ - **How to make predicted score normalized by softmax within \[0, 1\]?** - change this in the config, make `model['cls_head'] = dict(average_clips='prob')`. + change this in the config, make `model.cls_head.average_clips = 'prob'`. - **What if the model is too large and the GPU memory can not fit even only one testing sample?** diff --git a/docs/en/switch_language.md b/docs/en/switch_language.md index 4bade2237f..0009eafa9e 100644 --- a/docs/en/switch_language.md +++ b/docs/en/switch_language.md @@ -1,3 +1,3 @@ -## English +## English -## 简体中文 +## 简体中文 From 3225be87ed17137bf2f11713a4540c764df37978 Mon Sep 17 00:00:00 2001 From: wxDai Date: Fri, 25 Nov 2022 12:03:27 +0800 Subject: [PATCH 21/57] [Fix] fix `gen_ntu_rgbd_raw.py` (#2076) --- tools/data/skeleton/gen_ntu_rgbd_raw.py | 292 +++++++----------------- 1 file changed, 77 insertions(+), 215 deletions(-) diff --git a/tools/data/skeleton/gen_ntu_rgbd_raw.py b/tools/data/skeleton/gen_ntu_rgbd_raw.py index 4cc8d4015d..57b9676704 100644 --- a/tools/data/skeleton/gen_ntu_rgbd_raw.py +++ b/tools/data/skeleton/gen_ntu_rgbd_raw.py @@ -1,10 +1,10 @@ # Copyright (c) OpenMMLab. All rights reserved. import argparse -import math import os import os.path as osp +from typing import Dict, List, Optional, Tuple -import mmcv +import mmengine import numpy as np training_subjects_60 = [ @@ -25,146 +25,14 @@ max_frame = 300 -def unit_vector(vector): - """Returns the unit vector of the vector.""" - return vector / np.linalg.norm(vector) - - -def angle_between(v1, v2): - """Returns the angle in radians between vectors 'v1' and 'v2':: - - >>> angle_between((1, 0, 0), (0, 1, 0)) - 1.5707963267948966 - >>> angle_between((1, 0, 0), (1, 0, 0)) - 0.0 - >>> angle_between((1, 0, 0), (-1, 0, 0)) - 3.141592653589793 - """ - if np.abs(v1).sum() < 1e-6 or np.abs(v2).sum() < 1e-6: - return 0 - v1_u = unit_vector(v1) - v2_u = unit_vector(v2) - return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0)) - - -def rotation_matrix(axis, theta): - """Return the rotation matrix associated with counterclockwise rotation - about the given axis by theta radians.""" - if np.abs(axis).sum() < 1e-6 or np.abs(theta) < 1e-6: - return np.eye(3) - axis = np.asarray(axis) - axis = axis / math.sqrt(np.dot(axis, axis)) - a = math.cos(theta / 2.0) - b, c, d = -axis * math.sin(theta / 2.0) - aa, bb, cc, dd = a * a, b * b, c * c, d * d - bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d - return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)], - [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)], - [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]]) - - -def pre_normalization(data, zaxis=[0, 1], xaxis=[8, 4]): - N, C, T, V, M = data.shape - s = np.transpose(data, [0, 4, 2, 3, 1]) # N C T V M -> N M T V C - - print('pad the null frames with the previous frames') - prog_bar = mmcv.ProgressBar(len(s)) - for i_s, skeleton in enumerate(s): - if skeleton.sum() == 0: - print(i_s, ' has no skeleton') - for i_p, person in enumerate(skeleton): - if person.sum() == 0: - continue - if person[0].sum() == 0: - index = (person.sum(-1).sum(-1) != 0) - tmp = person[index].copy() - person *= 0 - person[:len(tmp)] = tmp - - for i_f, frame in enumerate(person): - if frame.sum() == 0: - if person[i_f:].sum() == 0: - rest = len(person) - i_f - num = int(np.ceil(rest / i_f)) - pad = np.concatenate( - [person[0:i_f] for _ in range(num)], 0)[:rest] 
- s[i_s, i_p, i_f:] = pad - break - prog_bar.update() - - print('sub the center joint #1 (spine joint in ntu and ' - 'neck joint in kinetics)') - prog_bar = mmcv.ProgressBar(len(s)) - for i_s, skeleton in enumerate(s): - if skeleton.sum() == 0: - continue - main_body_center = skeleton[0][:, 1:2, :].copy() - for i_p, person in enumerate(skeleton): - if person.sum() == 0: - continue - mask = (person.sum(-1) != 0).reshape(T, V, 1) - s[i_s, i_p] = (s[i_s, i_p] - main_body_center) * mask - prog_bar.update() - - print('parallel the bone between hip(jpt 0) and ' - 'spine(jpt 1) of the first person to the z axis') - prog_bar = mmcv.ProgressBar(len(s)) - for i_s, skeleton in enumerate(s): - if skeleton.sum() == 0: - continue - joint_bottom = skeleton[0, 0, zaxis[0]] - joint_top = skeleton[0, 0, zaxis[1]] - axis = np.cross(joint_top - joint_bottom, [0, 0, 1]) - angle = angle_between(joint_top - joint_bottom, [0, 0, 1]) - matrix_z = rotation_matrix(axis, angle) - for i_p, person in enumerate(skeleton): - if person.sum() == 0: - continue - for i_f, frame in enumerate(person): - if frame.sum() == 0: - continue - for i_j, joint in enumerate(frame): - s[i_s, i_p, i_f, i_j] = np.dot(matrix_z, joint) - prog_bar.update() - - print('parallel the bone between right shoulder(jpt 8) and ' - 'left shoulder(jpt 4) of the first person to the x axis') - prog_bar = mmcv.ProgressBar(len(s)) - for i_s, skeleton in enumerate(s): - if skeleton.sum() == 0: - continue - joint_rshoulder = skeleton[0, 0, xaxis[0]] - joint_lshoulder = skeleton[0, 0, xaxis[1]] - axis = np.cross(joint_rshoulder - joint_lshoulder, [1, 0, 0]) - angle = angle_between(joint_rshoulder - joint_lshoulder, [1, 0, 0]) - matrix_x = rotation_matrix(axis, angle) - for i_p, person in enumerate(skeleton): - if person.sum() == 0: - continue - for i_f, frame in enumerate(person): - if frame.sum() == 0: - continue - for i_j, joint in enumerate(frame): - s[i_s, i_p, i_f, i_j] = np.dot(matrix_x, joint) - prog_bar.update() - - data = np.transpose(s, [0, 4, 2, 3, 1]) - return data - - -def read_skeleton_filter(file): +def read_skeleton_filter(file: str) -> Dict: with open(file, 'r') as f: - skeleton_sequence = {} - skeleton_sequence['num_frame'] = int(f.readline()) - skeleton_sequence['frameInfo'] = [] + skeleton_sequence = {'num_frame': int(f.readline()), 'frameInfo': []} for t in range(skeleton_sequence['num_frame']): - frame_info = {} - frame_info['numBody'] = int(f.readline()) - frame_info['bodyInfo'] = [] + frame_info = {'numBody': int(f.readline()), 'bodyInfo': []} for m in range(frame_info['numBody']): - body_info = {} body_info_key = [ 'bodyID', 'clipedEdges', 'handLeftConfidence', 'handLeftState', 'handRightConfidence', 'handRightState', @@ -195,20 +63,20 @@ def read_skeleton_filter(file): return skeleton_sequence -def get_nonzero_std(s): # T V C +def get_nonzero_std(s: np.ndarray) -> float: # T V C index = s.sum(-1).sum(-1) != 0 s = s[index] if len(s) != 0: - s = s[:, :, 0].std() + s[:, :, 1].std() + s[:, :, - 2].std() # three channels + s = s[:, :, 0].std() + \ + s[:, :, 1].std() + \ + s[:, :, 2].std() # three channels else: s = 0 return s -def read_xyz(file, max_body=2, num_joint=25): +def read_xyz(file: str, max_body: int = 4, num_joint: int = 25) -> np.ndarray: seq_info = read_skeleton_filter(file) - # num_frame = seq_info['num_frame'] data = np.zeros((max_body, seq_info['num_frame'], num_joint, 3)) for n, f in enumerate(seq_info['frameInfo']): for m, b in enumerate(f['bodyInfo']): @@ -222,32 +90,23 @@ def read_xyz(file, max_body=2, num_joint=25): 
energy = np.array([get_nonzero_std(x) for x in data]) index = energy.argsort()[::-1][0:max_body_true] data = data[index] - data = data.transpose(3, 1, 2, 0) - return data + # filter padding body + data = data[data.sum((1, 2, 3)) != 0] + return data -def gendata(data_path, - out_path, - ignored_sample_path=None, - task='ntu60', - benchmark='xsub', - part='train', - pre_norm=True): - if ignored_sample_path is not None: - with open(ignored_sample_path, 'r') as f: - ignored_samples = [ - line.strip() + '.skeleton' for line in f.readlines() - ] - else: - ignored_samples = [] - sample_name = [] - sample_label = [] - total_frames = [] - results = [] +def get_names_and_labels(data_path: str, + task: str, + benchmark: str, + ignored_samples: Optional[List[str]] = None) -> Tuple: + train_names = [] + train_labels = [] + val_names = [] + val_labels = [] for filename in os.listdir(data_path): - if filename in ignored_samples: + if ignored_samples is not None and filename in ignored_samples: continue setup_number = int(filename[filename.find('S') + 1:filename.find('S') + @@ -266,54 +125,71 @@ def gendata(data_path, istraining = (subject_id in training_subjects_120) elif benchmark == 'xview': istraining = (camera_id in training_cameras_60) - elif benchmark == 'xsetup': + elif benchmark == 'xset': istraining = (setup_number in training_setups_120) else: raise ValueError() - if part == 'train': - issample = istraining - elif part == 'val': - issample = not (istraining) + if istraining: + train_names.append(filename) + train_labels.append(action_class - 1) else: - raise ValueError() + val_names.append(filename) + val_labels.append(action_class - 1) + + return train_names, train_labels, val_names, val_labels - if issample: - sample_name.append(filename) - sample_label.append(action_class - 1) - fp = np.zeros((len(sample_label), 3, max_frame, num_joint, max_body_true), - dtype=np.float32) - prog_bar = mmcv.ProgressBar(len(sample_name)) - for i, s in enumerate(sample_name): - data = read_xyz( +def gendata(data_path: str, + out_path: str, + ignored_sample_path: Optional[str] = None, + task: str = 'ntu60') -> None: + split = dict() + + if ignored_sample_path is not None: + with open(ignored_sample_path, 'r') as f: + ignored_samples = [ + line.strip() + '.skeleton' for line in f.readlines() + ] + else: + ignored_samples = [] + + if task == 'ntu60': + benchmarks = ['xsub', 'xview'] + else: + benchmarks = ['xsub', 'xset'] + + names = None + labels = None + for benchmark in benchmarks: + train_names, train_labels, val_names, val_labels = \ + get_names_and_labels(data_path, task, benchmark, ignored_samples) + split[f'{benchmark}_train'] = [osp.splitext(s)[0] for s in train_names] + split[f'{benchmark}_val'] = [osp.splitext(s)[0] for s in val_names] + + if names is None and labels is None: + names = train_names + val_names + labels = train_labels + val_labels + + results = [] + + prog_bar = mmengine.ProgressBar(len(names)) + for i, s in enumerate(names): + ske = read_xyz( osp.join(data_path, s), max_body=max_body_kinect, - num_joint=num_joint).astype(np.float32) - fp[i, :, 0:data.shape[1], :, :] = data - total_frames.append(data.shape[1]) - prog_bar.update() - - if pre_norm: - fp = pre_normalization(fp) + num_joint=num_joint).astype(np.float16) - prog_bar = mmcv.ProgressBar(len(sample_name)) - for i, s in enumerate(sample_name): anno = dict() - anno['total_frames'] = total_frames[i] - anno['keypoint'] = fp[i, :, 0:total_frames[i], :, :].transpose( - 3, 1, 2, 0) # C T V M -> M T V C anno['frame_dir'] = 
osp.splitext(s)[0] - anno['img_shape'] = (1080, 1920) - anno['original_shape'] = (1080, 1920) - anno['label'] = sample_label[i] - + anno['label'] = labels[i] + anno['keypoint'] = ske + anno['total_frames'] = ske.shape[1] results.append(anno) prog_bar.update() - output_path = '{}/{}.pkl'.format(out_path, part) - mmcv.dump(results, output_path) - print(f'{benchmark}-{part} finish~!') + annotations = {'split': split, 'annotations': results} + mmengine.dump(annotations, f'{out_path}/{task}_3d.pkl') if __name__ == '__main__': @@ -329,27 +205,13 @@ def gendata(data_path, type=str, default='NTU_RGBD_samples_with_missing_skeletons.txt') parser.add_argument( - '--out-folder', type=str, default='../../../data/ntu60/') + '--out-folder', type=str, default='../../../data/skeleton/') parser.add_argument('--task', type=str, default='ntu60') args = parser.parse_args() assert args.task in ['ntu60', 'ntu120'] - if args.task == 'ntu60': - benchmark = ['xsub', 'xview'] - else: - benchmark = ['xsub', 'xsetup'] - part = ['train', 'val'] - - for b in benchmark: - for p in part: - out_path = osp.join(args.out_folder, b) - if not osp.exists(out_path): - os.makedirs(out_path) - gendata( - args.data_path, - out_path, - args.ignored_sample_path, - args.task, - benchmark=b, - part=p) + mmengine.mkdir_or_exist(args.out_folder) + + gendata(args.data_path, args.out_folder, args.ignored_sample_path, + args.task) From b7911b0114ba1fc7a4684fff751e2858c6d408b7 Mon Sep 17 00:00:00 2001 From: cir7 <33249023+cir7@users.noreply.github.com> Date: Fri, 25 Nov 2022 15:31:05 +0800 Subject: [PATCH 22/57] [Improve] support gradcam (#2049) Co-authored-by: daiwenxun.vendor --- demo/README.md | 12 +- .../faster-rcnn_r50_fpn_2x_coco_infer.py | 0 .../i3d_r50_32x2x1_rawframes_infer.py | 27 ++ .../i3d_r50_32x2x1_video_infer.py | 28 +++ ...hrnet-w32_8xb64-210e_coco-256x192_infer.py | 0 .../tsn_r50_1x1x8_rawframes_infer.py | 27 ++ .../demo_configs/tsn_r50_1x1x8_video_infer.py | 28 +++ demo/demo_gradcam.py | 207 +++++++++++++++ demo/demo_skeleton.py | 4 +- docs/en/user_guides/3_inference.md | 8 +- mmaction/apis/inference.py | 4 +- mmaction/testing/__init__.py | 13 + .../base.py => mmaction/testing/_utils.py | 25 -- mmaction/utils/gradcam_utils.py | 107 ++++---- tests/models/__init__.py | 13 - tests/models/backbones/test_agcn.py | 2 +- tests/models/backbones/test_c2d.py | 2 +- tests/models/backbones/test_c3d.py | 2 +- tests/models/backbones/test_mobilenet_v2.py | 2 +- .../models/backbones/test_mobilenet_v2_tsm.py | 2 +- tests/models/backbones/test_resnet.py | 2 +- tests/models/backbones/test_resnet2plus1d.py | 2 +- tests/models/backbones/test_resnet3d.py | 2 +- tests/models/backbones/test_resnet3d_csn.py | 2 +- .../backbones/test_resnet3d_slowfast.py | 2 +- .../backbones/test_resnet3d_slowonly.py | 2 +- tests/models/backbones/test_resnet_audio.py | 2 +- tests/models/backbones/test_resnet_tin.py | 2 +- tests/models/backbones/test_resnet_tsm.py | 2 +- tests/models/backbones/test_stgcn.py | 2 +- tests/models/backbones/test_swin.py | 2 +- tests/models/backbones/test_tanet.py | 2 +- tests/models/backbones/test_timesformer.py | 2 +- tests/models/backbones/test_x3d.py | 2 +- tests/models/localizers/test_bmn.py | 2 +- tests/models/localizers/test_pem.py | 2 +- tests/models/localizers/test_tem.py | 2 +- tests/models/necks/test_tpn.py | 2 +- tests/models/recognizers/test_recognizer2d.py | 3 +- tests/models/recognizers/test_recognizer3d.py | 2 +- .../models/recognizers/test_recognizer_gcn.py | 3 +- tests/models/utils/__init__.py | 1 + 
tests/models/utils/test_gradcam.py | 235 ++++++++++++++++++ 43 files changed, 667 insertions(+), 126 deletions(-) rename demo/{skeleton_demo_cfg => demo_configs}/faster-rcnn_r50_fpn_2x_coco_infer.py (100%) create mode 100644 demo/demo_configs/i3d_r50_32x2x1_rawframes_infer.py create mode 100644 demo/demo_configs/i3d_r50_32x2x1_video_infer.py rename demo/{skeleton_demo_cfg => demo_configs}/td-hm_hrnet-w32_8xb64-210e_coco-256x192_infer.py (100%) create mode 100644 demo/demo_configs/tsn_r50_1x1x8_rawframes_infer.py create mode 100644 demo/demo_configs/tsn_r50_1x1x8_video_infer.py create mode 100644 demo/demo_gradcam.py create mode 100644 mmaction/testing/__init__.py rename tests/models/base.py => mmaction/testing/_utils.py (84%) delete mode 100644 tests/models/__init__.py create mode 100644 tests/models/utils/__init__.py create mode 100644 tests/models/utils/test_gradcam.py diff --git a/demo/README.md b/demo/README.md index 93f85fad85..b8a033cadf 100644 --- a/demo/README.md +++ b/demo/README.md @@ -222,18 +222,18 @@ or use checkpoint url from `configs/` to directly load corresponding checkpoint, 1. Get GradCAM results of a I3D model, using a video file as input and then generate an gif file with 10 fps. ```shell - python demo/demo_gradcam.py configs/recognition/i3d/i3d_r50_video_inference_32x2x1_100e_kinetics400_rgb.py \ - checkpoints/i3d_r50_video_32x2x1_100e_kinetics400_rgb_20200826-e31c6f52.pth demo/demo.mp4 \ + python demo/demo_gradcam.py demo/demo_configs/i3d_r50_32x2x1_video_infer.py \ + checkpoints/i3d_imagenet-pretrained-r50_8xb8-32x2x1-100e_kinetics400-rgb_20220812-e213c223.pth demo/demo.mp4 \ --target-layer-name backbone/layer4/1/relu --fps 10 \ --out-filename demo/demo_gradcam.gif ``` -2. Get GradCAM results of a TSM model, using a video file as input and then generate an gif file, loading checkpoint from url. +2. Get GradCAM results of a TSN model, using a video file as input and then generate an gif file, loading checkpoint from url. ```shell - python demo/demo_gradcam.py configs/recognition/tsm/tsm_r50_video_inference_1x1x8_100e_kinetics400_rgb.py \ - https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_video_1x1x8_100e_kinetics400_rgb/tsm_r50_video_1x1x8_100e_kinetics400_rgb_20200702-a77f4328.pth \ - demo/demo.mp4 --target-layer-name backbone/layer4/1/relu --out-filename demo/demo_gradcam_tsm.gif + python demo/demo_gradcam.py demo/demo_configs/tsn_r50_1x1x8_video_infer.py \ + https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-dense-1x1x5-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-dense-1x1x5-100e_kinetics400-rgb_20220906-dcbc6e01.pth \ + demo/demo.mp4 --target-layer-name backbone/layer4/1/relu --out-filename demo/demo_gradcam_tsn.gif ``` ## Webcam demo diff --git a/demo/skeleton_demo_cfg/faster-rcnn_r50_fpn_2x_coco_infer.py b/demo/demo_configs/faster-rcnn_r50_fpn_2x_coco_infer.py similarity index 100% rename from demo/skeleton_demo_cfg/faster-rcnn_r50_fpn_2x_coco_infer.py rename to demo/demo_configs/faster-rcnn_r50_fpn_2x_coco_infer.py diff --git a/demo/demo_configs/i3d_r50_32x2x1_rawframes_infer.py b/demo/demo_configs/i3d_r50_32x2x1_rawframes_infer.py new file mode 100644 index 0000000000..6c7fc421e2 --- /dev/null +++ b/demo/demo_configs/i3d_r50_32x2x1_rawframes_infer.py @@ -0,0 +1,27 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
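+# Inference-only demo config (used e.g. by demo_gradcam.py with `--use-frames`);
+# `ann_file` and `data_prefix` are left as None placeholders because the demo
+# scripts build their inputs on the fly from the given rawframe directory.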
+_base_ = ['../../configs/_base_/models/i3d_r50.py'] + +# dataset settings +dataset_type = 'RawframeDataset' +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] + +test_dataloader = dict( + batch_size=1, + num_workers=2, + dataset=dict( + type=dataset_type, + ann_file=None, + data_prefix=None, + pipeline=test_pipeline)) diff --git a/demo/demo_configs/i3d_r50_32x2x1_video_infer.py b/demo/demo_configs/i3d_r50_32x2x1_video_infer.py new file mode 100644 index 0000000000..e6a54430d7 --- /dev/null +++ b/demo/demo_configs/i3d_r50_32x2x1_video_infer.py @@ -0,0 +1,28 @@ +# Copyright (c) OpenMMLab. All rights reserved. +_base_ = ['../../configs/_base_/models/i3d_r50.py'] + +# dataset settings +dataset_type = 'VideoDataset' +test_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=32, + frame_interval=2, + num_clips=1, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] + +test_dataloader = dict( + batch_size=1, + num_workers=2, + dataset=dict( + type=dataset_type, + ann_file=None, + data_prefix=None, + pipeline=test_pipeline)) diff --git a/demo/skeleton_demo_cfg/td-hm_hrnet-w32_8xb64-210e_coco-256x192_infer.py b/demo/demo_configs/td-hm_hrnet-w32_8xb64-210e_coco-256x192_infer.py similarity index 100% rename from demo/skeleton_demo_cfg/td-hm_hrnet-w32_8xb64-210e_coco-256x192_infer.py rename to demo/demo_configs/td-hm_hrnet-w32_8xb64-210e_coco-256x192_infer.py diff --git a/demo/demo_configs/tsn_r50_1x1x8_rawframes_infer.py b/demo/demo_configs/tsn_r50_1x1x8_rawframes_infer.py new file mode 100644 index 0000000000..0ab9fb9329 --- /dev/null +++ b/demo/demo_configs/tsn_r50_1x1x8_rawframes_infer.py @@ -0,0 +1,27 @@ +# Copyright (c) OpenMMLab. All rights reserved. +_base_ = ['../../configs/_base_/models/tsn_r50.py'] + +# dataset settings +dataset_type = 'RawframeDataset' +test_pipeline = [ + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='RawFrameDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='FormatShape', input_format='NCHW'), + dict(type='PackActionInputs') +] + +test_dataloader = dict( + batch_size=1, + num_workers=2, + dataset=dict( + type=dataset_type, + ann_file=None, + data_prefix=None, + pipeline=test_pipeline)) diff --git a/demo/demo_configs/tsn_r50_1x1x8_video_infer.py b/demo/demo_configs/tsn_r50_1x1x8_video_infer.py new file mode 100644 index 0000000000..8d8af995ca --- /dev/null +++ b/demo/demo_configs/tsn_r50_1x1x8_video_infer.py @@ -0,0 +1,28 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
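+# Video-input counterpart of the rawframes config above; this is the TSN config
+# referenced by the GradCAM example in demo/README.md.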
+_base_ = ['../../configs/_base_/models/tsn_r50.py'] + +# dataset settings +dataset_type = 'VideoDataset' +test_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='FormatShape', input_format='NCHW'), + dict(type='PackActionInputs') +] + +test_dataloader = dict( + batch_size=1, + num_workers=2, + dataset=dict( + type=dataset_type, + ann_file=None, + data_prefix=None, + pipeline=test_pipeline)) diff --git a/demo/demo_gradcam.py b/demo/demo_gradcam.py new file mode 100644 index 0000000000..9a97145d37 --- /dev/null +++ b/demo/demo_gradcam.py @@ -0,0 +1,207 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os +import os.path as osp +from typing import Dict, List, Optional, Tuple + +import mmcv +import numpy as np +import torch.nn as nn +from mmengine import Config, DictAction +from mmengine.dataset import Compose, pseudo_collate + +from mmaction.apis import init_recognizer +from mmaction.utils import GradCAM, register_all_modules + + +def parse_args(): + parser = argparse.ArgumentParser(description='MMAction2 GradCAM demo') + parser.add_argument('config', help='test config file path') + parser.add_argument('checkpoint', help='checkpoint file/url') + parser.add_argument('video', help='video file/url or rawframes directory') + parser.add_argument( + '--use-frames', + default=False, + action='store_true', + help='whether to use rawframes as input') + parser.add_argument( + '--device', type=str, default='cuda:0', help='CPU/CUDA device option') + parser.add_argument( + '--target-layer-name', + type=str, + default='backbone/layer4/1/relu', + help='GradCAM target layer name') + parser.add_argument('--out-filename', default=None, help='output filename') + parser.add_argument('--fps', default=5, type=int) + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + default={}, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. For example, ' + "'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'") + parser.add_argument( + '--target-resolution', + nargs=2, + default=None, + type=int, + help='Target resolution (w, h) for resizing the frames when using a ' + 'video as input. If either dimension is set to -1, the frames are ' + 'resized by keeping the existing aspect ratio') + parser.add_argument( + '--resize-algorithm', + default='bilinear', + help='resize algorithm applied to generate video & gif') + + args = parser.parse_args() + return args + + +def build_inputs(model: nn.Module, + video_path: str, + use_frames: bool = False) -> Dict: + """build inputs for GradCAM. + + Note that, building inputs for GradCAM is exactly the same as building + inputs for Recognizer test stage. Codes from `inference_recognizer`. + + Args: + model (nn.Module): Recognizer model. + video_path (str): video file/url or rawframes directory. + use_frames (bool): whether to use rawframes as input. + Defaults to False. + + Returns: + dict: Both GradCAM inputs and Recognizer test stage inputs, + including two keys, ``inputs`` and ``data_samples``. 
+ """ + if not (osp.exists(video_path) or video_path.startswith('http')): + raise RuntimeError(f"'{video_path}' is missing") + + if osp.isfile(video_path) and use_frames: + raise RuntimeError( + f"'{video_path}' is a video file, not a rawframe directory") + if osp.isdir(video_path) and not use_frames: + raise RuntimeError( + f"'{video_path}' is a rawframe directory, not a video file") + + cfg = model.cfg + + # build the data pipeline + test_pipeline = cfg.test_pipeline + test_pipeline = Compose(test_pipeline) + # prepare data + if use_frames: + filename_tmpl = cfg.test_dataloader.dataset.get( + 'filename_tmpl', 'img_{:05}.jpg') + start_index = cfg.test_dataloader.dataset.get('start_index', 1) + data = dict( + frame_dir=video_path, + total_frames=len(os.listdir(video_path)), + label=-1, + start_index=start_index, + filename_tmpl=filename_tmpl, + modality='RGB') + else: + start_index = cfg.test_dataloader.dataset.get('start_index', 0) + data = dict( + filename=video_path, + label=-1, + start_index=start_index, + modality='RGB') + data = test_pipeline(data) + data = pseudo_collate([data]) + + return data + + +def _resize_frames(frame_list: List[np.ndarray], + scale: Optional[Tuple[int]] = None, + keep_ratio: bool = True, + interpolation: str = 'bilinear') -> List[np.ndarray]: + """Resize frames according to given scale. + + Codes are modified from `mmaction/datasets/transforms/processing.py`, + `Resize` class. + + Args: + frame_list (list[np.ndarray]): Frames to be resized. + scale (tuple[int]): If keep_ratio is True, it serves as scaling + factor or maximum size: the image will be rescaled as large + as possible within the scale. Otherwise, it serves as (w, h) + of output size. + keep_ratio (bool): If set to True, Images will be resized without + changing the aspect ratio. Otherwise, it will resize images to a + given size. Defaults to True. + interpolation (str): Algorithm used for interpolation: + 'nearest' | 'bilinear'. Defaults to ``'bilinear'``. + + Returns: + list[np.ndarray]: Resized frames. 
+ """ + if scale is None or (scale[0] == -1 and scale[1] == -1): + return frame_list + scale = tuple(scale) + max_long_edge = max(scale) + max_short_edge = min(scale) + if max_short_edge == -1: + scale = (np.inf, max_long_edge) + + img_h, img_w, _ = frame_list[0].shape + + if keep_ratio: + new_w, new_h = mmcv.rescale_size((img_w, img_h), scale) + else: + new_w, new_h = scale + + frame_list = [ + mmcv.imresize(img, (new_w, new_h), interpolation=interpolation) + for img in frame_list + ] + + return frame_list + + +def main(): + args = parse_args() + + # Register all modules in mmaction2 into the registries + register_all_modules() + + cfg = Config.fromfile(args.config) + cfg.merge_from_dict(args.cfg_options) + + # Build the recognizer from a config file and checkpoint file/url + model = init_recognizer(cfg, args.checkpoint, device=args.device) + + inputs = build_inputs(model, args.video, use_frames=args.use_frames) + gradcam = GradCAM(model, args.target_layer_name) + results = gradcam(inputs) + + if args.out_filename is not None: + try: + from moviepy.editor import ImageSequenceClip + except ImportError: + raise ImportError('Please install moviepy to enable output file.') + + # frames_batches shape [B, T, H, W, 3], in RGB order + frames_batches = (results[0] * 255.).numpy().astype(np.uint8) + frames = frames_batches.reshape(-1, *frames_batches.shape[-3:]) + + frame_list = list(frames) + frame_list = _resize_frames( + frame_list, + args.target_resolution, + interpolation=args.resize_algorithm) + + video_clips = ImageSequenceClip(frame_list, fps=args.fps) + out_type = osp.splitext(args.out_filename)[1][1:] + if out_type == 'gif': + video_clips.write_gif(args.out_filename) + else: + video_clips.write_videofile(args.out_filename, remove_temp=True) + + +if __name__ == '__main__': + main() diff --git a/demo/demo_skeleton.py b/demo/demo_skeleton.py index 33973b3930..98ce13f1bd 100644 --- a/demo/demo_skeleton.py +++ b/demo/demo_skeleton.py @@ -45,7 +45,7 @@ def parse_args(): help='skeleton model checkpoint file/url') parser.add_argument( '--det-config', - default='demo/skeleton_demo_cfg/faster-rcnn_r50_fpn_2x_coco_infer.py', + default='demo/demo_configs/faster-rcnn_r50_fpn_2x_coco_infer.py', help='human detection config file path (from mmdet)') parser.add_argument( '--det-checkpoint', @@ -66,7 +66,7 @@ def parse_args(): help='the category id for human detection') parser.add_argument( '--pose-config', - default='demo/skeleton_demo_cfg/' + default='demo/demo_configs/' 'td-hm_hrnet-w32_8xb64-210e_coco-256x192_infer.py', help='human pose estimation config file path (from mmpose)') parser.add_argument( diff --git a/docs/en/user_guides/3_inference.md b/docs/en/user_guides/3_inference.md index 8a603b66ee..2989fe9994 100644 --- a/docs/en/user_guides/3_inference.md +++ b/docs/en/user_guides/3_inference.md @@ -94,11 +94,11 @@ Assume that you are located at `$MMACTION2` . 
python demo/demo_skeleton.py demo/demo_skeleton.mp4 demo/demo_skeleton_out.mp4 \ --config configs/skeleton/posec3d/slowonly_r50_8xb16-u48-240e_ntu60-xsub-keypoint.py \ --checkpoint https://download.openmmlab.com/mmaction/skeleton/posec3d/slowonly_r50_u48_240e_ntu60_xsub_keypoint/slowonly_r50_u48_240e_ntu60_xsub_keypoint-f3adabf1.pth \ - --det-config demo/skeleton_demo_cfg/faster-rcnn_r50_fpn_2x_coco_infer.py \ + --det-config demo/demo_configs/faster-rcnn_r50_fpn_2x_coco_infer.py \ --det-checkpoint http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_2x_coco/faster_rcnn_r50_fpn_2x_coco_bbox_mAP-0.384_20200504_210434-a5d8aa15.pth \ --det-score-thr 0.9 \ --det-cat-id 0 \ - --pose-config demo/skeleton_demo_cfg/td-hm_hrnet-w32_8xb64-210e_coco-256x192_infer.py \ + --pose-config demo/demo_configs/td-hm_hrnet-w32_8xb64-210e_coco-256x192_infer.py \ --pose-checkpoint https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_256x192-c78dce93_20200708.pth \ --label-map tools/data/skeleton/label_map_ntu60.txt ``` @@ -109,11 +109,11 @@ python demo/demo_skeleton.py demo/demo_skeleton.mp4 demo/demo_skeleton_out.mp4 \ python demo/demo_skeleton.py demo/demo_skeleton.mp4 demo/demo_skeleton_out.mp4 \ --config configs/skeleton/stgcn/stgcn_1xb16-80e_ntu60-xsub-keypoint.py \ --checkpoint https://download.openmmlab.com/mmaction/skeleton/stgcn/stgcn_80e_ntu60_xsub_keypoint/stgcn_80e_ntu60_xsub_keypoint-e7bb9653.pth \ - --det-config demo/skeleton_demo_cfg/faster-rcnn_r50_fpn_2x_coco_infer.py \ + --det-config demo/demo_configs/faster-rcnn_r50_fpn_2x_coco_infer.py \ --det-checkpoint http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_2x_coco/faster_rcnn_r50_fpn_2x_coco_bbox_mAP-0.384_20200504_210434-a5d8aa15.pth \ --det-score-thr 0.9 \ --det-cat-id 0 \ - --pose-config demo/skeleton_demo_cfg/td-hm_hrnet-w32_8xb64-210e_coco-256x192_infer.py \ + --pose-config demo/demo_configs/td-hm_hrnet-w32_8xb64-210e_coco-256x192_infer.py \ --pose-checkpoint https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_256x192-c78dce93_20200708.pth \ --label-map tools/data/skeleton/label_map_ntu60.txt ``` diff --git a/mmaction/apis/inference.py b/mmaction/apis/inference.py index 0e3ab2ddac..c67b9a574d 100644 --- a/mmaction/apis/inference.py +++ b/mmaction/apis/inference.py @@ -20,11 +20,11 @@ def init_recognizer(config: Union[str, Path, mmengine.Config], """Initialize a recognizer from config file. Args: - config (Union[str, :obj:`Path`, :obj:`mmengine.Config`]): Config file + config (str or :obj:`Path` or :obj:`mmengine.Config`): Config file path, :obj:`Path` or the config object. checkpoint (str, optional): Checkpoint path/url. If set to None, the model will not load any weights. Defaults to None. - device (Union[str, torch.device]): The desired device of returned + device (str | torch.device): The desired device of returned tensor. Defaults to ``'cuda:0'``. Returns: diff --git a/mmaction/testing/__init__.py b/mmaction/testing/__init__.py new file mode 100644 index 0000000000..9f76126057 --- /dev/null +++ b/mmaction/testing/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
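+# Expose the shared test helpers (moved from tests/models/base.py to
+# mmaction/testing/_utils.py in this patch) as the public `mmaction.testing`
+# package used by the unit tests.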
+from ._utils import (check_norm_state, generate_backbone_demo_inputs, + generate_detector_demo_inputs, + generate_recognizer_demo_inputs, get_audio_recognizer_cfg, + get_cfg, get_detector_cfg, get_localizer_cfg, + get_recognizer_cfg, get_skeletongcn_cfg) + +__all__ = [ + 'check_norm_state', 'generate_backbone_demo_inputs', + 'generate_recognizer_demo_inputs', 'get_cfg', 'get_recognizer_cfg', + 'get_audio_recognizer_cfg', 'get_localizer_cfg', 'get_detector_cfg', + 'generate_detector_demo_inputs', 'get_skeletongcn_cfg' +] diff --git a/tests/models/base.py b/mmaction/testing/_utils.py similarity index 84% rename from tests/models/base.py rename to mmaction/testing/_utils.py index 4d74f531da..5e6dbf649d 100644 --- a/tests/models/base.py +++ b/mmaction/testing/_utils.py @@ -104,31 +104,6 @@ def random_label(n): return dict(img=[img], proposals=[proposals], img_metas=[img_metas]) -def generate_gradcam_inputs(input_shape=(1, 3, 3, 224, 224), model_type='2D'): - """Create a superset of inputs needed to run gradcam. - - Args: - input_shape (tuple[int]): input batch dimensions. - Default: (1, 3, 3, 224, 224). - model_type (str): Model type for data generation, from {'2D', '3D'}. - Default:'2D' - return: - dict: model inputs, including two keys, ``imgs`` and ``label``. - """ - imgs = np.random.random(input_shape) - - if model_type in ['2D', '3D']: - gt_labels = torch.LongTensor([2] * input_shape[0]) - else: - raise ValueError(f'Data type {model_type} is not available') - - inputs = { - 'imgs': torch.FloatTensor(imgs), - 'label': gt_labels, - } - return inputs - - def get_cfg(config_type, fname): """Grab configs necessary to create a recognizer. diff --git a/mmaction/utils/gradcam_utils.py b/mmaction/utils/gradcam_utils.py index 06d0c78b8e..23f124f554 100644 --- a/mmaction/utils/gradcam_utils.py +++ b/mmaction/utils/gradcam_utils.py @@ -1,5 +1,6 @@ # Copyright (c) OpenMMLab. All rights reserved. import torch +import torch.nn as nn import torch.nn.functional as F @@ -11,20 +12,21 @@ class GradCAM: https://github.com/facebookresearch/SlowFast/blob/master/slowfast/visualization/gradcam_utils.py # noqa For more information about GradCAM, please visit: https://arxiv.org/pdf/1610.02391.pdf - """ - def __init__(self, model, target_layer_name, colormap='viridis'): - """Create GradCAM class with recognizer, target layername & colormap. + Args: + model (nn.Module): the recognizer model to be used. + target_layer_name (str): name of convolutional layer to + be used to get gradients and feature maps from for creating + localization maps. + colormap (str): matplotlib colormap used to create + heatmap. Defaults to 'viridis'. For more information, please visit + https://matplotlib.org/3.3.0/tutorials/colors/colormaps.html + """ - Args: - model (nn.Module): the recognizer model to be used. - target_layer_name (str): name of convolutional layer to - be used to get gradients and feature maps from for creating - localization maps. - colormap (Optional[str]): matplotlib colormap used to create - heatmap. Default: 'viridis'. 
For more information, please visit - https://matplotlib.org/3.3.0/tutorials/colors/colormaps.html - """ + def __init__(self, + model: nn.Module, + target_layer_name: str, + colormap: str = 'viridis') -> None: from ..models.recognizers import Recognizer2D, Recognizer3D if isinstance(model, Recognizer2D): self.is_recognizer2d = True @@ -41,11 +43,9 @@ def __init__(self, model, target_layer_name, colormap='viridis'): import matplotlib.pyplot as plt self.colormap = plt.get_cmap(colormap) - self.data_mean = torch.tensor(model.cfg.img_norm_cfg['mean']) - self.data_std = torch.tensor(model.cfg.img_norm_cfg['std']) self._register_hooks(target_layer_name) - def _register_hooks(self, layer_name): + def _register_hooks(self, layer_name: str) -> None: """Register forward and backward hook to a layer, given layer_name, to obtain gradients and activations. @@ -68,32 +68,38 @@ def get_activations(module, input, output): target_layer.register_forward_hook(get_activations) target_layer.register_backward_hook(get_gradients) - def _calculate_localization_map(self, inputs, use_labels, delta=1e-20): + def _calculate_localization_map(self, + data: dict, + use_labels: bool, + delta=1e-20) -> tuple: """Calculate localization map for all inputs with Grad-CAM. Args: - inputs (dict): model inputs, generated by test pipeline, - at least including two keys, ``imgs`` and ``label``. + data (dict): model inputs, generated by test pipeline, use_labels (bool): Whether to use given labels to generate - localization map. Labels are in ``inputs['label']``. + localization map. delta (float): used in localization map normalization, must be small enough. Please make sure `localization_map_max - localization_map_min >> delta` + Returns: - tuple[torch.Tensor, torch.Tensor]: (localization_map, preds) - localization_map (torch.Tensor): the localization map for - input imgs. - preds (torch.Tensor): Model predictions for `inputs` with - shape (batch_size, num_classes). + localization_map (torch.Tensor): the localization map for + input imgs. + preds (torch.Tensor): Model predictions with shape + (batch_size, num_classes). 
""" - inputs['imgs'] = inputs['imgs'].clone() + inputs = data['inputs'] + # use score before softmax + self.model.cls_head.average_clips = 'score' # model forward & backward - preds = self.model(gradcam=True, **inputs) + results = self.model.test_step(data) + preds = [result.pred_scores.item for result in results] + preds = torch.stack(preds) + if use_labels: - labels = inputs['label'] - if labels.ndim == 1: - labels = labels.unsqueeze(-1) + labels = [result.gt_labels.item for result in results] + labels = torch.stack(labels) score = torch.gather(preds, dim=1, index=labels) else: score = torch.max(preds, dim=-1)[0] @@ -101,12 +107,13 @@ def _calculate_localization_map(self, inputs, use_labels, delta=1e-20): score = torch.sum(score) score.backward() + imgs = torch.stack(inputs) if self.is_recognizer2d: # [batch_size, num_segments, 3, H, W] - b, t, _, h, w = inputs['imgs'].size() + b, t, _, h, w = imgs.size() else: # [batch_size, num_crops*num_clips, 3, clip_len, H, W] - b1, b2, _, t, h, w = inputs['imgs'].size() + b1, b2, _, t, h, w = imgs.size() b = b1 * b2 gradients = self.target_gradients @@ -151,28 +158,31 @@ def _calculate_localization_map(self, inputs, use_labels, delta=1e-20): return localization_map.squeeze(dim=1), preds - def _alpha_blending(self, localization_map, input_imgs, alpha): + def _alpha_blending(self, localization_map: torch.Tensor, + input_imgs: torch.Tensor, + alpha: float) -> torch.Tensor: """Blend heatmaps and model input images and get visulization results. Args: localization_map (torch.Tensor): localization map for all inputs, - generated with Grad-CAM - input_imgs (torch.Tensor): model inputs, normed images. + generated with Grad-CAM. + input_imgs (torch.Tensor): model inputs, raw images. alpha (float): transparency level of the heatmap, in the range [0, 1]. + Returns: torch.Tensor: blending results for localization map and input - images, with shape [B, T, H, W, 3] and pixel values in - RGB order within range [0, 1]. + images, with shape [B, T, H, W, 3] and pixel values in + RGB order within range [0, 1]. """ # localization_map shape [B, T, H, W] localization_map = localization_map.cpu() # heatmap shape [B, T, H, W, 3] in RGB order heatmap = self.colormap(localization_map.detach().numpy()) - heatmap = heatmap[:, :, :, :, :3] + heatmap = heatmap[..., :3] heatmap = torch.from_numpy(heatmap) - + input_imgs = torch.stack(input_imgs) # Permute input imgs to [B, T, H, W, 3], like heatmap if self.is_recognizer2d: # Recognizer2D input (B, T, C, H, W) @@ -184,9 +194,7 @@ def _alpha_blending(self, localization_map, input_imgs, alpha): curr_inp = curr_inp.permute(0, 2, 3, 4, 1) # renormalize input imgs to [0, 1] - curr_inp = curr_inp.cpu() - curr_inp *= self.data_std - curr_inp += self.data_mean + curr_inp = curr_inp.cpu().float() curr_inp /= 255. # alpha blending @@ -194,7 +202,10 @@ def _alpha_blending(self, localization_map, input_imgs, alpha): return blended_imgs - def __call__(self, inputs, use_labels=False, alpha=0.5): + def __call__(self, + data: dict, + use_labels: bool = False, + alpha: float = 0.5) -> tuple: """Visualize the localization maps on their corresponding inputs as heatmap, using Grad-CAM. @@ -204,25 +215,25 @@ def __call__(self, inputs, use_labels=False, alpha=0.5): there are 960(32*10*3) images generated. Args: - inputs (dict): model inputs, generated by test pipeline, - at least including two keys, ``imgs`` and ``label``. + data (dict): model inputs, generated by test pipeline. 
use_labels (bool): Whether to use given labels to generate - localization map. Labels are in ``inputs['label']``. + localization map. alpha (float): transparency level of the heatmap, in the range [0, 1]. + Returns: blended_imgs (torch.Tensor): Visualization results, blended by - localization maps and model inputs. + localization maps and model inputs. preds (torch.Tensor): Model predictions for inputs. """ # localization_map shape [B, T, H, W] # preds shape [batch_size, num_classes] localization_map, preds = self._calculate_localization_map( - inputs, use_labels=use_labels) + data, use_labels=use_labels) # blended_imgs shape [B, T, H, W, 3] - blended_imgs = self._alpha_blending(localization_map, inputs['imgs'], + blended_imgs = self._alpha_blending(localization_map, data['inputs'], alpha) # blended_imgs shape [B, T, H, W, 3] diff --git a/tests/models/__init__.py b/tests/models/__init__.py deleted file mode 100644 index 7ae5f7087a..0000000000 --- a/tests/models/__init__.py +++ /dev/null @@ -1,13 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from .base import (check_norm_state, generate_backbone_demo_inputs, - generate_detector_demo_inputs, generate_gradcam_inputs, - generate_recognizer_demo_inputs, get_audio_recognizer_cfg, - get_cfg, get_detector_cfg, get_localizer_cfg, - get_recognizer_cfg, get_skeletongcn_cfg) - -__all__ = [ - 'check_norm_state', 'generate_backbone_demo_inputs', - 'generate_recognizer_demo_inputs', 'generate_gradcam_inputs', 'get_cfg', - 'get_recognizer_cfg', 'get_audio_recognizer_cfg', 'get_localizer_cfg', - 'get_detector_cfg', 'generate_detector_demo_inputs', 'get_skeletongcn_cfg' -] diff --git a/tests/models/backbones/test_agcn.py b/tests/models/backbones/test_agcn.py index 0012467da2..f774ee6ac6 100644 --- a/tests/models/backbones/test_agcn.py +++ b/tests/models/backbones/test_agcn.py @@ -2,7 +2,7 @@ import torch from mmaction.models import AGCN -from ..base import generate_backbone_demo_inputs +from mmaction.testing import generate_backbone_demo_inputs def test_AGCN_backbone(): diff --git a/tests/models/backbones/test_c2d.py b/tests/models/backbones/test_c2d.py index b874672ea3..4cf27387b9 100644 --- a/tests/models/backbones/test_c2d.py +++ b/tests/models/backbones/test_c2d.py @@ -2,7 +2,7 @@ import torch from mmaction.models import C2D -from ..base import generate_backbone_demo_inputs +from mmaction.testing import generate_backbone_demo_inputs def test_c2d_backbone(): diff --git a/tests/models/backbones/test_c3d.py b/tests/models/backbones/test_c3d.py index eb71f5cce5..03020e01c7 100644 --- a/tests/models/backbones/test_c3d.py +++ b/tests/models/backbones/test_c3d.py @@ -2,7 +2,7 @@ import torch from mmaction.models import C3D -from ..base import generate_backbone_demo_inputs +from mmaction.testing import generate_backbone_demo_inputs def test_c3d_backbone(): diff --git a/tests/models/backbones/test_mobilenet_v2.py b/tests/models/backbones/test_mobilenet_v2.py index 8748f20c82..4ddb8e8091 100644 --- a/tests/models/backbones/test_mobilenet_v2.py +++ b/tests/models/backbones/test_mobilenet_v2.py @@ -4,7 +4,7 @@ from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm from mmaction.models import MobileNetV2 -from ..base import check_norm_state, generate_backbone_demo_inputs +from mmaction.testing import check_norm_state, generate_backbone_demo_inputs def test_mobilenetv2_backbone(): diff --git a/tests/models/backbones/test_mobilenet_v2_tsm.py b/tests/models/backbones/test_mobilenet_v2_tsm.py index ee6b349e3c..34ad6b86a9 100644 --- 
a/tests/models/backbones/test_mobilenet_v2_tsm.py +++ b/tests/models/backbones/test_mobilenet_v2_tsm.py @@ -2,7 +2,7 @@ import torch from mmaction.models import MobileNetV2TSM -from ..base import generate_backbone_demo_inputs +from mmaction.testing import generate_backbone_demo_inputs def test_mobilenetv2_tsm_backbone(): diff --git a/tests/models/backbones/test_resnet.py b/tests/models/backbones/test_resnet.py index a4c6064545..b2629aed2f 100644 --- a/tests/models/backbones/test_resnet.py +++ b/tests/models/backbones/test_resnet.py @@ -5,7 +5,7 @@ from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm from mmaction.models import ResNet -from ..base import check_norm_state, generate_backbone_demo_inputs +from mmaction.testing import check_norm_state, generate_backbone_demo_inputs def test_resnet_backbone(): diff --git a/tests/models/backbones/test_resnet2plus1d.py b/tests/models/backbones/test_resnet2plus1d.py index fe0a8c25d5..e74a05ae48 100644 --- a/tests/models/backbones/test_resnet2plus1d.py +++ b/tests/models/backbones/test_resnet2plus1d.py @@ -4,7 +4,7 @@ from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm from mmaction.models import ResNet2Plus1d -from ..base import generate_backbone_demo_inputs +from mmaction.testing import generate_backbone_demo_inputs def test_resnet2plus1d_backbone(): diff --git a/tests/models/backbones/test_resnet3d.py b/tests/models/backbones/test_resnet3d.py index fdc3cc5d3a..baa1b8fe55 100644 --- a/tests/models/backbones/test_resnet3d.py +++ b/tests/models/backbones/test_resnet3d.py @@ -7,7 +7,7 @@ from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm from mmaction.models import ResNet3d, ResNet3dLayer -from ..base import check_norm_state, generate_backbone_demo_inputs +from mmaction.testing import check_norm_state, generate_backbone_demo_inputs @pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit') diff --git a/tests/models/backbones/test_resnet3d_csn.py b/tests/models/backbones/test_resnet3d_csn.py index aa14de4b4b..d41f05551a 100644 --- a/tests/models/backbones/test_resnet3d_csn.py +++ b/tests/models/backbones/test_resnet3d_csn.py @@ -5,7 +5,7 @@ from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm from mmaction.models import ResNet3dCSN -from ..base import generate_backbone_demo_inputs +from mmaction.testing import generate_backbone_demo_inputs def test_resnet_csn_backbone(): diff --git a/tests/models/backbones/test_resnet3d_slowfast.py b/tests/models/backbones/test_resnet3d_slowfast.py index 7cb53473cf..a3de73a620 100644 --- a/tests/models/backbones/test_resnet3d_slowfast.py +++ b/tests/models/backbones/test_resnet3d_slowfast.py @@ -4,7 +4,7 @@ from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm from mmaction.models import ResNet3dSlowFast -from ..base import generate_backbone_demo_inputs +from mmaction.testing import generate_backbone_demo_inputs def test_slowfast_backbone(): diff --git a/tests/models/backbones/test_resnet3d_slowonly.py b/tests/models/backbones/test_resnet3d_slowonly.py index 8397d4356b..9603469c37 100644 --- a/tests/models/backbones/test_resnet3d_slowonly.py +++ b/tests/models/backbones/test_resnet3d_slowonly.py @@ -3,7 +3,7 @@ import torch from mmaction.models import ResNet3dSlowOnly -from ..base import generate_backbone_demo_inputs +from mmaction.testing import generate_backbone_demo_inputs def test_slowonly_backbone(): diff --git a/tests/models/backbones/test_resnet_audio.py b/tests/models/backbones/test_resnet_audio.py index 8913392771..6c22bd137a 100644 --- 
a/tests/models/backbones/test_resnet_audio.py +++ b/tests/models/backbones/test_resnet_audio.py @@ -2,7 +2,7 @@ import torch from mmaction.models import ResNetAudio -from ..base import generate_backbone_demo_inputs +from mmaction.testing import generate_backbone_demo_inputs def test_resnet_audio_backbone(): diff --git a/tests/models/backbones/test_resnet_tin.py b/tests/models/backbones/test_resnet_tin.py index c1e75e0a9b..9a1662e333 100644 --- a/tests/models/backbones/test_resnet_tin.py +++ b/tests/models/backbones/test_resnet_tin.py @@ -4,7 +4,7 @@ import torch.nn as nn from mmaction.models import ResNetTIN -from ..base import generate_backbone_demo_inputs +from mmaction.testing import generate_backbone_demo_inputs @pytest.mark.skipif( diff --git a/tests/models/backbones/test_resnet_tsm.py b/tests/models/backbones/test_resnet_tsm.py index 5adb8609b7..ddac948354 100644 --- a/tests/models/backbones/test_resnet_tsm.py +++ b/tests/models/backbones/test_resnet_tsm.py @@ -7,7 +7,7 @@ from mmaction.models import ResNetTSM from mmaction.models.backbones.resnet_tsm import NL3DWrapper -from ..base import generate_backbone_demo_inputs +from mmaction.testing import generate_backbone_demo_inputs def test_resnet_tsm_backbone(): diff --git a/tests/models/backbones/test_stgcn.py b/tests/models/backbones/test_stgcn.py index fdb28bff01..7ba4ec2ab9 100644 --- a/tests/models/backbones/test_stgcn.py +++ b/tests/models/backbones/test_stgcn.py @@ -2,7 +2,7 @@ import torch from mmaction.models import STGCN -from ..base import generate_backbone_demo_inputs +from mmaction.testing import generate_backbone_demo_inputs def test_stgcn_backbone(): diff --git a/tests/models/backbones/test_swin.py b/tests/models/backbones/test_swin.py index 8c454e6c70..b75fd6ee20 100644 --- a/tests/models/backbones/test_swin.py +++ b/tests/models/backbones/test_swin.py @@ -3,7 +3,7 @@ import torch from mmaction.models import SwinTransformer3D -from ..base import generate_backbone_demo_inputs +from mmaction.testing import generate_backbone_demo_inputs def test_swin_backbone(): diff --git a/tests/models/backbones/test_tanet.py b/tests/models/backbones/test_tanet.py index 4b7601e95f..ea8ee4733b 100644 --- a/tests/models/backbones/test_tanet.py +++ b/tests/models/backbones/test_tanet.py @@ -3,7 +3,7 @@ import torch from mmaction.models import TANet -from ..base import generate_backbone_demo_inputs +from mmaction.testing import generate_backbone_demo_inputs def test_tanet_backbone(): diff --git a/tests/models/backbones/test_timesformer.py b/tests/models/backbones/test_timesformer.py index 337d49b105..e33f37c0f5 100644 --- a/tests/models/backbones/test_timesformer.py +++ b/tests/models/backbones/test_timesformer.py @@ -3,7 +3,7 @@ import torch from mmaction.models import TimeSformer -from ..base import generate_backbone_demo_inputs +from mmaction.testing import generate_backbone_demo_inputs def test_timesformer_backbone(): diff --git a/tests/models/backbones/test_x3d.py b/tests/models/backbones/test_x3d.py index a8092f5c7e..2bf417d4ea 100644 --- a/tests/models/backbones/test_x3d.py +++ b/tests/models/backbones/test_x3d.py @@ -4,7 +4,7 @@ from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm from mmaction.models import X3D -from ..base import check_norm_state, generate_backbone_demo_inputs +from mmaction.testing import check_norm_state, generate_backbone_demo_inputs def test_x3d_backbone(): diff --git a/tests/models/localizers/test_bmn.py b/tests/models/localizers/test_bmn.py index f39a96e81a..6440a9fa9c 100644 --- 
a/tests/models/localizers/test_bmn.py +++ b/tests/models/localizers/test_bmn.py @@ -9,8 +9,8 @@ from mmaction.registry import MODELS from mmaction.structures import ActionDataSample +from mmaction.testing import get_localizer_cfg from mmaction.utils import register_all_modules -from ..base import get_localizer_cfg register_all_modules() diff --git a/tests/models/localizers/test_pem.py b/tests/models/localizers/test_pem.py index 623e4acff7..fe1fbf41e5 100644 --- a/tests/models/localizers/test_pem.py +++ b/tests/models/localizers/test_pem.py @@ -8,8 +8,8 @@ from mmaction.registry import MODELS from mmaction.structures import ActionDataSample +from mmaction.testing import get_localizer_cfg from mmaction.utils import register_all_modules -from ..base import get_localizer_cfg register_all_modules() diff --git a/tests/models/localizers/test_tem.py b/tests/models/localizers/test_tem.py index a7e9d9c236..31697867f7 100644 --- a/tests/models/localizers/test_tem.py +++ b/tests/models/localizers/test_tem.py @@ -9,8 +9,8 @@ from mmaction.registry import MODELS from mmaction.structures import ActionDataSample +from mmaction.testing import get_localizer_cfg from mmaction.utils import register_all_modules -from ..base import get_localizer_cfg register_all_modules() diff --git a/tests/models/necks/test_tpn.py b/tests/models/necks/test_tpn.py index a46e1ab493..1e9387aa39 100644 --- a/tests/models/necks/test_tpn.py +++ b/tests/models/necks/test_tpn.py @@ -7,7 +7,7 @@ from mmaction.models import TPN from mmaction.structures import ActionDataSample -from ..base import generate_backbone_demo_inputs +from mmaction.testing import generate_backbone_demo_inputs def get_label(label_): diff --git a/tests/models/recognizers/test_recognizer2d.py b/tests/models/recognizers/test_recognizer2d.py index 7e6f2099d4..e73c7a7dcf 100644 --- a/tests/models/recognizers/test_recognizer2d.py +++ b/tests/models/recognizers/test_recognizer2d.py @@ -2,8 +2,9 @@ import torch from mmaction.registry import MODELS +from mmaction.testing import (generate_recognizer_demo_inputs, + get_recognizer_cfg) from mmaction.utils import register_all_modules -from ..base import generate_recognizer_demo_inputs, get_recognizer_cfg def test_tsn(): diff --git a/tests/models/recognizers/test_recognizer3d.py b/tests/models/recognizers/test_recognizer3d.py index 07a4969864..7d80de00fb 100644 --- a/tests/models/recognizers/test_recognizer3d.py +++ b/tests/models/recognizers/test_recognizer3d.py @@ -5,8 +5,8 @@ from mmaction.registry import MODELS from mmaction.structures import ActionDataSample +from mmaction.testing import get_recognizer_cfg from mmaction.utils import register_all_modules -from ..base import get_recognizer_cfg def train_test_step(cfg, input_shape): diff --git a/tests/models/recognizers/test_recognizer_gcn.py b/tests/models/recognizers/test_recognizer_gcn.py index aa56b79cfd..36314bc618 100644 --- a/tests/models/recognizers/test_recognizer_gcn.py +++ b/tests/models/recognizers/test_recognizer_gcn.py @@ -2,8 +2,9 @@ import torch from mmaction.registry import MODELS +from mmaction.testing import (generate_recognizer_demo_inputs, + get_skeletongcn_cfg) from mmaction.utils import register_all_modules -from ..base import generate_recognizer_demo_inputs, get_skeletongcn_cfg def test_recognizer_gcn(): diff --git a/tests/models/utils/__init__.py b/tests/models/utils/__init__.py new file mode 100644 index 0000000000..ef101fec61 --- /dev/null +++ b/tests/models/utils/__init__.py @@ -0,0 +1 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
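Editorial note: the hunks above move the shared test utilities from the private `tests/..base` module into the public `mmaction.testing` package, so downstream tests only need the new import path. As a reference, here is a minimal sketch of how the relocated helpers are typically used; it mirrors the existing ResNet backbone test and is illustrative only, not part of the patch:

```python
# Illustrative usage of the relocated helpers (patterned after
# tests/models/backbones/test_resnet.py; shapes are just examples).
import torch

from mmaction.models import ResNet
from mmaction.testing import check_norm_state, generate_backbone_demo_inputs

# Random demo clip with layout (N, C, H, W) for a 2D backbone.
imgs = generate_backbone_demo_inputs((1, 3, 64, 64))

resnet50 = ResNet(depth=50)
resnet50.init_weights()
resnet50.eval()

# In eval mode every norm layer should report training=False.
assert check_norm_state(resnet50.modules(), False)

feat = resnet50(imgs)
assert feat.shape == torch.Size([1, 2048, 2, 2])
```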
diff --git a/tests/models/utils/test_gradcam.py b/tests/models/utils/test_gradcam.py new file mode 100644 index 0000000000..d1a39ef87c --- /dev/null +++ b/tests/models/utils/test_gradcam.py @@ -0,0 +1,235 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import platform + +import pytest +import torch + +from mmaction.registry import MODELS +from mmaction.structures import ActionDataSample +from mmaction.testing import get_recognizer_cfg +from mmaction.utils import register_all_modules +from mmaction.utils.gradcam_utils import GradCAM + +register_all_modules() + + +def _get_target_shapes(input_shape, num_classes=400, model_type='2D'): + if model_type not in ['2D', '3D']: + raise ValueError(f'Data type {model_type} is not available') + + preds_target_shape = (input_shape[0], num_classes) + if model_type == '3D': + # input shape (batch_size, num_crops*num_clips, C, clip_len, H, W) + # target shape (batch_size*num_crops*num_clips, clip_len, H, W, C) + blended_imgs_target_shape = (input_shape[0] * input_shape[1], + input_shape[3], input_shape[4], + input_shape[5], input_shape[2]) + else: + # input shape (batch_size, num_segments, C, H, W) + # target shape (batch_size, num_segments, H, W, C) + blended_imgs_target_shape = (input_shape[0], input_shape[1], + input_shape[3], input_shape[4], + input_shape[2]) + + return blended_imgs_target_shape, preds_target_shape + + +def _do_test_2D_models(recognizer, + target_layer_name, + input_shape, + num_classes=400, + device='cpu'): + demo_data = { + 'inputs': [torch.randint(0, 256, input_shape[1:])], + 'data_samples': [ActionDataSample().set_gt_labels(2)] + } + + recognizer = recognizer.to(device) + gradcam = GradCAM(recognizer, target_layer_name) + + blended_imgs_target_shape, preds_target_shape = _get_target_shapes( + input_shape, num_classes=num_classes, model_type='2D') + + blended_imgs, preds = gradcam(demo_data) + assert blended_imgs.size() == blended_imgs_target_shape + assert preds.size() == preds_target_shape + + blended_imgs, preds = gradcam(demo_data, True) + assert blended_imgs.size() == blended_imgs_target_shape + assert preds.size() == preds_target_shape + + +def _do_test_3D_models(recognizer, + target_layer_name, + input_shape, + num_classes=400): + blended_imgs_target_shape, preds_target_shape = _get_target_shapes( + input_shape, num_classes=num_classes, model_type='3D') + demo_data = { + 'inputs': [torch.randint(0, 256, input_shape[1:])], + 'data_samples': [ActionDataSample().set_gt_labels(2)] + } + + gradcam = GradCAM(recognizer, target_layer_name) + + blended_imgs, preds = gradcam(demo_data) + assert blended_imgs.size() == blended_imgs_target_shape + assert preds.size() == preds_target_shape + + blended_imgs, preds = gradcam(demo_data, True) + assert blended_imgs.size() == blended_imgs_target_shape + assert preds.size() == preds_target_shape + + +def test_tsn(): + config = get_recognizer_cfg( + 'tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb.py') + config.model['backbone']['pretrained'] = None + recognizer = MODELS.build(config.model) + recognizer.cfg = config + + input_shape = (1, 25, 3, 32, 32) + target_layer_name = 'backbone/layer4/1/relu' + + _do_test_2D_models(recognizer, target_layer_name, input_shape) + + +def test_i3d(): + config = get_recognizer_cfg( + 'i3d/i3d_imagenet-pretrained-r50_8xb8-32x2x1-100e_kinetics400-rgb.py') + config.model['backbone']['pretrained2d'] = False + config.model['backbone']['pretrained'] = None + + recognizer = MODELS.build(config.model) + recognizer.cfg = config + + input_shape = (1, 
1, 3, 32, 32, 32) + target_layer_name = 'backbone/layer4/1/relu' + + _do_test_3D_models(recognizer, target_layer_name, input_shape) + + +def test_r2plus1d(): + config = get_recognizer_cfg( + 'r2plus1d/r2plus1d_r34_8xb8-8x8x1-180e_kinetics400-rgb.py') + config.model['backbone']['pretrained2d'] = False + config.model['backbone']['pretrained'] = None + config.model['backbone']['norm_cfg'] = dict(type='BN3d') + + recognizer = MODELS.build(config.model) + recognizer.cfg = config + + input_shape = (1, 3, 3, 8, 32, 32) + target_layer_name = 'backbone/layer4/1/relu' + + _do_test_3D_models(recognizer, target_layer_name, input_shape) + + +def test_slowfast(): + config = get_recognizer_cfg( + 'slowfast/slowfast_r50_8xb8-4x16x1-256e_kinetics400-rgb.py') + + recognizer = MODELS.build(config.model) + recognizer.cfg = config + + input_shape = (1, 1, 3, 32, 32, 32) + target_layer_name = 'backbone/slow_path/layer4/1/relu' + + _do_test_3D_models(recognizer, target_layer_name, input_shape) + + +def test_tsm(): + config = get_recognizer_cfg( + 'tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_kinetics400-rgb.py') + config.model['backbone']['pretrained'] = None + target_layer_name = 'backbone/layer4/1/relu' + + # base config + recognizer = MODELS.build(config.model) + recognizer.cfg = config + input_shape = (1, 8, 3, 32, 32) + _do_test_2D_models(recognizer, target_layer_name, input_shape) + + # test twice sample + 3 crops, 2*3*8=48 + config.model.test_cfg = dict(average_clips='prob') + recognizer = MODELS.build(config.model) + recognizer.cfg = config + input_shape = (1, 48, 3, 32, 32) + _do_test_2D_models(recognizer, target_layer_name, input_shape) + + +def test_csn(): + config = get_recognizer_cfg( + 'csn/ipcsn_ig65m-pretrained-r152-bnfrozen_32x2x1-58e_kinetics400-rgb.py' # noqa: E501 + ) + config.model['backbone']['pretrained2d'] = False + config.model['backbone']['pretrained'] = None + + recognizer = MODELS.build(config.model) + recognizer.cfg = config + input_shape = (1, 1, 3, 32, 32, 32) + target_layer_name = 'backbone/layer4/1/relu' + + _do_test_3D_models(recognizer, target_layer_name, input_shape) + + +@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit') +def test_tpn(): + target_layer_name = 'backbone/layer4/1/relu' + + config = get_recognizer_cfg( + 'tpn/tpn-tsm_imagenet-pretrained-r50_8xb8-1x1x8-150e_sthv1-rgb.py') + config.model['backbone']['pretrained'] = None + config.model['backbone']['num_segments'] = 4 + config.model.test_cfg['fcn_test'] = False + recognizer = MODELS.build(config.model) + recognizer.cfg = config + + input_shape = (1, 4, 3, 16, 16) + _do_test_2D_models(recognizer, target_layer_name, input_shape, 174) + + config = get_recognizer_cfg( + 'tpn/tpn-slowonly_r50_8xb8-8x8x1-150e_kinetics400-rgb.py') + config.model['backbone']['pretrained'] = None + config.model.test_cfg['fcn_test'] = False + recognizer = MODELS.build(config.model) + recognizer.cfg = config + input_shape = (1, 3, 3, 4, 16, 16) + _do_test_3D_models(recognizer, target_layer_name, input_shape) + + +@pytest.mark.skipif(platform.system() == 'Windows', reason='Windows mem limit') +def test_c3d(): + config = get_recognizer_cfg( + 'c3d/c3d_sports1m-pretrained_8xb30-16x1x1-45e_ucf101-rgb.py') + config.model['backbone']['pretrained'] = None + recognizer = MODELS.build(config.model) + recognizer.cfg = config + input_shape = (1, 1, 3, 16, 112, 112) + target_layer_name = 'backbone/conv5a/activate' + _do_test_3D_models(recognizer, target_layer_name, input_shape, 101) + + +@pytest.mark.skipif( + not 
torch.cuda.is_available(), reason='requires CUDA support') +def test_tin(): + config = get_recognizer_cfg( + 'tin/tin_kinetics400-pretrained-tsm-r50_1x1x8-50e_kinetics400-rgb.py') + config.model['backbone']['pretrained'] = None + target_layer_name = 'backbone/layer4/1/relu' + + recognizer = MODELS.build(config.model) + recognizer.cfg = config + input_shape = (1, 8, 3, 64, 64) + _do_test_2D_models( + recognizer, target_layer_name, input_shape, device='cuda:0') + + +def test_x3d(): + config = get_recognizer_cfg('x3d/x3d_s_13x6x1_facebook-kinetics400-rgb.py') + config.model['backbone']['pretrained'] = None + recognizer = MODELS.build(config.model) + recognizer.cfg = config + input_shape = (1, 1, 3, 13, 32, 32) + target_layer_name = 'backbone/layer4/1/relu' + _do_test_3D_models(recognizer, target_layer_name, input_shape) From ba489957b82d89dbf6708ce9488b3d423610dd65 Mon Sep 17 00:00:00 2001 From: cir7 <33249023+cir7@users.noreply.github.com> Date: Fri, 25 Nov 2022 15:36:18 +0800 Subject: [PATCH 23/57] [Improve] support init_cfg for Swin and ViTMAE (#2055) Co-authored-by: wxDai --- mmaction/models/backbones/swin.py | 98 ++++++++----------- mmaction/models/backbones/vit_mae.py | 51 ++++------ mmaction/models/recognizers/base.py | 23 ++--- tests/models/recognizers/test_recognizer2d.py | 1 + 4 files changed, 68 insertions(+), 105 deletions(-) diff --git a/mmaction/models/backbones/swin.py b/mmaction/models/backbones/swin.py index 4b19ac67a3..8554dd4ce9 100644 --- a/mmaction/models/backbones/swin.py +++ b/mmaction/models/backbones/swin.py @@ -1,7 +1,7 @@ # Copyright (c) OpenMMLab. All rights reserved. from functools import lru_cache, reduce from operator import mul -from typing import Dict, Optional, Sequence, Tuple, Union +from typing import Dict, List, Optional, Sequence, Tuple, Union import numpy as np import torch @@ -14,7 +14,7 @@ from mmengine.logging import MMLogger from mmengine.model import BaseModule, ModuleList from mmengine.model.weight_init import trunc_normal_ -from mmengine.runner.checkpoint import _load_checkpoint, load_checkpoint +from mmengine.runner.checkpoint import _load_checkpoint from mmaction.registry import MODELS @@ -747,8 +747,11 @@ class SwinTransformer3D(BaseModule): Defaults to ``(3, )``. out_after_downsample (bool): Whether to output the feature map of a stage after the following downsample layer. Defaults to False. - init_cfg (dict, optional): Config dict for initialization. - Defaults to None. + init_cfg (dict or list[dict]): Initialization config dict. Defaults to + ``[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ]``. 
""" arch_zoo = { **dict.fromkeys(['t', 'tiny'], @@ -769,27 +772,32 @@ class SwinTransformer3D(BaseModule): 'num_heads': [6, 12, 24, 48]}), } # yapf: disable - def __init__(self, - arch: Union[str, Dict], - pretrained: Optional[str] = None, - pretrained2d: bool = True, - patch_size: Union[int, Sequence[int]] = (2, 4, 4), - in_channels: int = 3, - window_size: Sequence[int] = (8, 7, 7), - mlp_ratio: float = 4., - qkv_bias: bool = True, - qk_scale: Optional[float] = None, - drop_rate: float = 0., - attn_drop_rate: float = 0., - drop_path_rate: float = 0.1, - act_cfg: Dict = dict(type='GELU'), - norm_cfg: Dict = dict(type='LN'), - patch_norm: bool = True, - frozen_stages: int = -1, - with_cp: bool = False, - out_indices: Sequence[int] = (3, ), - out_after_downsample=False, - init_cfg: Optional[Dict] = None) -> None: + def __init__( + self, + arch: Union[str, Dict], + pretrained: Optional[str] = None, + pretrained2d: bool = True, + patch_size: Union[int, Sequence[int]] = (2, 4, 4), + in_channels: int = 3, + window_size: Sequence[int] = (8, 7, 7), + mlp_ratio: float = 4., + qkv_bias: bool = True, + qk_scale: Optional[float] = None, + drop_rate: float = 0., + attn_drop_rate: float = 0., + drop_path_rate: float = 0.1, + act_cfg: Dict = dict(type='GELU'), + norm_cfg: Dict = dict(type='LN'), + patch_norm: bool = True, + frozen_stages: int = -1, + with_cp: bool = False, + out_indices: Sequence[int] = (3, ), + out_after_downsample: bool = False, + init_cfg: Optional[Union[Dict, List[Dict]]] = [ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ] + ) -> None: super().__init__(init_cfg=init_cfg) self.pretrained = pretrained @@ -967,40 +975,18 @@ def inflate_weights(self, logger: MMLogger) -> None: msg = self.load_state_dict(state_dict, strict=False) logger.info(msg) - def init_weights(self, pretrained: Optional[str] = None) -> None: - """Initialize the weights in backbone. - - Args: - pretrained (str, optional): Path to pre-trained weights. - Defaults to None. - """ - - def _init_weights(m): - if isinstance(m, nn.Linear): - trunc_normal_(m.weight, std=.02) - if isinstance(m, nn.Linear) and m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.LayerNorm): - nn.init.constant_(m.bias, 0) - nn.init.constant_(m.weight, 1.0) - - if pretrained: - self.pretrained = pretrained - if isinstance(self.pretrained, str): + def init_weights(self) -> None: + """Initialize the weights in backbone.""" + if self.pretrained2d: logger = MMLogger.get_current_instance() logger.info(f'load model from: {self.pretrained}') - - if self.pretrained2d: - # Inflate 2D model into 3D model. - self.inflate_weights(logger) - else: - # Directly load 3D model. - load_checkpoint( - self, self.pretrained, strict=False, logger=logger) - elif self.pretrained is None: - self.apply(_init_weights) + # Inflate 2D model into 3D model. + self.inflate_weights(logger) else: - raise TypeError('pretrained must be a str or None') + if self.pretrained: + self.init_cfg = dict( + type='Pretrained', checkpoint=self.pretrained) + super().init_weights() def forward(self, x: torch.Tensor) -> \ Union[Tuple[torch.Tensor], torch.Tensor]: diff --git a/mmaction/models/backbones/vit_mae.py b/mmaction/models/backbones/vit_mae.py index c61ac24a70..21ff837724 100644 --- a/mmaction/models/backbones/vit_mae.py +++ b/mmaction/models/backbones/vit_mae.py @@ -1,14 +1,12 @@ # Copyright (c) OpenMMLab. All rights reserved. 
-from typing import Optional +from typing import Dict, List, Optional, Union import torch import torch.nn.functional as F from mmcv.cnn import build_norm_layer from mmcv.cnn.bricks import DropPath from mmcv.cnn.bricks.transformer import FFN, PatchEmbed -from mmengine.logging import MMLogger -from mmengine.model import BaseModule -from mmengine.runner.checkpoint import _load_checkpoint, load_state_dict +from mmengine.model import BaseModule, ModuleList from mmengine.utils import to_2tuple from torch import Tensor, nn @@ -247,9 +245,12 @@ class VisionTransformer(BaseModule): tubelet_size (int): Temporal size of one patch. Defaults to 2. use_mean_pooling (bool): If True, take the mean pooling over all positions. Defaults to True. - init_cfg (dict or Configdict, optional): The Config for initialization. - Defaults to None. pretrained (str, optional): Name of pretrained model. Default: None. + init_cfg (dict or list[dict]): Initialization config dict. Defaults to + ``[ + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ]``. """ def __init__(self, @@ -271,11 +272,19 @@ def __init__(self, num_frames: int = 16, tubelet_size: int = 2, use_mean_pooling: int = True, - init_cfg: Optional[ConfigType] = None, pretrained: Optional[str] = None, + init_cfg: Optional[Union[Dict, List[Dict]]] = [ + dict( + type='TruncNormal', layer='Linear', std=0.02, + bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.) + ], **kwargs) -> None: + + if pretrained: + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) super().__init__(init_cfg=init_cfg) - self.pretrained = pretrained + patch_size = to_2tuple(patch_size) img_size = to_2tuple(img_size) @@ -306,7 +315,7 @@ def __init__(self, # stochastic depth decay rule dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] - self.blocks = nn.ModuleList([ + self.blocks = ModuleList([ Block( embed_dims=embed_dims, num_heads=num_heads, @@ -327,30 +336,6 @@ def __init__(self, self.norm = build_norm_layer(norm_cfg, embed_dims)[1] self.fc_norm = None - def init_weights(self) -> None: - """Initiate the parameters either from existing checkpoint or from - scratch.""" - - if isinstance(self.pretrained, str): - logger = MMLogger.get_current_instance() - logger.info(f'load model from: {self.pretrained}') - - state_dict = _load_checkpoint(self.pretrained) - if 'state_dict' in state_dict: - state_dict = state_dict['state_dict'] - load_state_dict(self, state_dict, strict=False, logger=logger) - elif self.pretrained is None: - for m in self.modules(): - if isinstance(m, nn.Linear): - nn.init.trunc_normal_(m.weight, std=.02) - if isinstance(m, nn.Linear) and m.bias is not None: - nn.init.constant_(m.bias, 0) - elif isinstance(m, nn.LayerNorm): - nn.init.constant_(m.bias, 0) - nn.init.constant_(m.weight, 1.0) - else: - raise TypeError('pretrained must be a str or None') - def forward(self, x: Tensor) -> Tensor: """Defines the computation performed at every call. 
diff --git a/mmaction/models/recognizers/base.py b/mmaction/models/recognizers/base.py index e96740bcf9..8066ceed0d 100644 --- a/mmaction/models/recognizers/base.py +++ b/mmaction/models/recognizers/base.py @@ -8,7 +8,7 @@ from mmaction.registry import MODELS from mmaction.utils import (ConfigType, ForwardResults, OptConfigType, - OptMultiConfig, OptSampleList, SampleList) + OptSampleList, SampleList) class BaseRecognizer(BaseModel, metaclass=ABCMeta): @@ -28,8 +28,6 @@ class BaseRecognizer(BaseModel, metaclass=ABCMeta): data_preprocessor (Union[ConfigDict, dict], optional): The pre-process config of :class:`ActionDataPreprocessor`. it usually includes, ``mean``, ``std`` and ``format_shape``. Defaults to None. - init_cfg (Union[ConfigDict, dict], optional): Config to control the - initialization. Defaults to None. """ def __init__(self, @@ -38,14 +36,13 @@ def __init__(self, neck: OptConfigType = None, train_cfg: OptConfigType = None, test_cfg: OptConfigType = None, - data_preprocessor: OptConfigType = None, - init_cfg: OptMultiConfig = None) -> None: + data_preprocessor: OptConfigType = None) -> None: if data_preprocessor is None: # This preprocessor will only stack batch data samples. data_preprocessor = dict(type='ActionDataPreprocessor') - super(BaseRecognizer, self).__init__( - data_preprocessor=data_preprocessor, init_cfg=init_cfg) + super(BaseRecognizer, + self).__init__(data_preprocessor=data_preprocessor) # Record the source of the backbone. self.backbone_from = 'mmaction2' @@ -110,19 +107,13 @@ def with_cls_head(self) -> bool: def init_weights(self) -> None: """Initialize the model network weights.""" - if self.backbone_from in ['mmcls', 'mmaction2']: - self.backbone.init_weights() - elif self.backbone_from in ['torchvision', 'timm']: + super().init_weights() + if self.backbone_from in ['torchvision', 'timm']: warnings.warn('We do not initialize weights for backbones in ' f'{self.backbone_from}, since the weights for ' - f'backbones in {self.backbone_from} are initialized' + f'backbones in {self.backbone_from} are initialized ' 'in their __init__ functions.') - if self.with_cls_head: - self.cls_head.init_weights() - if self.with_neck: - self.neck.init_weights() - def loss(self, inputs: torch.Tensor, data_samples: SampleList, **kwargs) -> dict: """Calculate losses from a batch of inputs and data samples. 
diff --git a/tests/models/recognizers/test_recognizer2d.py b/tests/models/recognizers/test_recognizer2d.py index e73c7a7dcf..300e63b460 100644 --- a/tests/models/recognizers/test_recognizer2d.py +++ b/tests/models/recognizers/test_recognizer2d.py @@ -256,6 +256,7 @@ def test_tanet(): def test_timm_backbone(): # test tsn from timm + register_all_modules() config = get_recognizer_cfg( 'tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb.py') config.model['backbone']['pretrained'] = None From f0f07de4cec73d433c1cb23808d6af389c684029 Mon Sep 17 00:00:00 2001 From: cir7 <33249023+cir7@users.noreply.github.com> Date: Thu, 1 Dec 2022 11:33:42 +0800 Subject: [PATCH 24/57] [Feature] support MViT (#2007) --- configs/_base_/models/mvit_small.py | 14 + configs/recognition/mvit/README.md | 77 ++ configs/recognition/mvit/metafile.yml | 115 +++ .../mvit-base-p244_32x3x1_kinetics400-rgb.py | 150 +++ .../mvit/mvit-base-p244_u32_sthv2-rgb.py | 140 +++ .../mvit-large-p244_40x3x1_kinetics400-rgb.py | 152 ++++ .../mvit/mvit-large-p244_u40_sthv2-rgb.py | 142 +++ .../mvit-small-p244_16x4x1_kinetics400-rgb.py | 145 +++ .../mvit/mvit-small-p244_u16_sthv2-rgb.py | 124 +++ mmaction/datasets/transforms/__init__.py | 12 +- mmaction/datasets/transforms/loading.py | 85 ++ mmaction/datasets/transforms/processing.py | 170 ++++ mmaction/models/backbones/__init__.py | 4 +- mmaction/models/backbones/mvit.py | 860 ++++++++++++++++++ mmaction/models/heads/__init__.py | 4 +- mmaction/models/heads/mvit_head.py | 75 ++ mmaction/models/recognizers/recognizer3d.py | 22 +- mmaction/models/utils/__init__.py | 7 +- mmaction/models/utils/blending_utils.py | 68 ++ mmaction/models/utils/embed.py | 234 +++++ tests/models/backbones/test_mvit.py | 134 +++ tests/models/heads/test_mvit_head.py | 32 + tests/models/utils/test_blending_utils.py | 42 +- 23 files changed, 2790 insertions(+), 18 deletions(-) create mode 100644 configs/_base_/models/mvit_small.py create mode 100644 configs/recognition/mvit/README.md create mode 100644 configs/recognition/mvit/metafile.yml create mode 100644 configs/recognition/mvit/mvit-base-p244_32x3x1_kinetics400-rgb.py create mode 100644 configs/recognition/mvit/mvit-base-p244_u32_sthv2-rgb.py create mode 100644 configs/recognition/mvit/mvit-large-p244_40x3x1_kinetics400-rgb.py create mode 100644 configs/recognition/mvit/mvit-large-p244_u40_sthv2-rgb.py create mode 100644 configs/recognition/mvit/mvit-small-p244_16x4x1_kinetics400-rgb.py create mode 100644 configs/recognition/mvit/mvit-small-p244_u16_sthv2-rgb.py create mode 100644 mmaction/models/backbones/mvit.py create mode 100644 mmaction/models/heads/mvit_head.py create mode 100644 mmaction/models/utils/embed.py create mode 100644 tests/models/backbones/test_mvit.py create mode 100644 tests/models/heads/test_mvit_head.py diff --git a/configs/_base_/models/mvit_small.py b/configs/_base_/models/mvit_small.py new file mode 100644 index 0000000000..d6a94daa23 --- /dev/null +++ b/configs/_base_/models/mvit_small.py @@ -0,0 +1,14 @@ +model = dict( + type='Recognizer3D', + backbone=dict(type='MViT', arch='small', drop_path_rate=0.2), + data_preprocessor=dict( + type='ActionDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + format_shape='NCTHW'), + cls_head=dict( + type='MViTHead', + in_channels=768, + num_classes=400, + label_smooth_eps=0.1, + average_clips='prob')) diff --git a/configs/recognition/mvit/README.md b/configs/recognition/mvit/README.md new file mode 100644 index 0000000000..ccd9611c2d --- /dev/null +++ 
b/configs/recognition/mvit/README.md @@ -0,0 +1,77 @@ +# MViT V2 + +> [MViTv2: Improved Multiscale Vision Transformers for Classification and Detection](http://openaccess.thecvf.com//content/CVPR2022/papers/Li_MViTv2_Improved_Multiscale_Vision_Transformers_for_Classification_and_Detection_CVPR_2022_paper.pdf) + + + +## Abstract + + + +In this paper, we study Multiscale Vision Transformers (MViTv2) as a unified architecture for image and video +classification, as well as object detection. We present an improved version of MViT that incorporates +decomposed relative positional embeddings and residual pooling connections. We instantiate this architecture +in five sizes and evaluate it for ImageNet classification, COCO detection and Kinetics video recognition where +it outperforms prior work. We further compare MViTv2s' pooling attention to window attention mechanisms where +it outperforms the latter in accuracy/compute. Without bells-and-whistles, MViTv2 has state-of-the-art +performance in 3 domains: 88.8% accuracy on ImageNet classification, 58.7 boxAP on COCO object detection as +well as 86.1% on Kinetics-400 video classification. + + + +
+
+## Results and models
+
+### Kinetics-400
+
+| frame sampling strategy | resolution | backbone | pretrain | top1 acc | top5 acc | reference top1 acc | reference top5 acc | testing protocol | FLOPs | params | config | ckpt |
+| :---------------------: | :------------: | :--------: | :----------: | :------: | :------: | :-----------------------------: | :-----------------------------: | :--------------: | :---: | :----: | :-----------------: | :---------------: |
+| 16x4x1 | short-side 320 | MViTv2-S\* | From scratch | 81.1 | 94.7 | [81.0](https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md) | [94.6](https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md) | 5 clips x 1 crop | 64G | 34.5M | [config](/configs/recognition/mvit/mvit-small-p244_16x4x1_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/mvit/converted/mvit-small-p244_16x4x1_kinetics400-rgb_20221021-9ebaaeed.pth) |
+| 32x3x1 | short-side 320 | MViTv2-B\* | From scratch | 82.6 | 95.8 | [82.9](https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md) | [95.7](https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md) | 5 clips x 1 crop | 225G | 51.2M | [config](/configs/recognition/mvit/mvit-base-p244_32x3x1_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/mvit/converted/mvit-base-p244_32x3x1_kinetics400-rgb_20221021-f392cd2d.pth) |
+| 40x3x1 | short-side 320 | MViTv2-L\* | From scratch | 85.4 | 96.2 | [86.1](https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md) | [97.0](https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md) | 5 clips x 3 crops | 2828G | 213M | [config](/configs/recognition/mvit/mvit-large-p244_40x3x1_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/mvit/converted/mvit-large-p244_40x3x1_kinetics400-rgb_20221021-11fe1f97.pth) |
+
+### Something-Something V2
+
+| frame sampling strategy | resolution | backbone | pretrain | top1 acc | top5 acc | reference top1 acc | reference top5 acc | testing protocol | FLOPs | params | config | ckpt |
+| :---------------------: | :------------: | :--------: | :----------: | :------: | :------: | :----------------------------: | :-----------------------------: | :---------------: | :---: | :----: | :-----------------: | :---------------: |
+| uniform 16 | short-side 320 | MViTv2-S\* | K400 | 68.1 | 91.0 | [68.2](https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md) | [91.4](https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md) | 1 clip x 3 crops | 64G | 34.4M | [config](/configs/recognition/mvit/mvit-small-p244_u16_sthv2-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/mvit/converted/mvit-small-p244_u16_sthv2-rgb_20221021-65ecae7d.pth) |
+| uniform 32 | short-side 320 | MViTv2-B\* | K400 | 70.8 | 92.7 | [70.5](https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md) | [92.7](https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md) | 1 clip x 3 crops | 225G | 51.1M | [config](/configs/recognition/mvit/mvit-base-p244_u32_sthv2-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/mvit/converted/mvit-base-p244_u32_sthv2-rgb_20221021-d5de5da6.pth) |
+| uniform 40 | short-side 320 | MViTv2-L\* | IN21K + K400 | 73.2 | 94.0 | [73.3](https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md) | [94.0](https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md) | 1 clip x 3 crops | 2828G | 213M | [config](/configs/recognition/mvit/mvit-large-p244_u40_sthv2-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/mvit/converted/mvit-large-p244_u40_sthv2-rgb_20221021-61696e07.pth) |
+
+*Models with \* are ported from the repo [SlowFast](https://github.com/facebookresearch/SlowFast/) and tested on our data. Currently, we only support the testing of MViT models; training will be available soon.*
+
+1. The values in columns named after "reference" are copied from the paper.
+2. The validation set of Kinetics400 we used consists of 19796 videos. These videos are available at [Kinetics400-Validation](https://mycuhk-my.sharepoint.com/:u:/g/personal/1155136485_link_cuhk_edu_hk/EbXw2WX94J1Hunyt3MWNDJUBz-nHvQYhO9pvKqm6g39PMA?e=a9QldB). The corresponding [data list](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_val_list.txt) (each line is of the format 'video_id, num_frames, label_index') and the [label map](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_class2ind.txt) are also available.
+
+For more details on data preparation, you can refer to [Kinetics400](/tools/data/kinetics/README.md).
+
+## Test
+
+You can use the following command to test a model.
+
+```shell
+python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments]
+```
+
+Example: test the MViT model on the Kinetics-400 dataset and dump the result to a pkl file.
+
+```shell
+python tools/test.py configs/recognition/mvit/mvit-small-p244_16x4x1_kinetics400-rgb.py \
+    checkpoints/SOME_CHECKPOINT.pth --dump result.pkl
+```
+
+For more details, you can refer to the **Test** part in the [Training and Test Tutorial](/docs/en/user_guides/4_train_test.md).
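Besides `tools/test.py`, a released checkpoint can also be loaded through the Python API for a quick sanity check. The snippet below is only a sketch, assuming an MMAction2 1.x environment; the checkpoint and video paths are placeholders that should be replaced with one of the `.pth` links from the tables above and a local video file.

```python
# A minimal sketch of programmatic inference (not part of this PR).
from mmaction.apis import inference_recognizer, init_recognizer
from mmaction.utils import register_all_modules

register_all_modules()  # register mmaction models/datasets/transforms

config = 'configs/recognition/mvit/mvit-small-p244_16x4x1_kinetics400-rgb.py'
checkpoint = 'PATH/TO/CHECKPOINT.pth'  # placeholder: use a ckpt link from the tables
video = 'PATH/TO/VIDEO.mp4'            # placeholder: any short local video

model = init_recognizer(config, checkpoint, device='cpu')
result = inference_recognizer(model, video)  # data sample carrying predicted scores
print(result)
```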
+ +## Citation + +```bibtex +@inproceedings{li2021improved, + title={MViTv2: Improved multiscale vision transformers for classification and detection}, + author={Li, Yanghao and Wu, Chao-Yuan and Fan, Haoqi and Mangalam, Karttikeya and Xiong, Bo and Malik, Jitendra and Feichtenhofer, Christoph}, + booktitle={CVPR}, + year={2022} +} +``` diff --git a/configs/recognition/mvit/metafile.yml b/configs/recognition/mvit/metafile.yml new file mode 100644 index 0000000000..c5d7107482 --- /dev/null +++ b/configs/recognition/mvit/metafile.yml @@ -0,0 +1,115 @@ +Collections: +- Name: MViT + README: configs/recognition/MViT/README.md + Paper: + URL: http://openaccess.thecvf.com//content/CVPR2022/papers/Li_MViTv2_Improved_Multiscale_Vision_Transformers_for_Classification_and_Detection_CVPR_2022_paper.pdf + Title: "MViTv2: Improved Multiscale Vision Transformers for Classification and Detection" + +Models: + - Name: mvit-small-p244_16x4x1_kinetics400-rgb + Config: configs/recognition/mvit/mvit-small-p244_16x4x1_kinetics400-rgb.py + In Collection: MViT + Metadata: + Architecture: MViT-small + Resolution: short-side 320 + Modality: RGB + Converted From: + Weights: https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md + Code: https://github.com/facebookresearch/SlowFast/ + Results: + - Dataset: Kinetics-400 + Task: Action Recognition + Metrics: + Top 1 Accuracy: 81.1 + Top 5 Accuracy: 94.7 + Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/mvit/converted/mvit-small-p244_16x4x1_kinetics400-rgb_20221021-9ebaaeed.pth + + - Name: mvit-base-p244_32x3x1_kinetics400-rgb + Config: configs/recognition/mvit/mvit-base-p244_32x3x1_kinetics400-rgb.py + In Collection: MViT + Metadata: + Architecture: MViT-base + Resolution: short-side 320 + Modality: RGB + Converted From: + Weights: https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md + Code: https://github.com/facebookresearch/SlowFast/ + Results: + - Dataset: Kinetics-400 + Task: Action Recognition + Metrics: + Top 1 Accuracy: 81.1 + Top 5 Accuracy: 94.7 + Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/mvit/converted/mvit-base-p244_32x3x1_kinetics400-rgb_20221021-f392cd2d.pth + + - Name: mvit-large-p244_40x3x1_kinetics400-rgb + Config: configs/recognition/mvit/mvit-large-p244_40x3x1_kinetics400-rgb.py + In Collection: MViT + Metadata: + Architecture: MViT-large + Resolution: short-side 446 + Modality: RGB + Converted From: + Weights: https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md + Code: https://github.com/facebookresearch/SlowFast/ + Results: + - Dataset: Kinetics-400 + Task: Action Recognition + Metrics: + Top 1 Accuracy: 81.1 + Top 5 Accuracy: 94.7 + Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/mvit/converted/mvit-large-p244_40x3x1_kinetics400-rgb_20221021-11fe1f97.pth + + - Name: mvit-small-p244_u16_sthv2-rgb + Config: configs/recognition/mvit/mvit-small-p244_u16_sthv2-rgb.py + In Collection: MViT + Metadata: + Architecture: MViT-small + Resolution: short-side 320 + Modality: RGB + Converted From: + Weights: https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md + Code: https://github.com/facebookresearch/SlowFast/ + Results: + - Dataset: SthV2 + Task: Action Recognition + Metrics: + Top 1 Accuracy: 68.1 + Top 5 Accuracy: 91.0 + Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/mvit/converted/mvit-small-p244_u16_sthv2-rgb_20221021-65ecae7d.pth + + - Name: 
mvit-base-p244_u32_sthv2-rgb + Config: configs/recognition/mvit/mvit-base-p244_u32_sthv2-rgb.py + In Collection: MViT + Metadata: + Architecture: MViT-small + Resolution: short-side 320 + Modality: RGB + Converted From: + Weights: https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md + Code: https://github.com/facebookresearch/SlowFast/ + Results: + - Dataset: SthV2 + Task: Action Recognition + Metrics: + Top 1 Accuracy: 70.8 + Top 5 Accuracy: 92.7 + Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/mvit/converted/mvit-base-p244_u32_sthv2-rgb_20221021-d5de5da6.pth + + - Name: mvit-large-p244_u40_sthv2-rgb + Config: configs/recognition/mvit/mvit-large-p244_u40_sthv2-rgb.py + In Collection: MViT + Metadata: + Architecture: MViT-small + Resolution: short-side 446 + Modality: RGB + Converted From: + Weights: https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md + Code: https://github.com/facebookresearch/SlowFast/ + Results: + - Dataset: SthV2 + Task: Action Recognition + Metrics: + Top 1 Accuracy: 73.2 + Top 5 Accuracy: 94.0 + Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/mvit/converted/mvit-large-p244_u40_sthv2-rgb_20221021-61696e07.pth diff --git a/configs/recognition/mvit/mvit-base-p244_32x3x1_kinetics400-rgb.py b/configs/recognition/mvit/mvit-base-p244_32x3x1_kinetics400-rgb.py new file mode 100644 index 0000000000..b1e186f195 --- /dev/null +++ b/configs/recognition/mvit/mvit-base-p244_32x3x1_kinetics400-rgb.py @@ -0,0 +1,150 @@ +_base_ = [ + '../../_base_/models/mvit_small.py', '../../_base_/default_runtime.py' +] + +model = dict( + backbone=dict( + arch='base', + temporal_size=32, + drop_path_rate=0.3, + ), + data_preprocessor=dict( + type='ActionDataPreprocessor', + mean=[114.75, 114.75, 114.75], + std=[57.375, 57.375, 57.375], + blending=dict( + type='RandomBatchAugment', + augments=[ + dict(type='MixupBlending', alpha=0.8, num_classes=400), + dict(type='CutmixBlending', alpha=1, num_classes=400) + ]), + format_shape='NCTHW'), +) + +# dataset settings +dataset_type = 'VideoDataset' +data_root = 'data/kinetics400/videos_train' +data_root_val = 'data/kinetics400/videos_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt' + +file_client_args = dict(io_backend='disk') +train_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict(type='SampleFrames', clip_len=32, frame_interval=3, num_clips=1), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='PytorchVideoWrapper', + op='RandAugment', + magnitude=7, + num_layers=4), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='RandomErasing', erase_prob=0.25, mode='rand'), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict( + type='SampleFrames', + clip_len=32, + frame_interval=3, + num_clips=1, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict( + type='SampleFrames', + clip_len=32, + frame_interval=3, + num_clips=5, + 
test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 224)), + dict(type='CenterCrop', crop_size=224), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=8, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=dict(video=data_root), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=8, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=dict(video=data_root_val), + pipeline=val_pipeline, + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=dict(video=data_root_val), + pipeline=test_pipeline, + test_mode=True)) + +val_evaluator = dict(type='AccMetric') +test_evaluator = val_evaluator + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=30, val_begin=1, val_interval=3) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +optim_wrapper = dict( + type='AmpOptimWrapper', + optimizer=dict( + type='AdamW', lr=1.6e-3, betas=(0.9, 0.999), weight_decay=0.05)) + +param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.1, + by_epoch=True, + begin=0, + end=30, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=200, + eta_min=0, + by_epoch=True, + begin=0, + end=200, + convert_to_iter_based=True) +] + +default_hooks = dict( + checkpoint=dict(interval=3, max_keep_ckpts=5), logger=dict(interval=100)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (8 samples per GPU). 
+auto_scale_lr = dict(enable=False, base_batch_size=64) diff --git a/configs/recognition/mvit/mvit-base-p244_u32_sthv2-rgb.py b/configs/recognition/mvit/mvit-base-p244_u32_sthv2-rgb.py new file mode 100644 index 0000000000..c954b60b54 --- /dev/null +++ b/configs/recognition/mvit/mvit-base-p244_u32_sthv2-rgb.py @@ -0,0 +1,140 @@ +_base_ = [ + '../../_base_/models/mvit_small.py', '../../_base_/default_runtime.py' +] + +model = dict( + backbone=dict( + arch='base', + temporal_size=32, + drop_path_rate=0.3, + ), + data_preprocessor=dict( + type='ActionDataPreprocessor', + mean=[114.75, 114.75, 114.75], + std=[57.375, 57.375, 57.375], + blending=dict( + type='RandomBatchAugment', + augments=[ + dict(type='MixupBlending', alpha=0.8, num_classes=174), + dict(type='CutmixBlending', alpha=1, num_classes=174) + ]), + format_shape='NCTHW'), + cls_head=dict(num_classes=174)) + +# dataset settings +dataset_type = 'VideoDataset' +data_root = 'data/sthv2/videos' +data_root_val = 'data/sthv2/videos' +ann_file_train = 'data/sthv2/sthv2_train_list_videos.txt' +ann_file_val = 'data/sthv2/sthv2_val_list_videos.txt' +ann_file_test = 'data/sthv2/sthv2_val_list_videos.txt' + +file_client_args = dict(io_backend='disk') +train_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict(type='UniformSample', clip_len=32), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict( + type='PytorchVideoWrapper', + op='RandAugment', + magnitude=7, + num_layers=4), + dict(type='RandomErasing', erase_prob=0.25, mode='rand'), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict(type='UniformSample', clip_len=32, test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict(type='UniformSample', clip_len=32, test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 224)), + dict(type='ThreeCrop', crop_size=224), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=8, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=dict(video=data_root), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=8, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=dict(video=data_root_val), + pipeline=val_pipeline, + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=dict(video=data_root_val), + pipeline=test_pipeline, + test_mode=True)) + +val_evaluator = dict(type='AccMetric') +test_evaluator = val_evaluator + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=100, val_begin=1, val_interval=3) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +base_lr = 1.6e-3 +optim_wrapper = dict( + type='AmpOptimWrapper', + optimizer=dict( + type='AdamW', 
lr=base_lr, betas=(0.9, 0.999), weight_decay=0.05)) + +param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.1, + by_epoch=True, + begin=0, + end=30, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=70, + eta_min=base_lr / 100, + by_epoch=True, + begin=30, + end=100, + convert_to_iter_based=True) +] + +default_hooks = dict( + checkpoint=dict(interval=3, max_keep_ckpts=5), logger=dict(interval=100)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (8 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=64) diff --git a/configs/recognition/mvit/mvit-large-p244_40x3x1_kinetics400-rgb.py b/configs/recognition/mvit/mvit-large-p244_40x3x1_kinetics400-rgb.py new file mode 100644 index 0000000000..8c93519914 --- /dev/null +++ b/configs/recognition/mvit/mvit-large-p244_40x3x1_kinetics400-rgb.py @@ -0,0 +1,152 @@ +_base_ = [ + '../../_base_/models/mvit_small.py', '../../_base_/default_runtime.py' +] + +model = dict( + backbone=dict( + arch='large', + temporal_size=40, + spatial_size=312, + drop_path_rate=0.75, + ), + data_preprocessor=dict( + type='ActionDataPreprocessor', + mean=[114.75, 114.75, 114.75], + std=[57.375, 57.375, 57.375], + blending=dict( + type='RandomBatchAugment', + augments=[ + dict(type='MixupBlending', alpha=0.8, num_classes=400), + dict(type='CutmixBlending', alpha=1, num_classes=400) + ]), + format_shape='NCTHW'), + cls_head=dict(in_channels=1152), + test_cfg=dict(max_testing_views=5)) + +# dataset settings +dataset_type = 'VideoDataset' +data_root = 'data/kinetics400/videos_train' +data_root_val = 'data/kinetics400/videos_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt' + +file_client_args = dict(io_backend='disk') +train_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict(type='SampleFrames', clip_len=40, frame_interval=3, num_clips=1), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 356)), + dict( + type='PytorchVideoWrapper', + op='RandAugment', + magnitude=7, + num_layers=4), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(312, 312), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='RandomErasing', erase_prob=0.25, mode='rand'), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict( + type='SampleFrames', + clip_len=40, + frame_interval=3, + num_clips=1, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 356)), + dict(type='CenterCrop', crop_size=312), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict( + type='SampleFrames', + clip_len=40, + frame_interval=3, + num_clips=5, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 312)), + dict(type='ThreeCrop', crop_size=312), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=8, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=dict(video=data_root), + pipeline=train_pipeline)) 
+val_dataloader = dict( + batch_size=8, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=dict(video=data_root_val), + pipeline=val_pipeline, + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=dict(video=data_root_val), + pipeline=test_pipeline, + test_mode=True)) + +val_evaluator = dict(type='AccMetric') +test_evaluator = val_evaluator + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=30, val_begin=1, val_interval=3) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +optim_wrapper = dict( + type='AmpOptimWrapper', + optimizer=dict( + type='AdamW', lr=1.6e-3, betas=(0.9, 0.999), weight_decay=0.05)) + +param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.1, + by_epoch=True, + begin=0, + end=30, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=200, + eta_min=0, + by_epoch=True, + begin=0, + end=200, + convert_to_iter_based=True) +] + +default_hooks = dict( + checkpoint=dict(interval=3, max_keep_ckpts=5), logger=dict(interval=100)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (8 samples per GPU). +auto_scale_lr = dict(enable=True, base_batch_size=512) diff --git a/configs/recognition/mvit/mvit-large-p244_u40_sthv2-rgb.py b/configs/recognition/mvit/mvit-large-p244_u40_sthv2-rgb.py new file mode 100644 index 0000000000..b3fde41a78 --- /dev/null +++ b/configs/recognition/mvit/mvit-large-p244_u40_sthv2-rgb.py @@ -0,0 +1,142 @@ +_base_ = [ + '../../_base_/models/mvit_small.py', '../../_base_/default_runtime.py' +] + +model = dict( + backbone=dict( + arch='large', + temporal_size=40, + spatial_size=312, + drop_path_rate=0.75, + ), + data_preprocessor=dict( + type='ActionDataPreprocessor', + mean=[114.75, 114.75, 114.75], + std=[57.375, 57.375, 57.375], + blending=dict( + type='RandomBatchAugment', + augments=[ + dict(type='MixupBlending', alpha=0.8, num_classes=400), + dict(type='CutmixBlending', alpha=1, num_classes=400) + ]), + format_shape='NCTHW'), + cls_head=dict(in_channels=1152, num_classes=174), + test_cfg=dict(max_testing_views=5)) + +# dataset settings +dataset_type = 'VideoDataset' +data_root = 'data/sthv2/videos' +data_root_val = 'data/sthv2/videos' +ann_file_train = 'data/sthv2/sthv2_train_list_videos.txt' +ann_file_val = 'data/sthv2/sthv2_val_list_videos.txt' +ann_file_test = 'data/sthv2/sthv2_val_list_videos.txt' + +file_client_args = dict(io_backend='disk') +train_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict(type='UniformSample', clip_len=40), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict( + type='PytorchVideoWrapper', + op='RandAugment', + magnitude=7, + num_layers=4), + dict(type='RandomErasing', erase_prob=0.25, mode='rand'), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict(type='UniformSample', clip_len=40, test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + 
dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict(type='UniformSample', clip_len=40, test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 224)), + dict(type='ThreeCrop', crop_size=224), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=8, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=dict(video=data_root), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=8, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=dict(video=data_root_val), + pipeline=val_pipeline, + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=dict(video=data_root_val), + pipeline=test_pipeline, + test_mode=True)) + +val_evaluator = dict(type='AccMetric') +test_evaluator = val_evaluator + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=100, val_begin=1, val_interval=3) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +base_lr = 1.6e-3 +optim_wrapper = dict( + type='AmpOptimWrapper', + optimizer=dict( + type='AdamW', lr=base_lr, betas=(0.9, 0.999), weight_decay=0.05)) + +param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.1, + by_epoch=True, + begin=0, + end=30, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=70, + eta_min=base_lr / 100, + by_epoch=True, + begin=30, + end=100, + convert_to_iter_based=True) +] + +default_hooks = dict( + checkpoint=dict(interval=3, max_keep_ckpts=5), logger=dict(interval=10)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (8 samples per GPU). 
+auto_scale_lr = dict(enable=False, base_batch_size=64) diff --git a/configs/recognition/mvit/mvit-small-p244_16x4x1_kinetics400-rgb.py b/configs/recognition/mvit/mvit-small-p244_16x4x1_kinetics400-rgb.py new file mode 100644 index 0000000000..4da89b5a4a --- /dev/null +++ b/configs/recognition/mvit/mvit-small-p244_16x4x1_kinetics400-rgb.py @@ -0,0 +1,145 @@ +_base_ = [ + '../../_base_/models/mvit_small.py', '../../_base_/default_runtime.py' +] + +model = dict( + data_preprocessor=dict( + type='ActionDataPreprocessor', + mean=[114.75, 114.75, 114.75], + std=[57.375, 57.375, 57.375], + blending=dict( + type='RandomBatchAugment', + augments=[ + dict(type='MixupBlending', alpha=0.8, num_classes=400), + dict(type='CutmixBlending', alpha=1, num_classes=400) + ]), + format_shape='NCTHW'), ) + +# dataset settings +dataset_type = 'VideoDataset' +data_root = 'data/kinetics400/videos_train' +data_root_val = 'data/kinetics400/videos_val' +ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' +ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' +ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt' + +file_client_args = dict(io_backend='disk') +train_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict(type='SampleFrames', clip_len=16, frame_interval=4, num_clips=1), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='PytorchVideoWrapper', + op='RandAugment', + magnitude=7, + num_layers=4), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='RandomErasing', erase_prob=0.25, mode='rand'), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict( + type='SampleFrames', + clip_len=16, + frame_interval=4, + num_clips=1, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict( + type='SampleFrames', + clip_len=16, + frame_interval=4, + num_clips=5, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 224)), + dict(type='CenterCrop', crop_size=224), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=8, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=dict(video=data_root), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=8, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=dict(video=data_root_val), + pipeline=val_pipeline, + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=dict(video=data_root_val), + pipeline=test_pipeline, + test_mode=True)) + +val_evaluator = dict(type='AccMetric') +test_evaluator = val_evaluator + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=200, val_begin=1, val_interval=3) +val_cfg = dict(type='ValLoop') +test_cfg = 
dict(type='TestLoop') + +base_lr = 1.6e-3 +optim_wrapper = dict( + type='AmpOptimWrapper', + optimizer=dict( + type='AdamW', lr=base_lr, betas=(0.9, 0.999), weight_decay=0.05)) + +param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.1, + by_epoch=True, + begin=0, + end=30, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=200, + eta_min=base_lr / 100, + by_epoch=True, + begin=30, + end=200, + convert_to_iter_based=True) +] + +default_hooks = dict( + checkpoint=dict(interval=3, max_keep_ckpts=5), logger=dict(interval=100)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (8 samples per GPU). +auto_scale_lr = dict(enable=True, base_batch_size=512) diff --git a/configs/recognition/mvit/mvit-small-p244_u16_sthv2-rgb.py b/configs/recognition/mvit/mvit-small-p244_u16_sthv2-rgb.py new file mode 100644 index 0000000000..08934b9a5e --- /dev/null +++ b/configs/recognition/mvit/mvit-small-p244_u16_sthv2-rgb.py @@ -0,0 +1,124 @@ +_base_ = [ + '../../_base_/models/mvit_small.py', '../../_base_/default_runtime.py' +] + +model = dict(cls_head=dict(num_classes=174)) + +# dataset settings +dataset_type = 'VideoDataset' +data_root = 'data/sthv2/videos' +data_root_val = 'data/sthv2/videos' +ann_file_train = 'data/sthv2/sthv2_train_list_videos.txt' +ann_file_val = 'data/sthv2/sthv2_val_list_videos.txt' +ann_file_test = 'data/sthv2/sthv2_val_list_videos.txt' + +file_client_args = dict(io_backend='disk') +train_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict(type='UniformSample', clip_len=16), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict( + type='PytorchVideoWrapper', + op='RandAugment', + magnitude=7, + num_layers=4), + dict(type='RandomErasing', erase_prob=0.25, mode='rand'), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict(type='UniformSample', clip_len=16, test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='DecordInit', **file_client_args), + dict(type='UniformSample', clip_len=16, test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 224)), + dict(type='ThreeCrop', crop_size=224), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=8, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=dict(video=data_root), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=8, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=dict(video=data_root_val), + pipeline=val_pipeline, + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_test, + data_prefix=dict(video=data_root_val), + pipeline=test_pipeline, + test_mode=True)) + 
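+# Editorial note: with the test pipeline above, each video is uniformly sampled into a
+# single 16-frame clip and then cropped into 3 spatial views by ThreeCrop, so one test
+# sample corresponds to 3 views of shape (3, 16, 224, 224) in NCTHW format.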
+val_evaluator = dict(type='AccMetric') +test_evaluator = val_evaluator + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=100, val_begin=1, val_interval=3) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +base_lr = 1.6e-3 +optim_wrapper = dict( + type='AmpOptimWrapper', + optimizer=dict( + type='AdamW', lr=base_lr, betas=(0.9, 0.999), weight_decay=0.05), + paramwise_cfg=dict(norm_decay_mult=0.0, bias_decay_mult=0.0)) + +param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.1, + by_epoch=True, + begin=0, + end=30, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=100, + eta_min=base_lr / 100, + by_epoch=True, + begin=30, + end=100, + convert_to_iter_based=True) +] + +default_hooks = dict( + checkpoint=dict(interval=3, max_keep_ckpts=5), logger=dict(interval=100)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (8 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=64) diff --git a/mmaction/datasets/transforms/__init__.py b/mmaction/datasets/transforms/__init__.py index f21e6d01b9..09e0111e4c 100644 --- a/mmaction/datasets/transforms/__init__.py +++ b/mmaction/datasets/transforms/__init__.py @@ -10,7 +10,7 @@ LoadProposals, OpenCVDecode, OpenCVInit, PIMSDecode, PIMSInit, PyAVDecode, PyAVDecodeMotionVector, PyAVInit, RawFrameDecode, SampleAVAFrames, SampleFrames, - UntrimmedSampleFrames) + UniformSample, UntrimmedSampleFrames) from .pose_loading import (GeneratePoseTarget, LoadKineticsPose, PaddingWithLoop, PoseDecode, UniformSampleFrames) from .processing import (AudioAmplify, CenterCrop, ColorJitter, Flip, Fuse, @@ -30,9 +30,9 @@ 'AudioAmplify', 'MelSpectrogram', 'AudioDecode', 'FormatAudioShape', 'LoadAudioFeature', 'AudioFeatureSelector', 'AudioDecodeInit', 'ImageDecode', 'BuildPseudoClip', 'RandomRescale', 'PIMSDecode', - 'PyAVDecodeMotionVector', 'UniformSampleFrames', 'PoseDecode', - 'LoadKineticsPose', 'GeneratePoseTarget', 'PIMSInit', 'FormatGCNInput', - 'PaddingWithLoop', 'ArrayDecode', 'JointToBone', 'PackActionInputs', - 'PackLocalizationInputs', 'ImgAug', 'TorchVisionWrapper', - 'PytorchVideoWrapper', 'PoseCompact' + 'PyAVDecodeMotionVector', 'UniformSample', 'UniformSampleFrames', + 'PoseDecode', 'LoadKineticsPose', 'GeneratePoseTarget', 'PIMSInit', + 'FormatGCNInput', 'PaddingWithLoop', 'ArrayDecode', 'JointToBone', + 'PackActionInputs', 'PackLocalizationInputs', 'ImgAug', + 'TorchVisionWrapper', 'PytorchVideoWrapper', 'PoseCompact' ] diff --git a/mmaction/datasets/transforms/loading.py b/mmaction/datasets/transforms/loading.py index ceb761d638..e0b5ce75a6 100644 --- a/mmaction/datasets/transforms/loading.py +++ b/mmaction/datasets/transforms/loading.py @@ -265,6 +265,91 @@ def __repr__(self): return repr_str +@TRANSFORMS.register_module() +class UniformSample(BaseTransform): + """Uniformly sample frames from the video. Currently used for Something- + Something V2 dataset. Modified from + https://github.com/facebookresearch/SlowFast/blob/64a + bcc90ccfdcbb11cf91d6e525bed60e92a8796/slowfast/datasets/ssv2.py#L159. + + To sample an n-frame clip from the video. UniformSampleFrames basically + divides the video into n segments of equal length and randomly samples one + frame from each segment. + + Required keys: + + - total_frames + - start_index + + Added keys: + + - frame_inds + - clip_len + - frame_interval + - num_clips + + Args: + clip_len (int): Frames of each sampled output clip. 
+ num_clips (int): Number of clips to be sampled. Default: 1. + test_mode (bool): Store True when building test or validation dataset. + Default: False. + """ + + def __init__(self, + clip_len: int, + num_clips: int = 1, + test_mode: bool = False) -> None: + + self.clip_len = clip_len + self.num_clips = num_clips + self.test_mode = test_mode + + def _get_sample_clips(self, num_frames: int) -> np.array: + """When video frames is shorter than target clip len, this strategy + would repeat sample frame, rather than loop sample in 'loop' mode. In + test mode, this strategy would sample the middle frame of each segment, + rather than set a random seed, and therefore only support sample 1 + clip. + + Args: + num_frames (int): Total number of frame in the video. + Returns: + seq (list): the indexes of frames of sampled from the video. + """ + assert self.num_clips == 1 + seg_size = float(num_frames - 1) / self.clip_len + inds = [] + for i in range(self.clip_len): + start = int(np.round(seg_size * i)) + end = int(np.round(seg_size * (i + 1))) + if not self.test_mode: + inds.append(np.random.randint(start, end + 1)) + else: + inds.append((start + end) // 2) + + return np.array(inds) + + def transform(self, results: dict): + num_frames = results['total_frames'] + + inds = self._get_sample_clips(num_frames) + start_index = results['start_index'] + inds = inds + start_index + + results['frame_inds'] = inds.astype(np.int32) + results['clip_len'] = self.clip_len + results['frame_interval'] = None + results['num_clips'] = self.num_clips + return results + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(' + f'clip_len={self.clip_len}, ' + f'num_clips={self.num_clips}, ' + f'test_mode={self.test_mode}') + return repr_str + + @TRANSFORMS.register_module() class UntrimmedSampleFrames(BaseTransform): """Sample frames from the untrimmed video. diff --git a/mmaction/datasets/transforms/processing.py b/mmaction/datasets/transforms/processing.py index 6ea381030f..d34bc93327 100644 --- a/mmaction/datasets/transforms/processing.py +++ b/mmaction/datasets/transforms/processing.py @@ -1,12 +1,15 @@ # Copyright (c) OpenMMLab. All rights reserved. import random import warnings +from numbers import Number +from typing import Sequence import cv2 import mmcv import mmengine import numpy as np from mmcv.transforms import BaseTransform +from mmcv.transforms.utils import cache_randomness from torch.nn.modules.utils import _pair from mmaction.registry import TRANSFORMS @@ -1491,3 +1494,170 @@ def __repr__(self): f'n_mels={self.n_mels}, ' f'fixed_length={self.fixed_length})') return repr_str + + +@TRANSFORMS.register_module() +class RandomErasing(BaseTransform): + """Randomly selects a rectangle region in an image and erase pixels. + basically refer mmcls. + + **Required Keys:** + + - img + + **Modified Keys:** + + - img + + Args: + erase_prob (float): Probability that image will be randomly erased. + Default: 0.5 + min_area_ratio (float): Minimum erased area / input image area + Default: 0.02 + max_area_ratio (float): Maximum erased area / input image area + Default: 1/3 + aspect_range (sequence | float): Aspect ratio range of erased area. + if float, it will be converted to (aspect_ratio, 1/aspect_ratio) + Default: (3/10, 10/3) + mode (str): Fill method in erased area, can be: + + - const (default): All pixels are assign with the same value. + - rand: each pixel is assigned with a random value in [0, 255] + + fill_color (sequence | Number): Base color filled in erased area. + Defaults to (128, 128, 128). 
+ fill_std (sequence | Number, optional): If set and ``mode`` is 'rand', + fill erased area with random color from normal distribution + (mean=fill_color, std=fill_std); If not set, fill erased area with + random color from uniform distribution (0~255). Defaults to None. + + Note: + See `Random Erasing Data Augmentation + `_ + + This paper provided 4 modes: RE-R, RE-M, RE-0, RE-255, and use RE-M as + default. The config of these 4 modes are: + + - RE-R: RandomErasing(mode='rand') + - RE-M: RandomErasing(mode='const', fill_color=(123.67, 116.3, 103.5)) + - RE-0: RandomErasing(mode='const', fill_color=0) + - RE-255: RandomErasing(mode='const', fill_color=255) + """ + + def __init__(self, + erase_prob=0.5, + min_area_ratio=0.02, + max_area_ratio=1 / 3, + aspect_range=(3 / 10, 10 / 3), + mode='const', + fill_color=(128, 128, 128), + fill_std=None): + assert isinstance(erase_prob, float) and 0. <= erase_prob <= 1. + assert isinstance(min_area_ratio, float) and 0. <= min_area_ratio <= 1. + assert isinstance(max_area_ratio, float) and 0. <= max_area_ratio <= 1. + assert min_area_ratio <= max_area_ratio, \ + 'min_area_ratio should be smaller than max_area_ratio' + if isinstance(aspect_range, float): + aspect_range = min(aspect_range, 1 / aspect_range) + aspect_range = (aspect_range, 1 / aspect_range) + assert isinstance(aspect_range, Sequence) and len(aspect_range) == 2 \ + and all(isinstance(x, float) for x in aspect_range), \ + 'aspect_range should be a float or Sequence with two float.' + assert all(x > 0 for x in aspect_range), \ + 'aspect_range should be positive.' + assert aspect_range[0] <= aspect_range[1], \ + 'In aspect_range (min, max), min should be smaller than max.' + assert mode in ['const', 'rand'], \ + 'Please select `mode` from ["const", "rand"].' + if isinstance(fill_color, Number): + fill_color = [fill_color] * 3 + assert isinstance(fill_color, Sequence) and len(fill_color) == 3 \ + and all(isinstance(x, Number) for x in fill_color), \ + 'fill_color should be a float or Sequence with three int.' + if fill_std is not None: + if isinstance(fill_std, Number): + fill_std = [fill_std] * 3 + assert isinstance(fill_std, Sequence) and len(fill_std) == 3 \ + and all(isinstance(x, Number) for x in fill_std), \ + 'fill_std should be a float or Sequence with three int.' 
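+        # Editorial note (illustrative): with the settings used in the MViT configs above,
+        # RandomErasing(erase_prob=0.25, mode='rand') erases, with probability 0.25, a single
+        # random patch covering roughly 2%-33% of the frame area, at the same location in
+        # every frame of the clip, filled with uniform random noise in [0, 255].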
+ + self.erase_prob = erase_prob + self.min_area_ratio = min_area_ratio + self.max_area_ratio = max_area_ratio + self.aspect_range = aspect_range + self.mode = mode + self.fill_color = fill_color + self.fill_std = fill_std + + def _img_fill_pixels(self, img, top, left, h, w): + """Fill pixels to the patch of image.""" + if self.mode == 'const': + patch = np.empty((h, w, 3), dtype=np.uint8) + patch[:, :] = np.array(self.fill_color, dtype=np.uint8) + elif self.fill_std is None: + # Uniform distribution + patch = np.random.uniform(0, 256, (h, w, 3)).astype(np.uint8) + else: + # Normal distribution + patch = np.random.normal(self.fill_color, self.fill_std, (h, w, 3)) + patch = np.clip(patch.astype(np.int32), 0, 255).astype(np.uint8) + + img[top:top + h, left:left + w] = patch + return img + + def _fill_pixels(self, imgs, top, left, h, w): + """Fill pixels to the patch of each image in frame clip.""" + return [self._img_fill_pixels(img, top, left, h, w) for img in imgs] + + @cache_randomness + def random_disable(self): + """Randomly disable the transform.""" + return np.random.rand() > self.erase_prob + + @cache_randomness + def random_patch(self, img_h, img_w): + """Randomly generate patch the erase.""" + # convert the aspect ratio to log space to equally handle width and + # height. + log_aspect_range = np.log( + np.array(self.aspect_range, dtype=np.float32)) + aspect_ratio = np.exp(np.random.uniform(*log_aspect_range)) + area = img_h * img_w + area *= np.random.uniform(self.min_area_ratio, self.max_area_ratio) + + h = min(int(round(np.sqrt(area * aspect_ratio))), img_h) + w = min(int(round(np.sqrt(area / aspect_ratio))), img_w) + top = np.random.randint(0, img_h - h) if img_h > h else 0 + left = np.random.randint(0, img_w - w) if img_w > w else 0 + return top, left, h, w + + def transform(self, results): + """ + Args: + results (dict): Results dict from pipeline + + Returns: + dict: Results after the transformation. 
+ """ + if self.random_disable(): + return results + + imgs = results['imgs'] + img_h, img_w = imgs[0].shape[:2] + + imgs = self._fill_pixels(imgs, *self.random_patch(img_h, img_w)) + + results['imgs'] = imgs + + return results + + def __repr__(self): + repr_str = self.__class__.__name__ + repr_str += f'(erase_prob={self.erase_prob}, ' + repr_str += f'min_area_ratio={self.min_area_ratio}, ' + repr_str += f'max_area_ratio={self.max_area_ratio}, ' + repr_str += f'aspect_range={self.aspect_range}, ' + repr_str += f'mode={self.mode}, ' + repr_str += f'fill_color={self.fill_color}, ' + repr_str += f'fill_std={self.fill_std})' + return repr_str diff --git a/mmaction/models/backbones/__init__.py b/mmaction/models/backbones/__init__.py index 30301b2b28..6a2c7b526a 100644 --- a/mmaction/models/backbones/__init__.py +++ b/mmaction/models/backbones/__init__.py @@ -4,6 +4,7 @@ from .c3d import C3D from .mobilenet_v2 import MobileNetV2 from .mobilenet_v2_tsm import MobileNetV2TSM +from .mvit import MViT from .resnet import ResNet from .resnet2plus1d import ResNet2Plus1d from .resnet3d import ResNet3d, ResNet3dLayer @@ -24,5 +25,6 @@ 'C2D', 'C3D', 'ResNet', 'ResNet3d', 'ResNetTSM', 'ResNet2Plus1d', 'ResNet3dSlowFast', 'ResNet3dSlowOnly', 'ResNet3dCSN', 'ResNetTIN', 'X3D', 'ResNet3dLayer', 'MobileNetV2TSM', 'MobileNetV2', 'TANet', 'TimeSformer', - 'STGCN', 'AGCN', 'ResNetAudio', 'SwinTransformer3D', 'VisionTransformer' + 'STGCN', 'AGCN', 'ResNetAudio', 'SwinTransformer3D', 'VisionTransformer', + 'MViT' ] diff --git a/mmaction/models/backbones/mvit.py b/mmaction/models/backbones/mvit.py new file mode 100644 index 0000000000..95f917f136 --- /dev/null +++ b/mmaction/models/backbones/mvit.py @@ -0,0 +1,860 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List, Optional, Sequence, Tuple, Union + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import build_activation_layer, build_norm_layer +from mmcv.cnn.bricks import DropPath +from mmengine.model import BaseModule, ModuleList +from mmengine.model.weight_init import trunc_normal_ +from mmengine.utils import to_3tuple + +from mmaction.registry import MODELS +from ..utils.embed import PatchEmbed3D + + +def resize_pos_embed(pos_embed: torch.Tensor, + src_shape: Tuple[int], + dst_shape: Tuple[int], + mode: str = 'trilinear', + num_extra_tokens: int = 1) -> torch.Tensor: + """Resize pos_embed weights. + + Args: + pos_embed (torch.Tensor): Position embedding weights with shape + [1, L, C]. + src_shape (tuple): The resolution of downsampled origin training + image, in format (T, H, W). + dst_shape (tuple): The resolution of downsampled new training + image, in format (T, H, W). + mode (str): Algorithm used for upsampling. Choose one from 'nearest', + 'linear', 'bilinear', 'bicubic' and 'trilinear'. + Defaults to 'trilinear'. + num_extra_tokens (int): The number of extra tokens, such as cls_token. + Defaults to 1. + + Returns: + torch.Tensor: The resized pos_embed of shape [1, L_new, C] + """ + if src_shape[0] == dst_shape[0] and src_shape[1] == dst_shape[1] \ + and src_shape[2] == dst_shape[2]: + return pos_embed + assert pos_embed.ndim == 3, 'shape of pos_embed must be [1, L, C]' + _, L, C = pos_embed.shape + src_t, src_h, src_w = src_shape + assert L == src_t * src_h * src_w + num_extra_tokens, \ + f"The length of `pos_embed` ({L}) doesn't match the expected " \ + f'shape ({src_t}*{src_h}*{src_w}+{num_extra_tokens}).' \ + 'Please check the `img_size` argument.' 
+ extra_tokens = pos_embed[:, :num_extra_tokens] + + src_weight = pos_embed[:, num_extra_tokens:] + src_weight = src_weight.reshape(1, src_t, src_h, src_w, + C).permute(0, 4, 1, 2, 3) + + dst_weight = F.interpolate( + src_weight, size=dst_shape, align_corners=False, mode=mode) + dst_weight = torch.flatten(dst_weight, 2).transpose(1, 2) + + return torch.cat((extra_tokens, dst_weight), dim=1) + + +def resize_decomposed_rel_pos(rel_pos: torch.Tensor, q_size: int, + k_size: int) -> torch.Tensor: + """Get relative positional embeddings according to the relative positions + of query and key sizes. + + Args: + rel_pos (Tensor): relative position embeddings (L, C). + q_size (int): size of query q. + k_size (int): size of key k. + + Returns: + Extracted positional embeddings according to relative positions. + """ + max_rel_dist = int(2 * max(q_size, k_size) - 1) + # Interpolate rel pos if needed. + if rel_pos.shape[0] != max_rel_dist: + # Interpolate rel pos. + resized = F.interpolate( + # (L, C) -> (1, C, L) + rel_pos.transpose(0, 1).unsqueeze(0), + size=max_rel_dist, + mode='linear', + ) + # (1, C, L) -> (L, C) + resized = resized.squeeze(0).transpose(0, 1) + else: + resized = rel_pos + + # Scale the coords with short length if shapes for q and k are different. + q_h_ratio = max(k_size / q_size, 1.0) + k_h_ratio = max(q_size / k_size, 1.0) + q_coords = torch.arange(q_size)[:, None] * q_h_ratio + k_coords = torch.arange(k_size)[None, :] * k_h_ratio + relative_coords = (q_coords - k_coords) + (k_size - 1) * k_h_ratio + + return resized[relative_coords.long()] + + +def add_decomposed_rel_pos(attn: torch.Tensor, + q: torch.Tensor, + q_shape: Sequence[int], + k_shape: Sequence[int], + rel_pos_h: torch.Tensor, + rel_pos_w: torch.Tensor, + rel_pos_t: torch.Tensor, + with_cls_token: bool = False) -> torch.Tensor: + """Spatiotemporal Relative Positional Embeddings.""" + sp_idx = 1 if with_cls_token else 0 + B, num_heads, _, C = q.shape + q_t, q_h, q_w = q_shape + k_t, k_h, k_w = k_shape + + Rt = resize_decomposed_rel_pos(rel_pos_t, q_t, k_t) + Rh = resize_decomposed_rel_pos(rel_pos_h, q_h, k_h) + Rw = resize_decomposed_rel_pos(rel_pos_w, q_w, k_w) + + r_q = q[:, :, sp_idx:].reshape(B, num_heads, q_t, q_h, q_w, C) + rel_t = torch.einsum('bythwc,tkc->bythwk', r_q, Rt) + rel_h = torch.einsum('bythwc,hkc->bythwk', r_q, Rh) + rel_w = torch.einsum('bythwc,wkc->bythwk', r_q, Rw) + rel_pos_embed = ( + rel_t[:, :, :, :, :, :, None, None] + + rel_h[:, :, :, :, :, None, :, None] + + rel_w[:, :, :, :, :, None, None, :]) + + attn_map = attn[:, :, sp_idx:, sp_idx:].view(B, -1, q_t, q_h, q_w, k_t, + k_h, k_w) + attn_map += rel_pos_embed + attn[:, :, sp_idx:, sp_idx:] = attn_map.view(B, -1, q_t * q_h * q_w, + k_t * k_h * k_w) + + return attn + + +class MLP(BaseModule): + """Two-layer multilayer perceptron. + + Comparing with :class:`mmcv.cnn.bricks.transformer.FFN`, this class allows + different input and output channel numbers. + + Args: + in_channels (int): The number of input channels. + hidden_channels (int, optional): The number of hidden layer channels. + If None, same as the ``in_channels``. Defaults to None. + out_channels (int, optional): The number of output channels. If None, + same as the ``in_channels``. Defaults to None. + act_cfg (dict): The config of activation function. + Defaults to ``dict(type='GELU')``. + init_cfg (dict, optional): The config of weight initialization. + Defaults to None. 
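+
+    Example (editorial illustration, not part of the original docstring; shapes
+    assume the default GELU activation):
+        >>> import torch
+        >>> mlp = MLP(96, hidden_channels=384, out_channels=192)
+        >>> mlp(torch.rand(2, 10, 96)).shape
+        torch.Size([2, 10, 192])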
+ """ + + def __init__(self, + in_channels: int, + hidden_channels: Optional[int] = None, + out_channels: Optional[int] = None, + act_cfg: Dict = dict(type='GELU'), + init_cfg: Optional[Union[Dict, List[Dict]]] = None) -> None: + super().__init__(init_cfg=init_cfg) + out_channels = out_channels or in_channels + hidden_channels = hidden_channels or in_channels + self.fc1 = nn.Linear(in_channels, hidden_channels) + self.act = build_activation_layer(act_cfg) + self.fc2 = nn.Linear(hidden_channels, out_channels) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.fc1(x) + x = self.act(x) + x = self.fc2(x) + return x + + +def attention_pool(x: torch.Tensor, + pool: nn.Module, + in_size: Tuple[int], + with_cls_token: bool = False, + norm: Optional[nn.Module] = None) -> tuple: + """Pooling the feature tokens. + + Args: + x (torch.Tensor): The input tensor, should be with shape + ``(B, num_heads, L, C)`` or ``(B, L, C)``. + pool (nn.Module): The pooling module. + in_size (Tuple[int]): The shape of the input feature map. + with_cls_token (bool): Whether concatenating class token into video + tokens as transformer input. Defaults to True. + norm (nn.Module, optional): The normalization module. + Defaults to None. + """ + ndim = x.ndim + if ndim == 4: + B, num_heads, L, C = x.shape + elif ndim == 3: + num_heads = 1 + B, L, C = x.shape + x = x.unsqueeze(1) + else: + raise RuntimeError(f'Unsupported input dimension {x.shape}') + + T, H, W = in_size + assert L == T * H * W + with_cls_token + + if with_cls_token: + cls_tok, x = x[:, :, :1, :], x[:, :, 1:, :] + + # (B, num_heads, T*H*W, C) -> (B*num_heads, C, T, H, W) + x = x.reshape(B * num_heads, T, H, W, C).permute(0, 4, 1, 2, + 3).contiguous() + x = pool(x) + out_size = x.shape[2:] + + # (B*num_heads, C, T', H', W') -> (B, num_heads, T'*H'*W', C) + x = x.reshape(B, num_heads, C, -1).transpose(2, 3) + + if with_cls_token: + x = torch.cat((cls_tok, x), dim=2) + + if norm is not None: + x = norm(x) + + if ndim == 3: + x = x.squeeze(1) + + return x, out_size + + +class MultiScaleAttention(BaseModule): + """Multiscale Multi-head Attention block. + + Args: + in_dims (int): Number of input channels. + out_dims (int): Number of output channels. + num_heads (int): Number of attention heads. + qkv_bias (bool): If True, add a learnable bias to query, key and + value. Defaults to True. + norm_cfg (dict): The config of normalization layers. + Defaults to ``dict(type='LN')``. + pool_kernel (tuple): kernel size for qkv pooling layers. + Defaults to (3, 3, 3). + stride_q (int): stride size for q pooling layer. + Defaults to (1, 1, 1). + stride_kv (int): stride size for kv pooling layer. + Defaults to (1, 1, 1). + rel_pos_embed (bool): Whether to enable the spatial and temporal + relative position embedding. Defaults to True. + residual_pooling (bool): Whether to enable the residual connection + after attention pooling. Defaults to True. + input_size (Tuple[int], optional): The input resolution, necessary + if enable the ``rel_pos_embed``. Defaults to None. + rel_pos_zero_init (bool): If True, zero initialize relative + positional parameters. Defaults to False. + with_cls_token (bool): Whether concatenating class token into video + tokens as transformer input. Defaults to True. + init_cfg (dict, optional): The config of weight initialization. + Defaults to None. 
+ """ + + def __init__(self, + in_dims: int, + out_dims: int, + num_heads: int, + qkv_bias: bool = True, + norm_cfg: Dict = dict(type='LN'), + pool_kernel: Tuple[int] = (3, 3, 3), + stride_q: Tuple[int] = (1, 1, 1), + stride_kv: Tuple[int] = (1, 1, 1), + rel_pos_embed: bool = True, + residual_pooling: bool = True, + input_size: Optional[Tuple[int]] = None, + rel_pos_zero_init: bool = False, + with_cls_token: bool = True, + init_cfg: Optional[dict] = None) -> None: + super().__init__(init_cfg=init_cfg) + self.num_heads = num_heads + self.with_cls_token = with_cls_token + self.in_dims = in_dims + self.out_dims = out_dims + + head_dim = out_dims // num_heads + self.scale = head_dim**-0.5 + + self.qkv = nn.Linear(in_dims, out_dims * 3, bias=qkv_bias) + self.proj = nn.Linear(out_dims, out_dims) + + # qkv pooling + pool_padding = [k // 2 for k in pool_kernel] + pool_dims = out_dims // num_heads + + def build_pooling(stride): + pool = nn.Conv3d( + pool_dims, + pool_dims, + pool_kernel, + stride=stride, + padding=pool_padding, + groups=pool_dims, + bias=False, + ) + norm = build_norm_layer(norm_cfg, pool_dims)[1] + return pool, norm + + self.pool_q, self.norm_q = build_pooling(stride_q) + self.pool_k, self.norm_k = build_pooling(stride_kv) + self.pool_v, self.norm_v = build_pooling(stride_kv) + + self.residual_pooling = residual_pooling + + self.rel_pos_embed = rel_pos_embed + self.rel_pos_zero_init = rel_pos_zero_init + if self.rel_pos_embed: + # initialize relative positional embeddings + assert input_size[1] == input_size[2] + + size = input_size[1] + rel_dim = 2 * max(size // stride_q[1], size // stride_kv[1]) - 1 + self.rel_pos_h = nn.Parameter(torch.zeros(rel_dim, head_dim)) + self.rel_pos_w = nn.Parameter(torch.zeros(rel_dim, head_dim)) + self.rel_pos_t = nn.Parameter( + torch.zeros(2 * input_size[0] - 1, head_dim)) + + def init_weights(self) -> None: + """Weight initialization.""" + super().init_weights() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # Suppress rel_pos_zero_init if use pretrained model. + return + + if not self.rel_pos_zero_init: + trunc_normal_(self.rel_pos_h, std=0.02) + trunc_normal_(self.rel_pos_w, std=0.02) + trunc_normal_(self.rel_pos_t, std=0.02) + + def forward(self, x: torch.Tensor, in_size: Tuple[int]) -> tuple: + """Forward the MultiScaleAttention.""" + B, N, _ = x.shape # (B, H*W, C) + + # qkv: (B, H*W, 3, num_heads, C) + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, -1) + # q, k, v: (B, num_heads, H*W, C) + q, k, v = qkv.permute(2, 0, 3, 1, 4).unbind(0) + + q, q_shape = attention_pool( + q, + self.pool_q, + in_size, + norm=self.norm_q, + with_cls_token=self.with_cls_token) + k, k_shape = attention_pool( + k, + self.pool_k, + in_size, + norm=self.norm_k, + with_cls_token=self.with_cls_token) + v, v_shape = attention_pool( + v, + self.pool_v, + in_size, + norm=self.norm_v, + with_cls_token=self.with_cls_token) + + attn = (q * self.scale) @ k.transpose(-2, -1) + if self.rel_pos_embed: + attn = add_decomposed_rel_pos(attn, q, q_shape, k_shape, + self.rel_pos_h, self.rel_pos_w, + self.rel_pos_t, self.with_cls_token) + + attn = attn.softmax(dim=-1) + x = attn @ v + + if self.residual_pooling: + if self.with_cls_token: + x[:, :, 1:, :] += q[:, :, 1:, :] + else: + x = x + q + + # (B, num_heads, H'*W', C'//num_heads) -> (B, H'*W', C') + x = x.transpose(1, 2).reshape(B, -1, self.out_dims) + x = self.proj(x) + + return x, q_shape + + +class MultiScaleBlock(BaseModule): + """Multiscale Transformer blocks. 
+ + Args: + in_dims (int): Number of input channels. + out_dims (int): Number of output channels. + num_heads (int): Number of attention heads. + mlp_ratio (float): Ratio of hidden dimensions in MLP layers. + Defaults to 4.0. + qkv_bias (bool): If True, add a learnable bias to query, key and + value. Defaults to True. + drop_path (float): Stochastic depth rate. Defaults to 0. + norm_cfg (dict): The config of normalization layers. + Defaults to ``dict(type='LN')``. + act_cfg (dict): The config of activation function. + Defaults to ``dict(type='GELU')``. + qkv_pool_kernel (tuple): kernel size for qkv pooling layers. + Defaults to (3, 3, 3). + stride_q (int): stride size for q pooling layer. + Defaults to (1, 1, 1). + stride_kv (int): stride size for kv pooling layer. + Defaults to (1, 1, 1). + rel_pos_embed (bool): Whether to enable the spatial relative + position embedding. Defaults to True. + residual_pooling (bool): Whether to enable the residual connection + after attention pooling. Defaults to True. + with_cls_token (bool): Whether concatenating class token into video + tokens as transformer input. Defaults to True. + dim_mul_in_attention (bool): Whether to multiply the ``embed_dims`` in + attention layers. If False, multiply it in MLP layers. + Defaults to True. + input_size (Tuple[int], optional): The input resolution, necessary + if enable the ``rel_pos_embed``. Defaults to None. + rel_pos_zero_init (bool): If True, zero initialize relative + positional parameters. Defaults to False. + init_cfg (dict, optional): The config of weight initialization. + Defaults to None. + """ + + def __init__( + self, + in_dims: int, + out_dims: int, + num_heads: int, + mlp_ratio: float = 4.0, + qkv_bias: bool = True, + drop_path: float = 0.0, + norm_cfg: Dict = dict(type='LN'), + act_cfg: Dict = dict(type='GELU'), + qkv_pool_kernel: Tuple = (3, 3, 3), + stride_q: Tuple = (1, 1, 1), + stride_kv: Tuple = (1, 1, 1), + rel_pos_embed: bool = True, + residual_pooling: bool = True, + with_cls_token: bool = True, + dim_mul_in_attention: bool = True, + input_size: Optional[Tuple[int]] = None, + rel_pos_zero_init: bool = False, + init_cfg: Optional[Dict] = None, + ) -> None: + super().__init__(init_cfg=init_cfg) + self.with_cls_token = with_cls_token + self.in_dims = in_dims + self.out_dims = out_dims + self.norm1 = build_norm_layer(norm_cfg, in_dims)[1] + self.dim_mul_in_attention = dim_mul_in_attention + + attn_dims = out_dims if dim_mul_in_attention else in_dims + self.attn = MultiScaleAttention( + in_dims, + attn_dims, + num_heads=num_heads, + qkv_bias=qkv_bias, + norm_cfg=norm_cfg, + pool_kernel=qkv_pool_kernel, + stride_q=stride_q, + stride_kv=stride_kv, + rel_pos_embed=rel_pos_embed, + residual_pooling=residual_pooling, + input_size=input_size, + rel_pos_zero_init=rel_pos_zero_init) + self.drop_path = DropPath( + drop_path) if drop_path > 0.0 else nn.Identity() + + self.norm2 = build_norm_layer(norm_cfg, attn_dims)[1] + + self.mlp = MLP( + in_channels=attn_dims, + hidden_channels=int(attn_dims * mlp_ratio), + out_channels=out_dims, + act_cfg=act_cfg) + + if in_dims != out_dims: + self.proj = nn.Linear(in_dims, out_dims) + else: + self.proj = None + + if np.prod(stride_q) > 1: + kernel_skip = [s + 1 if s > 1 else s for s in stride_q] + padding_skip = [int(skip // 2) for skip in kernel_skip] + self.pool_skip = nn.MaxPool3d( + kernel_skip, stride_q, padding_skip, ceil_mode=False) + + if input_size is not None: + input_size = to_3tuple(input_size) + out_size = [size // s for size, s in zip(input_size, 
stride_q)] + self.init_out_size = out_size + else: + self.init_out_size = None + else: + self.pool_skip = None + self.init_out_size = input_size + + def forward(self, x: torch.Tensor, in_size: Tuple[int]) -> tuple: + x_norm = self.norm1(x) + x_attn, out_size = self.attn(x_norm, in_size) + + if self.dim_mul_in_attention and self.proj is not None: + skip = self.proj(x_norm) + else: + skip = x + + if self.pool_skip is not None: + skip, _ = attention_pool( + skip, + self.pool_skip, + in_size, + with_cls_token=self.with_cls_token) + + x = skip + self.drop_path(x_attn) + x_norm = self.norm2(x) + x_mlp = self.mlp(x_norm) + + if not self.dim_mul_in_attention and self.proj is not None: + skip = self.proj(x_norm) + else: + skip = x + + x = skip + self.drop_path(x_mlp) + + return x, out_size + + +@MODELS.register_module() +class MViT(BaseModule): + """Multi-scale ViT v2. + + A PyTorch implement of : `MViTv2: Improved Multiscale Vision Transformers + for Classification and Detection `_ + + Inspiration from `the official implementation + `_ and `the mmclassification + implementation `_ + + Args: + arch (str | dict): MViT architecture. If use string, choose + from 'tiny', 'small', 'base' and 'large'. If use dict, it should + have below keys: + + - **embed_dims** (int): The dimensions of embedding. + - **num_layers** (int): The number of layers. + - **num_heads** (int): The number of heads in attention + modules of the initial layer. + - **downscale_indices** (List[int]): The layer indices to downscale + the feature map. + + Defaults to 'base'. + spatial_size (int): The expected input spatial_size shape. + Defaults to 224. + temporal_size (int): The expected input temporal_size shape. + Defaults to 224. + in_channels (int): The num of input channels. Defaults to 3. + out_scales (int | Sequence[int]): The output scale indices. + They should not exceed the length of ``downscale_indices``. + Defaults to -1, which means the last scale. + drop_path_rate (float): Stochastic depth rate. Defaults to 0.1. + use_abs_pos_embed (bool): If True, add absolute position embedding to + the patch embedding. Defaults to False. + interpolate_mode (str): Select the interpolate mode for absolute + position embedding vector resize. Defaults to "trilinear". + pool_kernel (tuple): kernel size for qkv pooling layers. + Defaults to (3, 3, 3). + dim_mul (int): The magnification for ``embed_dims`` in the downscale + layers. Defaults to 2. + head_mul (int): The magnification for ``num_heads`` in the downscale + layers. Defaults to 2. + adaptive_kv_stride (int): The stride size for kv pooling in the initial + layer. Defaults to (1, 8, 8). + rel_pos_embed (bool): Whether to enable the spatial and temporal + relative position embedding. Defaults to True. + residual_pooling (bool): Whether to enable the residual connection + after attention pooling. Defaults to True. + dim_mul_in_attention (bool): Whether to multiply the ``embed_dims`` in + attention layers. If False, multiply it in MLP layers. + Defaults to True. + with_cls_token (bool): Whether concatenating class token into video + tokens as transformer input. Defaults to True. + output_cls_token (bool): Whether output the cls_token. If set True, + ``with_cls_token`` must be True. Defaults to True. + rel_pos_zero_init (bool): If True, zero initialize relative + positional parameters. Defaults to False. + mlp_ratio (float): Ratio of hidden dimensions in MLP layers. + Defaults to 4.0. + qkv_bias (bool): enable bias for qkv if True. Defaults to True. 
+ norm_cfg (dict): Config dict for normalization layer for all output + features. Defaults to ``dict(type='LN', eps=1e-6)``. + patch_cfg (dict): Config dict for the patch embedding layer. + Defaults to + ``dict(kernel_size=(3, 7, 7), + stride=(2, 4, 4), + padding=(1, 3, 3))``. + init_cfg (dict, optional): The Config for initialization. Defaults to + ``[ + dict(type='TruncNormal', layer=['Conv2d', 'Conv3d'], std=0.02), + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.02), + ]`` + + Examples: + >>> import torch + >>> from mmaction.registry import MODELS + >>> from mmaction.utils import register_all_modules + >>> register_all_modules() + >>> + >>> cfg = dict(type='MViT', arch='tiny', out_scales=[0, 1, 2, 3]) + >>> model = MODELS.build(cfg) + >>> model.init_weights() + >>> inputs = torch.rand(1, 3, 16, 224, 224) + >>> outputs = model(inputs) + >>> for i, output in enumerate(outputs): + >>> print(f'scale{i}: {output.shape}') + scale0: torch.Size([1, 96, 8, 56, 56]) + scale1: torch.Size([1, 192, 8, 28, 28]) + scale2: torch.Size([1, 384, 8, 14, 14]) + scale3: torch.Size([1, 768, 8, 7, 7]) + """ + arch_zoo = { + 'tiny': { + 'embed_dims': 96, + 'num_layers': 10, + 'num_heads': 1, + 'downscale_indices': [1, 3, 8] + }, + 'small': { + 'embed_dims': 96, + 'num_layers': 16, + 'num_heads': 1, + 'downscale_indices': [1, 3, 14] + }, + 'base': { + 'embed_dims': 96, + 'num_layers': 24, + 'num_heads': 1, + 'downscale_indices': [2, 5, 21] + }, + 'large': { + 'embed_dims': 144, + 'num_layers': 48, + 'num_heads': 2, + 'downscale_indices': [2, 8, 44] + }, + } + num_extra_tokens = 1 + + def __init__( + self, + arch: str = 'base', + spatial_size: int = 224, + temporal_size: int = 16, + in_channels: int = 3, + pretrained: Optional[str] = None, + out_scales: Union[int, Sequence[int]] = -1, + drop_path_rate: float = 0., + use_abs_pos_embed: bool = False, + interpolate_mode: str = 'trilinear', + pool_kernel: tuple = (3, 3, 3), + dim_mul: int = 2, + head_mul: int = 2, + adaptive_kv_stride: tuple = (1, 8, 8), + rel_pos_embed: bool = True, + residual_pooling: bool = True, + dim_mul_in_attention: bool = True, + with_cls_token: bool = True, + output_cls_token: bool = True, + rel_pos_zero_init: bool = False, + mlp_ratio: float = 4., + qkv_bias: bool = True, + norm_cfg: Dict = dict(type='LN', eps=1e-6), + patch_cfg: Dict = dict( + kernel_size=(3, 7, 7), stride=(2, 4, 4), padding=(1, 3, 3)), + init_cfg: Optional[Union[Dict, List[Dict]]] = [ + dict(type='TruncNormal', layer=['Conv2d', 'Conv3d'], std=0.02), + dict(type='TruncNormal', layer='Linear', std=0.02, bias=0.), + dict(type='Constant', layer='LayerNorm', val=1., bias=0.02), + ] + ) -> None: + if pretrained: + self.init_cfg = dict(type='Pretrained', checkpoint=pretrained) + super().__init__(init_cfg=init_cfg) + + if isinstance(arch, str): + arch = arch.lower() + assert arch in set(self.arch_zoo), \ + f'Arch {arch} is not in default archs {set(self.arch_zoo)}' + self.arch_settings = self.arch_zoo[arch] + else: + essential_keys = { + 'embed_dims', 'num_layers', 'num_heads', 'downscale_indices' + } + assert isinstance(arch, dict) and essential_keys <= set(arch), \ + f'Custom arch needs a dict with keys {essential_keys}' + self.arch_settings = arch + + self.embed_dims = self.arch_settings['embed_dims'] + self.num_layers = self.arch_settings['num_layers'] + self.num_heads = self.arch_settings['num_heads'] + self.downscale_indices = self.arch_settings['downscale_indices'] + self.num_scales = 
len(self.downscale_indices) + 1 + self.stage_indices = { + index - 1: i + for i, index in enumerate(self.downscale_indices) + } + self.stage_indices[self.num_layers - 1] = self.num_scales - 1 + self.use_abs_pos_embed = use_abs_pos_embed + self.interpolate_mode = interpolate_mode + + if isinstance(out_scales, int): + out_scales = [out_scales] + assert isinstance(out_scales, Sequence), \ + f'"out_scales" must by a sequence or int, ' \ + f'get {type(out_scales)} instead.' + for i, index in enumerate(out_scales): + if index < 0: + out_scales[i] = self.num_scales + index + assert 0 <= out_scales[i] <= self.num_scales, \ + f'Invalid out_scales {index}' + self.out_scales = sorted(list(out_scales)) + + # Set patch embedding + _patch_cfg = dict( + in_channels=in_channels, + input_size=(temporal_size, spatial_size, spatial_size), + embed_dims=self.embed_dims, + conv_type='Conv3d', + ) + _patch_cfg.update(patch_cfg) + self.patch_embed = PatchEmbed3D(**_patch_cfg) + self.patch_resolution = self.patch_embed.init_out_size + + # Set cls token + if output_cls_token: + assert with_cls_token is True, f'with_cls_token must be True if' \ + f'set output_cls_token to True, but got {with_cls_token}' + self.with_cls_token = with_cls_token + self.output_cls_token = output_cls_token + self.cls_token = nn.Parameter(torch.zeros(1, 1, self.embed_dims)) + + # Set absolute position embedding + if self.use_abs_pos_embed: + num_patches = np.prod(self.patch_resolution) + self.pos_embed = nn.Parameter( + torch.zeros(1, num_patches + self.num_extra_tokens, + self.embed_dims)) + + # stochastic depth decay rule + dpr = np.linspace(0, drop_path_rate, self.num_layers) + + self.blocks = ModuleList() + out_dims_list = [self.embed_dims] + num_heads = self.num_heads + stride_kv = adaptive_kv_stride + input_size = self.patch_resolution + for i in range(self.num_layers): + if i in self.downscale_indices: + num_heads *= head_mul + stride_q = [1, 2, 2] + stride_kv = [max(s // 2, 1) for s in stride_kv] + else: + stride_q = [1, 1, 1] + + # Set output embed_dims + if dim_mul_in_attention and i in self.downscale_indices: + # multiply embed_dims in downscale layers. + out_dims = out_dims_list[-1] * dim_mul + elif not dim_mul_in_attention and i + 1 in self.downscale_indices: + # multiply embed_dims before downscale layers. + out_dims = out_dims_list[-1] * dim_mul + else: + out_dims = out_dims_list[-1] + + attention_block = MultiScaleBlock( + in_dims=out_dims_list[-1], + out_dims=out_dims, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + drop_path=dpr[i], + norm_cfg=norm_cfg, + qkv_pool_kernel=pool_kernel, + stride_q=stride_q, + stride_kv=stride_kv, + rel_pos_embed=rel_pos_embed, + residual_pooling=residual_pooling, + dim_mul_in_attention=dim_mul_in_attention, + input_size=input_size, + rel_pos_zero_init=rel_pos_zero_init) + self.blocks.append(attention_block) + + input_size = attention_block.init_out_size + out_dims_list.append(out_dims) + + if i in self.stage_indices: + stage_index = self.stage_indices[i] + if stage_index in self.out_scales: + norm_layer = build_norm_layer(norm_cfg, out_dims)[1] + self.add_module(f'norm{stage_index}', norm_layer) + + def init_weights(self, pretrained: Optional[str] = None) -> None: + super().init_weights() + + if (isinstance(self.init_cfg, dict) + and self.init_cfg['type'] == 'Pretrained'): + # Suppress default init if use pretrained model. 
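+            # (the checkpoint given in the `Pretrained` init_cfg then provides pos_embed
+            # and the other weights, so the trunc-normal init below is skipped)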
+ return + + if self.use_abs_pos_embed: + trunc_normal_(self.pos_embed, std=0.02) + + def forward(self, x: torch.Tensor) ->\ + Tuple[Union[torch.Tensor, List[torch.Tensor]]]: + """Forward the MViT.""" + B = x.shape[0] + x, patch_resolution = self.patch_embed(x) + + cls_tokens = self.cls_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, x), dim=1) + + if self.use_abs_pos_embed: + x = x + resize_pos_embed( + self.pos_embed, + self.patch_resolution, + patch_resolution, + mode=self.interpolate_mode, + num_extra_tokens=self.num_extra_tokens) + + if not self.with_cls_token: + # Remove class token for transformer encoder input + x = x[:, 1:] + + outs = [] + for i, block in enumerate(self.blocks): + x, patch_resolution = block(x, patch_resolution) + + if i in self.stage_indices: + stage_index = self.stage_indices[i] + if stage_index in self.out_scales: + B, _, C = x.shape + x = getattr(self, f'norm{stage_index}')(x) + tokens = x.transpose(1, 2) + if self.with_cls_token: + patch_token = tokens[:, :, 1:].reshape( + B, C, *patch_resolution) + cls_token = tokens[:, :, 0] + else: + patch_token = tokens.reshape(B, C, *patch_resolution) + cls_token = None + if self.output_cls_token: + out = [patch_token, cls_token] + else: + out = patch_token + outs.append(out) + + return tuple(outs) diff --git a/mmaction/models/heads/__init__.py b/mmaction/models/heads/__init__.py index 79f852dc26..c803fc8561 100644 --- a/mmaction/models/heads/__init__.py +++ b/mmaction/models/heads/__init__.py @@ -1,6 +1,7 @@ # Copyright (c) OpenMMLab. All rights reserved. from .base import BaseHead from .i3d_head import I3DHead +from .mvit_head import MViTHead from .slowfast_head import SlowFastHead from .stgcn_head import STGCNHead from .timesformer_head import TimeSformerHead @@ -13,5 +14,6 @@ __all__ = [ 'TSNHead', 'I3DHead', 'BaseHead', 'TSMHead', 'SlowFastHead', 'TPNHead', - 'X3DHead', 'TRNHead', 'TimeSformerHead', 'STGCNHead', 'TSNAudioHead' + 'X3DHead', 'TRNHead', 'TimeSformerHead', 'STGCNHead', 'TSNAudioHead', + 'MViTHead' ] diff --git a/mmaction/models/heads/mvit_head.py b/mmaction/models/heads/mvit_head.py new file mode 100644 index 0000000000..3797bb616d --- /dev/null +++ b/mmaction/models/heads/mvit_head.py @@ -0,0 +1,75 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Tuple + +from mmengine.model.weight_init import constant_init, trunc_normal_init +from torch import Tensor, nn + +from mmaction.registry import MODELS +from mmaction.utils import ConfigType +from .base import BaseHead + + +@MODELS.register_module() +class MViTHead(BaseHead): + """Classification head for Multi-scale ViT. + + A PyTorch implement of : `MViTv2: Improved Multiscale Vision Transformers + for Classification and Detection `_ + + Args: + num_classes (int): Number of classes to be classified. + in_channels (int): Number of channels in input feature. + loss_cls (dict or ConfigDict): Config for building loss. + Defaults to `dict(type='CrossEntropyLoss')`. + dropout_ratio (float): Probability of dropout layer. Default: 0.5. + init_std (float): Std value for Initiation. Defaults to 0.02. + kwargs (dict, optional): Any keyword argument to be used to initialize + the head. 
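+
+    Example (editorial sketch; the feature shapes assume the last MViT stage with
+    ``output_cls_token=True``, i.e. ``feats[-1]`` is ``[patch_token, cls_token]``):
+        >>> import torch
+        >>> from mmaction.utils import register_all_modules
+        >>> register_all_modules()
+        >>> head = MViTHead(num_classes=400, in_channels=768)
+        >>> feats = ([torch.rand(2, 768, 8, 7, 7), torch.rand(2, 768)], )
+        >>> head(feats).shape
+        torch.Size([2, 400])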
+ """ + + def __init__(self, + num_classes: int, + in_channels: int, + loss_cls: ConfigType = dict(type='CrossEntropyLoss'), + dropout_ratio: float = 0.5, + init_std: float = 0.02, + **kwargs) -> None: + super().__init__(num_classes, in_channels, loss_cls, **kwargs) + self.init_std = init_std + self.dropout_ratio = dropout_ratio + if self.dropout_ratio != 0: + self.dropout = nn.Dropout(p=self.dropout_ratio) + else: + self.dropout = None + self.fc_cls = nn.Linear(self.in_channels, self.num_classes) + + def init_weights(self) -> None: + """Initiate the parameters from scratch.""" + trunc_normal_init(self.fc_cls.weight, std=self.init_std) + constant_init(self.fc_cls.bias, 0.02) + + def pre_logits(self, feats: Tuple[List[Tensor]]) -> Tensor: + """The process before the final classification head. + + The input ``feats`` is a tuple of list of tensor, and each tensor is + the feature of a backbone stage. + """ + _, cls_token = feats[-1] + return cls_token + + def forward(self, x: Tuple[List[Tensor]], **kwargs) -> Tensor: + """Defines the computation performed at every call. + + Args: + x (Tuple[List[Tensor]]): The input data. + + Returns: + Tensor: The classification scores for input samples. + """ + x = self.pre_logits(x) + if self.dropout is not None: + x = self.dropout(x) + # [N, in_channels] + cls_score = self.fc_cls(x) + # [N, num_classes] + return cls_score diff --git a/mmaction/models/recognizers/recognizer3d.py b/mmaction/models/recognizers/recognizer3d.py index 9de211d618..81b86534ac 100644 --- a/mmaction/models/recognizers/recognizer3d.py +++ b/mmaction/models/recognizers/recognizer3d.py @@ -69,14 +69,22 @@ def extract_feat(self, feat, _ = self.neck(feat) feats.append(feat) view_ptr += max_testing_views - # should consider the case that feat is a tuple + # recursively traverse feats until it's a tensor, then concat + + def recursively_cat(feats): + out_feats = [] + for e_idx, elem in enumerate(feats[0]): + batch_elem = [feat[e_idx] for feat in feats] + if not isinstance(elem, torch.Tensor): + batch_elem = recursively_cat(batch_elem) + else: + batch_elem = torch.cat(batch_elem) + out_feats.append(batch_elem) + + return tuple(out_feats) + if isinstance(feats[0], tuple): - len_tuple = len(feats[0]) - feats = [ - torch.cat([each[i] for each in feats]) - for i in range(len_tuple) - ] - x = tuple(feats) + x = recursively_cat(feats) else: x = torch.cat(feats) else: diff --git a/mmaction/models/utils/__init__.py b/mmaction/models/utils/__init__.py index ed6ac50522..865ccbea99 100644 --- a/mmaction/models/utils/__init__.py +++ b/mmaction/models/utils/__init__.py @@ -1,6 +1,9 @@ # Copyright (c) OpenMMLab. All rights reserved. from .blending_utils import (BaseMiniBatchBlending, CutmixBlending, - MixupBlending) + MixupBlending, RandomBatchAugment) from .graph import Graph -__all__ = ['BaseMiniBatchBlending', 'CutmixBlending', 'MixupBlending', 'Graph'] +__all__ = [ + 'BaseMiniBatchBlending', 'CutmixBlending', 'MixupBlending', 'Graph', + 'RandomBatchAugment' +] diff --git a/mmaction/models/utils/blending_utils.py b/mmaction/models/utils/blending_utils.py index 94b929d7ff..64808d32f7 100644 --- a/mmaction/models/utils/blending_utils.py +++ b/mmaction/models/utils/blending_utils.py @@ -1,6 +1,8 @@ # Copyright (c) OpenMMLab. All rights reserved. 
from abc import ABCMeta, abstractmethod +from typing import Union +import numpy as np import torch import torch.nn.functional as F from torch import Tensor @@ -177,3 +179,69 @@ def do_blending(self, imgs: Tensor, label: Tensor, **kwargs) -> tuple: label = lam * label + (1 - lam) * label[rand_index, :] return imgs, label + + +@MODELS.register_module() +class RandomBatchAugment(BaseMiniBatchBlending): + """Randomly choose one batch augmentation to apply. + + Args: + augments (dict | list): configs of batch + augmentations. + probs (float | List[float] | None): The probabilities of each batch + augmentations. If None, choose evenly. Defaults to None. + + Example: + >>> augments_cfg = [ + ... dict(type='CutmixBlending', alpha=1., num_classes=10), + ... dict(type='MixupBlending', alpha=1., num_classes=10) + ... ] + >>> batch_augment = RandomBatchAugment(augments_cfg, probs=[0.5, 0.3]) + >>> imgs = torch.randn(16, 3, 8, 32, 32) + >>> label = torch.randint(0, 10, (16, )) + >>> imgs, label = batch_augment(imgs, label) + + .. note :: + + To decide which batch augmentation will be used, it picks one of + ``augments`` based on the probabilities. In the example above, the + probability to use CutmixBlending is 0.5, to use MixupBlending is 0.3, + and to do nothing is 0.2. + """ + + def __init__(self, augments: Union[dict, list], probs=None): + if not isinstance(augments, (tuple, list)): + augments = [augments] + + self.augments = [] + for aug in augments: + assert isinstance(aug, dict), \ + f'blending augment config must be a dict. Got {type(aug)}' + self.augments.append(MODELS.build(aug)) + + self.num_classes = augments[0].get('num_classes') + + if isinstance(probs, float): + probs = [probs] + + if probs is not None: + assert len(augments) == len(probs), \ + '``augments`` and ``probs`` must have same lengths. ' \ + f'Got {len(augments)} vs {len(probs)}.' + assert sum(probs) <= 1, \ + 'The total probability of batch augments exceeds 1.' + self.augments.append(None) + probs.append(1 - sum(probs)) + + self.probs = probs + + def do_blending(self, imgs: Tensor, label: Tensor, **kwargs) -> tuple: + """Randomly apply batch augmentations to the batch inputs and batch + data samples.""" + aug_index = np.random.choice(len(self.augments), p=self.probs) + aug = self.augments[aug_index] + + if aug is not None: + return aug.do_blending(imgs, label, **kwargs) + else: + return imgs, label diff --git a/mmaction/models/utils/embed.py b/mmaction/models/utils/embed.py new file mode 100644 index 0000000000..bfe805fb32 --- /dev/null +++ b/mmaction/models/utils/embed.py @@ -0,0 +1,234 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +import torch.nn as nn +import torch.nn.functional as F +from mmcv.cnn import build_conv_layer, build_norm_layer +from mmengine.model import BaseModule +from mmengine.utils import to_3tuple + + +class AdaptivePadding(nn.Module): + """Applies padding adaptively to the input. + + This module can make input get fully covered by filter + you specified. It support two modes "same" and "corner". The + "same" mode is same with "SAME" padding mode in TensorFlow, pad + zero around input. The "corner" mode would pad zero + to bottom right. + + Args: + kernel_size (int | tuple): Size of the kernel. Default: 1. + stride (int | tuple): Stride of the filter. Default: 1. + dilation (int | tuple): Spacing between kernel elements. + Default: 1. + padding (str): Support "same" and "corner", "corner" mode + would pad zero to bottom right, and "same" mode would + pad zero around input. 
Default: "corner". + + Example: + >>> kernel_size = 16 + >>> stride = 16 + >>> dilation = 1 + >>> input = torch.rand(1, 1, 15, 17) + >>> adap_pad = AdaptivePadding( + >>> kernel_size=kernel_size, + >>> stride=stride, + >>> dilation=dilation, + >>> padding="corner") + >>> out = adap_pad(input) + >>> assert (out.shape[2], out.shape[3]) == (16, 32) + >>> input = torch.rand(1, 1, 16, 17) + >>> out = adap_pad(input) + >>> assert (out.shape[2], out.shape[3]) == (16, 32) + """ + + def __init__(self, kernel_size=1, stride=1, dilation=1, padding='corner'): + super().__init__() + assert padding in ('same', 'corner') + + kernel_size = to_3tuple(kernel_size) + stride = to_3tuple(stride) + dilation = to_3tuple(dilation) + + self.padding = padding + self.kernel_size = kernel_size + self.stride = stride + self.dilation = dilation + + def get_pad_shape(self, input_shape): + """Calculate the padding size of input. + + Args: + input_shape (:obj:`torch.Size`): arrange as (H, W). + + Returns: + Tuple[int]: The padding size along the + original H and W directions + """ + input_t, input_h, input_w = input_shape + kernel_d, kernel_h, kernel_w = self.kernel_size + stride_d, stride_h, stride_w = self.stride + output_d = math.ceil(input_t / stride_d) + output_h = math.ceil(input_h / stride_h) + output_w = math.ceil(input_w / stride_w) + pad_d = max((output_d - 1) * stride_d + + (kernel_d - 1) * self.dilation[0] + 1 - input_t, 0) + pad_h = max((output_h - 1) * stride_h + + (kernel_h - 1) * self.dilation[1] + 1 - input_h, 0) + pad_w = max((output_w - 1) * stride_w + + (kernel_w - 1) * self.dilation[2] + 1 - input_w, 0) + return pad_d, pad_h, pad_w + + def forward(self, x): + """Add padding to `x` + + Args: + x (Tensor): Input tensor has shape (B, C, H, W). + + Returns: + Tensor: The tensor with adaptive padding + """ + pad_d, pad_h, pad_w = self.get_pad_shape(x.size()[-2:]) + if pad_d > 0 or pad_h > 0 or pad_w > 0: + if self.padding == 'corner': + x = F.pad(x, [0, pad_w, 0, pad_h, 0, pad_d]) + elif self.padding == 'same': + x = F.pad(x, [ + pad_w // 2, + pad_w - pad_w // 2, + pad_h // 2, + pad_h - pad_h // 2, + pad_d // 2, + pad_d - pad_d // 2, + ]) + return x + + +class PatchEmbed3D(BaseModule): + """Video to Patch Embedding. + + We use a conv layer to implement PatchEmbed. + + Args: + in_channels (int): The num of input channels. Default: 3 + embed_dims (int): The dimensions of embedding. Default: 768 + conv_type (str): The type of convolution + to generate patch embedding. Default: "Conv3d". + kernel_size (int): The kernel_size of embedding conv. + Default: (2, 4, 4). + stride (int): The slide stride of embedding conv. + Default: (2, 4, 4). + padding (int | tuple | string): The padding length of + embedding conv. When it is a string, it means the mode + of adaptive padding, support "same" and "corner" now. + Default: "corner". + dilation (int): The dilation rate of embedding conv. Default: 1. + bias (bool): Bias of embed conv. Default: True. + norm_cfg (dict, optional): Config dict for normalization layer. + Default: None. + input_size (int | tuple | None): The size of input, which will be + used to calculate the out size. Only works when `dynamic_size` + is False. Default: None. + init_cfg (`mmcv.ConfigDict`, optional): The Config for initialization. + Default: None. 
+ """ + + def __init__(self, + in_channels=3, + embed_dims=768, + conv_type='Conv3d', + kernel_size=(2, 4, 4), + stride=(2, 4, 4), + padding='corner', + dilation=1, + bias=True, + norm_cfg=None, + input_size=None, + init_cfg=None): + super().__init__(init_cfg=init_cfg) + + self.embed_dims = embed_dims + if stride is None: + stride = kernel_size + + kernel_size = to_3tuple(kernel_size) + stride = to_3tuple(stride) + dilation = to_3tuple(dilation) + + if isinstance(padding, str): + self.adaptive_padding = AdaptivePadding( + kernel_size=kernel_size, + stride=stride, + dilation=dilation, + padding=padding) + # disable the padding of conv + padding = 0 + else: + self.adaptive_padding = None + padding = to_3tuple(padding) + + self.projection = build_conv_layer( + dict(type=conv_type), + in_channels=in_channels, + out_channels=embed_dims, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + bias=bias) + + if norm_cfg is not None: + self.norm = build_norm_layer(norm_cfg, embed_dims)[1] + else: + self.norm = None + + if input_size: + input_size = to_3tuple(input_size) + # `init_out_size` would be used outside to + # calculate the num_patches + # e.g. when `use_abs_pos_embed` outside + self.init_input_size = input_size + if self.adaptive_padding: + pad_d, pad_h, pad_w = self.adaptive_padding.get_pad_shape( + input_size) + input_t, input_h, input_w = input_size + input_t = input_t + pad_d + input_h = input_h + pad_h + input_w = input_w + pad_w + input_size = (input_t, input_h, input_w) + + # https://pytorch.org/docs/stable/generated/torch.nn.Conv3d.html + t_out = (input_size[0] + 2 * padding[0] - dilation[0] * + (kernel_size[0] - 1) - 1) // stride[0] + 1 + h_out = (input_size[1] + 2 * padding[1] - dilation[1] * + (kernel_size[1] - 1) - 1) // stride[1] + 1 + w_out = (input_size[2] + 2 * padding[2] - dilation[2] * + (kernel_size[2] - 1) - 1) // stride[2] + 1 + self.init_out_size = (t_out, h_out, w_out) + else: + self.init_input_size = None + self.init_out_size = None + + def forward(self, x): + """ + Args: + x (Tensor): Has shape (B, C, T, H, W). In most case, C is 3. + + Returns: + tuple: Contains merged results and its spatial shape. + + - x (Tensor): Has shape (B, out_t * out_h * out_w, embed_dims) + - out_size (tuple[int]): Spatial shape of x, arrange as + (out_t, out_h, out_w). + """ + + if self.adaptive_padding: + x = self.adaptive_padding(x) + + x = self.projection(x) + out_size = (x.shape[2], x.shape[3], x.shape[4]) + x = x.flatten(2).transpose(1, 2) + if self.norm is not None: + x = self.norm(x) + return x, out_size diff --git a/tests/models/backbones/test_mvit.py b/tests/models/backbones/test_mvit.py new file mode 100644 index 0000000000..633cf73872 --- /dev/null +++ b/tests/models/backbones/test_mvit.py @@ -0,0 +1,134 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import math +from copy import deepcopy +from unittest import TestCase + +import torch + +from mmaction.models import MViT + + +class TestMViT(TestCase): + + def setUp(self): + self.cfg = dict(arch='tiny', drop_path_rate=0.1) + + def test_structure(self): + # Test invalid default arch + with self.assertRaisesRegex(AssertionError, 'not in default archs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = 'unknown' + MViT(**cfg) + + # Test invalid custom arch + with self.assertRaisesRegex(AssertionError, 'Custom arch needs'): + cfg = deepcopy(self.cfg) + cfg['arch'] = { + 'num_layers': 24, + 'num_heads': 16, + 'feedforward_channels': 4096 + } + MViT(**cfg) + + # Test custom arch + cfg = deepcopy(self.cfg) + cfg['arch'] = { + 'embed_dims': 96, + 'num_layers': 10, + 'num_heads': 1, + 'downscale_indices': [2, 5, 8] + } + stage_indices = [0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3] + model = MViT(**cfg) + self.assertEqual(model.embed_dims, 96) + self.assertEqual(model.num_layers, 10) + for i, block in enumerate(model.blocks): + stage = stage_indices[i] + self.assertEqual(block.out_dims, 96 * 2**(stage)) + + # Test out_indices + cfg = deepcopy(self.cfg) + cfg['out_scales'] = {1: 1} + with self.assertRaisesRegex(AssertionError, "get "): + MViT(**cfg) + cfg['out_scales'] = [0, 13] + with self.assertRaisesRegex(AssertionError, 'Invalid out_scales 13'): + MViT(**cfg) + + # Test model structure + cfg = deepcopy(self.cfg) + model = MViT(**cfg) + stage_indices = [0, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3] + self.assertEqual(len(model.blocks), 10) + dpr_inc = 0.1 / (10 - 1) + dpr = 0 + for i, block in enumerate(model.blocks): + stage = stage_indices[i] + print(i, stage) + self.assertEqual(block.attn.num_heads, 2**stage) + if dpr > 0: + self.assertAlmostEqual(block.drop_path.drop_prob, dpr) + dpr += dpr_inc + + def test_init_weights(self): + # test weight init cfg + cfg = deepcopy(self.cfg) + cfg['init_cfg'] = [ + dict( + type='Kaiming', + layer='Conv3d', + mode='fan_in', + nonlinearity='linear') + ] + cfg['use_abs_pos_embed'] = True + model = MViT(**cfg) + ori_weight = model.patch_embed.projection.weight.clone().detach() + # The pos_embed is all zero before initialize + self.assertTrue(torch.allclose(model.pos_embed, torch.tensor(0.))) + + model.init_weights() + initialized_weight = model.patch_embed.projection.weight + self.assertFalse(torch.allclose(ori_weight, initialized_weight)) + self.assertFalse(torch.allclose(model.pos_embed, torch.tensor(0.))) + + def test_forward(self): + imgs = torch.randn(1, 3, 6, 64, 64) + + cfg = deepcopy(self.cfg) + model = MViT(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + patch_token, cls_token = outs[-1] + self.assertEqual(patch_token.shape, (1, 768, 3, 2, 2)) + + # Test forward with multi out scales + cfg = deepcopy(self.cfg) + cfg['out_scales'] = (0, 1, 2, 3) + model = MViT(**cfg) + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 4) + for stage, out in enumerate(outs): + stride = 2**stage + patch_token, cls_token = out + self.assertEqual(patch_token.shape, + (1, 96 * stride, 3, 16 // stride, 16 // stride)) + self.assertEqual(cls_token.shape, (1, 96 * stride)) + + # Test forward with dynamic input size + imgs1 = torch.randn(1, 3, 2, 64, 64) + imgs2 = torch.randn(1, 3, 2, 96, 96) + imgs3 = torch.randn(1, 3, 2, 96, 128) + cfg = deepcopy(self.cfg) + model = MViT(**cfg) + for imgs in [imgs1, imgs2, imgs3]: + outs = model(imgs) + self.assertIsInstance(outs, tuple) + self.assertEqual(len(outs), 1) + patch_token, 
cls_token = outs[-1] + expect_feat_shape = (math.ceil(imgs.shape[2] / 2), + math.ceil(imgs.shape[3] / 32), + math.ceil(imgs.shape[4] / 32)) + self.assertEqual(patch_token.shape, (1, 768, *expect_feat_shape)) + self.assertEqual(cls_token.shape, (1, 768)) diff --git a/tests/models/heads/test_mvit_head.py b/tests/models/heads/test_mvit_head.py new file mode 100644 index 0000000000..8f64f5bf06 --- /dev/null +++ b/tests/models/heads/test_mvit_head.py @@ -0,0 +1,32 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from unittest import TestCase + +import torch +import torch.nn as nn + +from mmaction.models import MViTHead + + +class TestMViTHead(TestCase): + DEFAULT_ARGS = dict(in_channels=768, num_classes=5) + fake_feats = ([torch.rand(4, 768, 3, 2, 2), torch.rand(4, 768)], ) + + def test_init(self): + head = MViTHead(**self.DEFAULT_ARGS) + head.init_weights() + self.assertEqual(head.dropout.p, head.dropout_ratio) + self.assertIsInstance(head.fc_cls, nn.Linear) + self.assertEqual(head.num_classes, 5) + self.assertEqual(head.dropout_ratio, 0.5) + self.assertEqual(head.in_channels, 768) + self.assertEqual(head.init_std, 0.02) + + def test_pre_logits(self): + head = MViTHead(**self.DEFAULT_ARGS) + pre_logits = head.pre_logits(self.fake_feats) + self.assertIs(pre_logits, self.fake_feats[-1][1]) + + def test_forward(self): + head = MViTHead(**self.DEFAULT_ARGS) + cls_score = head(self.fake_feats) + self.assertEqual(cls_score.shape, (4, 5)) diff --git a/tests/models/utils/test_blending_utils.py b/tests/models/utils/test_blending_utils.py index 2c19267681..359d9225dc 100644 --- a/tests/models/utils/test_blending_utils.py +++ b/tests/models/utils/test_blending_utils.py @@ -1,8 +1,10 @@ # Copyright (c) OpenMMLab. All rights reserved. +import numpy as np +import pytest import torch from mmengine.structures import LabelData -from mmaction.models import CutmixBlending, MixupBlending +from mmaction.models import CutmixBlending, MixupBlending, RandomBatchAugment from mmaction.structures import ActionDataSample @@ -53,3 +55,41 @@ def test_cutmix(): mixed_imgs, mixed_label = mixup(imgs, label) assert mixed_imgs.shape == torch.Size((4, 4, 2, 3, 32, 32)) assert len(mixed_label) == 4 + + +def test_rand_blend(): + alpha_mixup = 0.2 + alpha_cutmix = 0.2 + num_classes = 10 + label = get_label(torch.randint(0, num_classes, (4, ))) + blending_augs = [ + dict(type='MixupBlending', alpha=alpha_mixup, num_classes=num_classes), + dict( + type='CutmixBlending', alpha=alpha_cutmix, num_classes=num_classes) + ] + + # test assertion + with pytest.raises(AssertionError): + rand_mix = RandomBatchAugment(blending_augs, [0.5, 0.6]) + + # mixup, cutmix + rand_mix = RandomBatchAugment(blending_augs, probs=None) + assert rand_mix.probs is None + + # mixup, cutmix and None + probs = [0.5, 0.4] + rand_mix = RandomBatchAugment(blending_augs, probs) + + np.testing.assert_allclose(rand_mix.probs[-1], 0.1) + + # test call + imgs = torch.randn(4, 4, 3, 32, 32) # NCHW imgs + mixed_imgs, mixed_label = rand_mix(imgs, label) + assert mixed_imgs.shape == torch.Size((4, 4, 3, 32, 32)) + assert len(mixed_label) == 4 + + imgs = torch.randn(4, 4, 2, 3, 32, 32) # NCTHW imgs + label = get_label(torch.randint(0, num_classes, (4, ))) + mixed_imgs, mixed_label = rand_mix(imgs, label) + assert mixed_imgs.shape == torch.Size((4, 4, 2, 3, 32, 32)) + assert len(mixed_label) == 4 From acff6a2897e10918b290721219f549d3d8065d65 Mon Sep 17 00:00:00 2001 From: cir7 <33249023+cir7@users.noreply.github.com> Date: Thu, 1 Dec 2022 17:55:20 +0800 Subject: [PATCH 
25/57] [Doc] fix sth-sth and jester dataset link (#2103) --- README.md | 6 +++--- tools/data/jester/README.md | 8 ++++---- tools/data/jester/README_zh-CN.md | 6 +++--- tools/data/sthv1/README.md | 8 ++++---- tools/data/sthv1/README_zh-CN.md | 4 ++-- tools/data/sthv2/README.md | 3 ++- tools/data/sthv2/README_zh-CN.md | 4 ++-- 7 files changed, 20 insertions(+), 19 deletions(-) diff --git a/README.md b/README.md index e25ea7be14..2956dcdf0b 100644 --- a/README.md +++ b/README.md @@ -165,10 +165,10 @@ If you have any feature requests, please feel free to leave a comment in [Issues Kinetics-[400/600/700] (Homepage) (CVPR'2017) - SthV1 (Homepage) (ICCV'2017) - SthV2 (Homepage) (ICCV'2017) + SthV1 (ICCV'2017) + SthV2 (Homepage) (ICCV'2017) Diving48 (Homepage) (ECCV'2018) - Jester (Homepage) (ICCV'2019) + Jester (Homepage) (ICCV'2019) Moments in Time (Homepage) (TPAMI'2019) diff --git a/tools/data/jester/README.md b/tools/data/jester/README.md index 2e054ab33d..e90841a850 100644 --- a/tools/data/jester/README.md +++ b/tools/data/jester/README.md @@ -14,18 +14,18 @@ } ``` -For basic dataset information, you can refer to the dataset [website](https://20bn.com/datasets/jester/v1). +For basic dataset information, you can refer to the dataset [website](https://developer.qualcomm.com/software/ai-datasets/jester). Before we start, please make sure that the directory is located at `$MMACTION2/tools/data/jester/`. ## Step 1. Prepare Annotations -First of all, you have to sign in and download annotations to `$MMACTION2/data/jester/annotations` on the official [website](https://20bn.com/datasets/jester/v1). +First of all, you have to sign in and download annotations to `$MMACTION2/data/jester/annotations` on the official [website](https://developer.qualcomm.com/software/ai-datasets/jester). ## Step 2. Prepare RGB Frames -Since the [jester website](https://20bn.com/datasets/jester/v1) doesn't provide the original video data and only extracted RGB frames are available, you have to directly download RGB frames from [jester website](https://20bn.com/datasets/jester/v1). +Since the [jester website](https://developer.qualcomm.com/software/ai-datasets/jester) doesn't provide the original video data and only extracted RGB frames are available, you have to directly download RGB frames from [jester website](https://developer.qualcomm.com/software/ai-datasets/jester). -You can download all RGB frame parts on [jester website](https://20bn.com/datasets/jester/v1) to `$MMACTION2/data/jester/` and use the following command to extract. +You can download all RGB frame parts on [jester website](https://developer.qualcomm.com/software/ai-datasets/jester) to `$MMACTION2/data/jester/` and use the following command to extract. ```shell cd $MMACTION2/data/jester/ diff --git a/tools/data/jester/README_zh-CN.md b/tools/data/jester/README_zh-CN.md index 4b3fb17f0b..7660c23ae7 100644 --- a/tools/data/jester/README_zh-CN.md +++ b/tools/data/jester/README_zh-CN.md @@ -14,16 +14,16 @@ } ``` -用户可以参照数据集 [官网](https://20bn.com/datasets/jester/v1),获取数据集相关的基本信息。 +用户可以参照数据集 [官网](https://developer.qualcomm.com/software/ai-datasets/jester),获取数据集相关的基本信息。 在准备数据集前,请确保命令行当前路径为 `$MMACTION2/tools/data/jester/`。 ## 步骤 1. 下载标注文件 -首先,用户需要在 [官网](https://20bn.com/datasets/jester/v1) 完成注册,才能下载标注文件。下载好的标注文件需要放在 `$MMACTION2/data/jester/annotations` 文件夹下。 +首先,用户需要在 [官网](https://developer.qualcomm.com/software/ai-datasets/jester) 完成注册,才能下载标注文件。下载好的标注文件需要放在 `$MMACTION2/data/jester/annotations` 文件夹下。 ## 步骤 2. 
准备 RGB 帧 -[jester 官网](https://20bn.com/datasets/jester/v1) 并未提供原始视频文件,只提供了对原视频文件进行抽取得到的 RGB 帧,用户可在 [jester 官网](https://20bn.com/datasets/jester/v1) 直接下载。 +[jester 官网](https://developer.qualcomm.com/software/ai-datasets/jester) 并未提供原始视频文件,只提供了对原视频文件进行抽取得到的 RGB 帧,用户可在 [jester 官网](https://developer.qualcomm.com/software/ai-datasets/jester) 直接下载。 将下载好的压缩文件放在 `$MMACTION2/data/jester/` 文件夹下,并使用以下脚本进行解压。 diff --git a/tools/data/sthv1/README.md b/tools/data/sthv1/README.md index 75f4c11134..a441c8bd91 100644 --- a/tools/data/sthv1/README.md +++ b/tools/data/sthv1/README.md @@ -15,18 +15,18 @@ } ``` -For basic dataset information, you can refer to the dataset [website](https://20bn.com/datasets/something-something/v1). +For basic dataset information, you can refer to the dataset [paper](https://arxiv.org/pdf/1706.04261.pdf). Before we start, please make sure that the directory is located at `$MMACTION2/tools/data/sthv1/`. ## Step 1. Prepare Annotations -First of all, you have to sign in and download annotations to `$MMACTION2/data/sthv1/annotations` on the official [website](https://20bn.com/datasets/something-something/v1). +Since the official [website](https://20bn.com/datasets/something-something/v1) of Something-Something V1 is currently unavailable, you can download the annotations from third-part source to `$MMACTION2/data/sthv1/` . ## Step 2. Prepare RGB Frames -Since the [sthv1 website](https://20bn.com/datasets/something-something/v1) doesn't provide the original video data and only extracted RGB frames are available, you have to directly download RGB frames from [sthv1 website](https://20bn.com/datasets/something-something/v1). +Since the official dataset doesn't provide the original video data and only extracted RGB frames are available, you have to directly download RGB frames. -You can download all compressed file parts on [sthv1 website](https://20bn.com/datasets/something-something/v1) to `$MMACTION2/data/sthv1/` and use the following command to uncompress. +You can download all compressed file parts from third-part source to `$MMACTION2/data/sthv1/` and use the following command to uncompress. ```shell cd $MMACTION2/data/sthv1/ diff --git a/tools/data/sthv1/README_zh-CN.md b/tools/data/sthv1/README_zh-CN.md index 11cc9318be..dc10fa4cc9 100644 --- a/tools/data/sthv1/README_zh-CN.md +++ b/tools/data/sthv1/README_zh-CN.md @@ -18,11 +18,11 @@ ## 步骤 1. 下载标注文件 -首先,用户需要在 [官网](https://20bn.com/datasets/something-something/v1) 进行注册,才能下载标注文件。下载好的标注文件需要放在 `$MMACTION2/data/sthv1/annotations` 文件夹下。 +由于 Something-Something V1 的官方网站已经失效,用户需要通过第三方源下载原始数据集。下载好的标注文件需要放在 `$MMACTION2/data/sthv1/annotations` 文件夹下。 ## 步骤 2. 准备 RGB 帧 -[官网](https://20bn.com/datasets/something-something/v1) 并未提供原始视频文件,只提供了对原视频文件进行抽取得到的 RGB 帧,用户可在 [官网](https://20bn.com/datasets/something-something/v1) 直接下载。 +官方数据集并未提供原始视频文件,只提供了对原视频文件进行抽取得到的 RGB 帧,用户可在第三方源直接下载视频帧。 将下载好的压缩文件放在 `$MMACTION2/data/sthv1/` 文件夹下,并使用以下脚本进行解压。 diff --git a/tools/data/sthv2/README.md b/tools/data/sthv2/README.md index af112872da..c382ce7630 100644 --- a/tools/data/sthv2/README.md +++ b/tools/data/sthv2/README.md @@ -15,7 +15,7 @@ } ``` -For basic dataset information, you can refer to the dataset [website](https://20bn.com/datasets/something-something/v2). +For basic dataset information, you can refer to the dataset [website](https://developer.qualcomm.com/software/ai-datasets/something-something). Before we start, please make sure that the directory is located at `$MMACTION2/tools/data/sthv2/`. ## Step 1. 
Prepare Annotations @@ -116,3 +116,4 @@ mmaction2 ``` For training and evaluating on Something-Something V2, please refer to [getting_started.md](/docs/getting_started.md). +s/getting_started.md). diff --git a/tools/data/sthv2/README_zh-CN.md b/tools/data/sthv2/README_zh-CN.md index 7d8080c5a4..bc48e10a11 100644 --- a/tools/data/sthv2/README_zh-CN.md +++ b/tools/data/sthv2/README_zh-CN.md @@ -15,12 +15,12 @@ } ``` -用户可参考该数据集的 [官网](https://20bn.com/datasets/something-something/v2),以获取数据集相关的基本信息。 +用户可参考该数据集的 [官网](https://developer.qualcomm.com/software/ai-datasets/something-something),以获取数据集相关的基本信息。 在数据集准备前,请确保命令行当前路径为 `$MMACTION2/tools/data/sthv2/`。 ## 步骤 1. 下载标注文件 -首先,用户需要在 [官网](https://20bn.com/datasets/something-something/v2) 完成注册,才能下载标注文件。下载好的标注文件需要放在 `$MMACTION2/data/sthv2/annotations` 文件夹下。 +首先,用户需要在 [官网](https://developer.qualcomm.com/software/ai-datasets/something-something) 完成注册,才能下载标注文件。下载好的标注文件需要放在 `$MMACTION2/data/sthv2/annotations` 文件夹下。 ## 步骤 2. 准备视频 From 0ecb89843b5b0e5719113c256c203a4ebad46975 Mon Sep 17 00:00:00 2001 From: cir7 <33249023+cir7@users.noreply.github.com> Date: Thu, 1 Dec 2022 19:43:53 +0800 Subject: [PATCH 26/57] [Doc] add mvit in readme (#2101) --- README.md | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 2956dcdf0b..d162eb29c6 100644 --- a/README.md +++ b/README.md @@ -64,7 +64,7 @@ The 1.x branch works with **PyTorch 1.6+**. - **Modular design**: We decompose a video understanding framework into different components. One can easily construct a customized video understanding framework by combining different modules. -- **Support four major video understanding tasks**: MMAction2 implements various algorithms for multiple video understanding tasks, including action recognition, action localization, spatio-temporal action detection, and skeleton-based action detection. We support **27** different algorithms and **20** different datasets for the four major tasks. +- **Support four major video understanding tasks**: MMAction2 implements various algorithms for multiple video understanding tasks, including action recognition, action localization, spatio-temporal action detection, and skeleton-based action detection. - **Well tested and documented**: We provide detailed documentation and API reference, as well as unit tests. 
@@ -114,6 +114,13 @@ Please refer to [install.md](https://mmaction2.readthedocs.io/en/1.x/get_started VideoSwin (CVPR'2022) VideoMAE (NeurIPS'2022) + + MViT V2 (CVPR'2022) + + + + + Action Localization From 0d6372380fe53ae58b498ea48fa2dbd43a6cf29c Mon Sep 17 00:00:00 2001 From: wxDai Date: Fri, 2 Dec 2022 15:26:49 +0800 Subject: [PATCH 27/57] [Refactor] Refactor STGCN and related pipelines (#2087) --- README.md | 1 + configs/_base_/models/stgcn.py | 14 - configs/skeleton/stgcn/README.md | 73 +- configs/skeleton/stgcn/metafile.yml | 417 ++++-- .../stgcn_1xb16-80e-ntu60-xsub-keypoint-3d.py | 69 - .../stgcn_1xb16-80e_ntu60-xsub-keypoint.py | 76 - .../stgcn/stgcn_8xb16-80e-babel60-wfl.py | 13 - .../stgcn/stgcn_8xb16-80e_babel120-wfl.py | 16 - .../stgcn/stgcn_8xb16-80e_babel120.py | 74 - .../skeleton/stgcn/stgcn_8xb16-80e_babel60.py | 74 - ...motion-u100-80e_ntu120-xsub-keypoint-2d.py | 67 + ...motion-u100-80e_ntu120-xsub-keypoint-3d.py | 67 + ...-motion-u100-80e_ntu60-xsub-keypoint-2d.py | 67 + ...-motion-u100-80e_ntu60-xsub-keypoint-3d.py | 67 + ...6-bone-u100-80e_ntu120-xsub-keypoint-2d.py | 67 + ...6-bone-u100-80e_ntu120-xsub-keypoint-3d.py | 67 + ...16-bone-u100-80e_ntu60-xsub-keypoint-2d.py | 67 + ...16-bone-u100-80e_ntu60-xsub-keypoint-3d.py | 67 + ...motion-u100-80e_ntu120-xsub-keypoint-2d.py | 67 + ...motion-u100-80e_ntu120-xsub-keypoint-3d.py | 67 + ...-motion-u100-80e_ntu60-xsub-keypoint-2d.py | 67 + ...-motion-u100-80e_ntu60-xsub-keypoint-3d.py | 67 + ...-joint-u100-80e_ntu120-xsub-keypoint-2d.py | 102 ++ ...-joint-u100-80e_ntu120-xsub-keypoint-3d.py | 102 ++ ...6-joint-u100-80e_ntu60-xsub-keypoint-2d.py | 102 ++ ...6-joint-u100-80e_ntu60-xsub-keypoint-3d.py | 102 ++ docs/en/user_guides/useful_tools.md | 21 + mmaction/datasets/transforms/__init__.py | 26 +- mmaction/datasets/transforms/formatting.py | 166 +-- mmaction/datasets/transforms/pose_loading.py | 683 --------- .../datasets/transforms/pose_transforms.py | 1289 +++++++++++++++++ mmaction/datasets/transforms/processing.py | 109 -- mmaction/models/backbones/stgcn.py | 423 +++--- mmaction/models/heads/__init__.py | 4 +- mmaction/models/heads/base.py | 6 - mmaction/models/heads/gcn_head.py | 71 + mmaction/models/heads/stgcn_head.py | 73 - mmaction/models/recognizers/recognizer_gcn.py | 29 +- mmaction/models/utils/__init__.py | 1 + mmaction/models/utils/gcn_utils.py | 162 +++ mmaction/models/utils/graph.py | 315 ++-- tests/datasets/transforms/test_formating.py | 74 +- ...ose_loading.py => test_pose_transforms.py} | 459 ++++-- tests/datasets/transforms/test_processing.py | 53 +- tests/models/backbones/test_agcn.py | 19 - tests/models/backbones/test_stgcn.py | 153 +- tests/models/heads/test_gcn_head.py | 27 + tests/models/heads/test_stgcn_head.py | 41 - .../models/recognizers/test_recognizer_gcn.py | 86 +- tools/analysis_tools/report_accuracy.py | 6 +- tools/test.py | 3 +- 51 files changed, 4031 insertions(+), 2307 deletions(-) delete mode 100644 configs/_base_/models/stgcn.py delete mode 100644 configs/skeleton/stgcn/stgcn_1xb16-80e-ntu60-xsub-keypoint-3d.py delete mode 100644 configs/skeleton/stgcn/stgcn_1xb16-80e_ntu60-xsub-keypoint.py delete mode 100644 configs/skeleton/stgcn/stgcn_8xb16-80e-babel60-wfl.py delete mode 100644 configs/skeleton/stgcn/stgcn_8xb16-80e_babel120-wfl.py delete mode 100644 configs/skeleton/stgcn/stgcn_8xb16-80e_babel120.py delete mode 100644 configs/skeleton/stgcn/stgcn_8xb16-80e_babel60.py create mode 100644 configs/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-2d.py create mode 
100644 configs/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-3d.py create mode 100644 configs/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d.py create mode 100644 configs/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d.py create mode 100644 configs/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-2d.py create mode 100644 configs/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-3d.py create mode 100644 configs/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d.py create mode 100644 configs/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d.py create mode 100644 configs/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-2d.py create mode 100644 configs/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-3d.py create mode 100644 configs/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d.py create mode 100644 configs/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d.py create mode 100644 configs/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-2d.py create mode 100644 configs/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-3d.py create mode 100644 configs/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py create mode 100644 configs/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d.py delete mode 100644 mmaction/datasets/transforms/pose_loading.py create mode 100644 mmaction/datasets/transforms/pose_transforms.py create mode 100644 mmaction/models/heads/gcn_head.py delete mode 100644 mmaction/models/heads/stgcn_head.py create mode 100644 mmaction/models/utils/gcn_utils.py rename tests/datasets/transforms/{test_pose_loading.py => test_pose_transforms.py} (60%) delete mode 100644 tests/models/backbones/test_agcn.py create mode 100644 tests/models/heads/test_gcn_head.py delete mode 100644 tests/models/heads/test_stgcn_head.py diff --git a/README.md b/README.md index d162eb29c6..7fa850bd5b 100644 --- a/README.md +++ b/README.md @@ -70,6 +70,7 @@ The 1.x branch works with **PyTorch 1.6+**. ## What's New +- (2022-11-30) We refine our skeleton-based pipelines and support the joint training of multi-stream skeleton information, including **joint, bone, joint-motion, and bone-motion**. - (2022-10-11) We support **Video Swin Transformer** on Kinetics400 and additionally train a Swin-L model on Kinetics700 to extract video features for downstream tasks. - (2022-10-25) We support **VideoMAE** on Kinetics400. - (2022-10-28) We support **C2D** on Kinetics400, achieve 73.57% Top-1 accuracy (higher than 71.8% in the [paper](https://arxiv.org/abs/1711.07971)). 
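The README entry above advertises joint training on four skeleton streams (joint, bone, joint-motion, bone-motion), and the new per-stream configs listed in this patch differ mainly in how the raw coordinates are converted before entering the GCN. As a rough illustration of that conversion (not the actual MMAction2 transforms; the joint pairs below are a made-up toy skeleton standing in for the real COCO/NTU layouts):

```python
# Sketch of how the four skeleton streams are conventionally derived from raw
# joint coordinates. The (child, parent) links below are a hypothetical toy
# skeleton; in the refactored pipelines this is handled by dataset transforms.
import numpy as np

PAIRS = [(1, 0), (2, 1), (3, 2)]  # toy 4-joint chain


def to_streams(joint):
    """joint: array of shape (T, V, C) -- T frames, V joints, C coordinates."""
    bone = np.zeros_like(joint)
    for child, parent in PAIRS:
        bone[:, child] = joint[:, child] - joint[:, parent]

    # Motion streams are frame-wise differences, zero-padded at the last frame.
    joint_motion = np.zeros_like(joint)
    joint_motion[:-1] = joint[1:] - joint[:-1]
    bone_motion = np.zeros_like(bone)
    bone_motion[:-1] = bone[1:] - bone[:-1]
    return dict(joint=joint, bone=bone,
                joint_motion=joint_motion, bone_motion=bone_motion)


streams = to_streams(np.random.rand(100, 4, 3))  # 100 frames, 4 joints, 3D
print({k: v.shape for k, v in streams.items()})
```

Each stream is then fed to an identically configured STGCN, and the per-stream scores are fused at test time with the weights quoted in the updated README below.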
diff --git a/configs/_base_/models/stgcn.py b/configs/_base_/models/stgcn.py deleted file mode 100644 index 78fbcf5d2c..0000000000 --- a/configs/_base_/models/stgcn.py +++ /dev/null @@ -1,14 +0,0 @@ -model = dict( - type='RecognizerGCN', - backbone=dict( - type='STGCN', - in_channels=3, - edge_importance_weighting=True, - graph_cfg=dict(layout='ntu-rgb+d', strategy='spatial')), - cls_head=dict( - type='STGCNHead', - num_classes=60, - in_channels=256, - loss_cls=dict(type='CrossEntropyLoss')), - train_cfg=None, - test_cfg=None) diff --git a/configs/skeleton/stgcn/README.md b/configs/skeleton/stgcn/README.md index 7387291b50..77d89cff88 100644 --- a/configs/skeleton/stgcn/README.md +++ b/configs/skeleton/stgcn/README.md @@ -18,23 +18,52 @@ Dynamics of human body skeletons convey significant information for human action ## Results and Models -### NTU60_XSub - -| frame sampling strategy | keypoint | gpus | backbone | Top-1 | config | ckpt | log | -| :---------------------: | :------: | :--: | :------: | :---: | :--------------------------------------------------: | :------------------------------------------------: | :------------------------------------------------: | -| padding 300 | 2d | 1 | STGCN | 86.91 | [config](/configs/skeleton/stgcn/stgcn_1xb16-80e_ntu60-xsub-keypoint.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_1xb16-80e_ntu60-xsub-keypoint/stgcn_1xb16-80e_ntu60-xsub-keypoint-e7bb9653.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_1xb16-80e_ntu60-xsub-keypoint/stgcn_1xb16-80e_ntu60-xsub-keypoint.log) | -| padding 300 | 3d | 1 | STGCN | 86.91 | [config](/configs/skeleton/stgcn/stgcn_1xb32-80e-ntu60_xsub-keypoint-3d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_1xb32-80e-ntu60_xsub-keypoint-3d/stgcn_1xb32-80e-ntu60_xsub-keypoint-3d-13e7ccf0.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_1xb32-80e-ntu60_xsub-keypoint-3d/stgcn_1xb32-80e-ntu60_xsub-keypoint-3d.log) | - -### BABEL - -| dataset | gpus | backbone | Top-1 | Mean Top-1 | Top-1 Official (AGCN) | Mean Top-1 Official (AGCN) | config | ckpt | log | -| :----------: | :--: | :------: | :-------: | :--------: | :-------------------: | :------------------------: | :------------------------------------: | :----------------------------------: | :----------------------------------: | -| babel60 | 8 | STGCN | **42.39** | **28.28** | 41.14 | 24.46 | [config](/configs/skeleton/stgcn/stgcn_8xb16-80e_babel60.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-80e_babel60/stgcn_8xb16-80e_babel60-3d206418.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-80e_babel60/stgcn_8xb16-80e_babel60.log) | -| babel60-wfl | 8 | STGCN | **40.31** | 29.79 | 33.41 | **30.42** | [config](/configs/skeleton/stgcn/stgcn_8xb16-80e-babel60-wfl.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-80e-babel60-wfl/stgcn_8xb16-80e-babel60-wfl-1a9102d7.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-80e_babel60/stgcn_8xb16-80e-babel60-wfl.log) | -| babel120 | 8 | ST-GCN | **38.95** | **20.58** | 38.41 | 17.56 | [config](/configs/skeleton/stgcn/stgcn_8xb16-80e_babel120.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-80e_babel120/stgcn_8xb16-80e_babel120-e41eb6d7.pth) | 
[log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-80e_babel60/stgcn_8xb16-80e_babel120.log) | -| babel120-wfl | 8 | ST-GCN | **33.00** | 24.33 | 27.91 | **26.17**\* | [config](/configs/skeleton/stgcn/stgcn_8xb16-80e_babel120-wfl.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-80e_babel120-wfl/stgcn_8xb16-80e_babel120-wfl-3f2c100d.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-80e_babel60/stgcn_8xb16-80e_babel120-wfl.log) | - -\* The number is copied from the [paper](https://arxiv.org/pdf/2106.09696.pdf), the performance of the [released checkpoints](https://github.com/abhinanda-punnakkal/BABEL/tree/main/action_recognition) for BABEL-120 is inferior. +### NTU60_XSub_2D + +| frame sampling strategy | modality | gpus | backbone | top1 acc | testing protocol | FLOPs | params | config | ckpt | log | +| :---------------------: | :----------: | :--: | :------: | :------: | :--------------: | :---: | :----: | :---------------------------------------: | :-------------------------------------: | :------------------------------------: | +| uniform 100 | joint | 8 | STGCN | 88.95 | 10 clips | 38.2G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d_20221129-484a394a.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.log) | +| uniform 100 | bone | 8 | STGCN | 91.69 | 10 clips | 38.2G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d_20221129-c4b44488.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d.log) | +| uniform 100 | joint-motion | 8 | STGCN | 86.90 | 10 clips | 38.2G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d_20221129-f18eb408.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d.log) | +| uniform 100 | bone-motion | 8 | STGCN | 87.86 | 10 clips | 38.2G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d_20221129-99c60e2d.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d.log) | +| | two-stream | | | 92.12 | | | | | | | +| | four-stream | | | 92.34 | | | | | | | + +### NTU60_XSub_3D + +| frame sampling strategy | modality | gpus | backbone | top1 acc | testing protocol | FLOPs | params | config | ckpt | log | +| 
:---------------------: | :----------: | :--: | :------: | :------: | :--------------: | :---: | :----: | :---------------------------------------: | :-------------------------------------: | :------------------------------------: | +| uniform 100 | joint | 8 | STGCN | 88.11 | 10 clips | 57.1G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d_20221129-850308e1.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d.log) | +| uniform 100 | bone | 8 | STGCN | 88.76 | 10 clips | 57.1G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d_20221129-9c8d2970.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d.log) | +| uniform 100 | joint-motion | 8 | STGCN | 86.06 | 10 clips | 57.1G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d_20221129-927648ea.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d.log) | +| uniform 100 | bone-motion | 8 | STGCN | 85.49 | 10 clips | 57.1G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d_20221129-593162ca.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d.log) | +| | two-stream | | | 90.14 | | | | | | | +| | four-stream | | | 90.39 | | | | | | | + +### NTU120_XSub_2D + +| frame sampling strategy | modality | gpus | backbone | top1 acc | testing protocol | FLOPs | params | config | ckpt | log | +| :---------------------: | :----------: | :--: | :------: | :------: | :--------------: | :---: | :----: | :---------------------------------------: | :-------------------------------------: | :------------------------------------: | +| uniform 100 | joint | 8 | STGCN | 83.19 | 10 clips | 38.2G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-2d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-2d/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-2d_20221129-612416c6.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-2d/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-2d.log) | +| uniform 100 | bone | 8 | STGCN | 83.36 | 10 clips | 38.2G | 3.1M | 
[config](/configs/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-2d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-2d/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-2d_20221129-131e63c3.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-2d/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-2d.log) | +| uniform 100 | joint-motion | 8 | STGCN | 78.87 | 10 clips | 38.2G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-2d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-2d/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-2d_20221129-7cb38ec2.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-2d/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-2d.log) | +| uniform 100 | bone-motion | 8 | STGCN | 79.55 | 10 clips | 38.2G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-2d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-2d/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-2d_20221129-f5b19892.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-2d/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-2d.log) | +| | two-stream | | | 84.84 | | | | | | | +| | four-stream | | | 85.23 | | | | | | | + +### NTU120_XSub_3D + +| frame sampling strategy | modality | gpus | backbone | top1 acc | testing protocol | FLOPs | params | config | ckpt | log | +| :---------------------: | :----------: | :--: | :------: | :------: | :--------------: | :---: | :----: | :---------------------------------------: | :-------------------------------------: | :------------------------------------: | +| uniform 100 | joint | 8 | STGCN | 82.15 | 10 clips | 57.1G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-3d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-3d/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-3d_20221129-0484f579.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-3d/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-3d.log) | +| uniform 100 | bone | 8 | STGCN | 84.28 | 10 clips | 57.1G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-3d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-3d/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-3d_20221129-bc007510.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-3d/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-3d.log) | +| uniform 100 | joint-motion | 8 | STGCN | 78.93 | 10 clips | 57.1G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-3d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-3d/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-3d_20221129-5d54f525.pth) | 
[log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-3d/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-3d.log) | +| uniform 100 | bone-motion | 8 | STGCN | 80.02 | 10 clips | 57.1G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-3d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-3d/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-3d_20221129-3cb0e4e1.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-3d/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-3d.log) | +| | two-stream | | | 85.68 | | | | | | | +| | four-stream | | | 86.19 | | | | | | | + +1. The **gpus** indicates the number of gpus we used to get the checkpoint. If you want to use a different number of gpus or videos per gpu, the best way is to set `--auto-scale-lr` when calling `tools/train.py`, this parameter will auto-scale the learning rate according to the actual batch size, and the original batch size. +2. For two-stream fusion, we use **joint : bone = 1 : 1**. For four-stream fusion, we use **joint : joint-motion : bone : bone-motion = 2 : 1 : 2 : 1**. For more details about multi-stream fusion, please refer to this [tutorial](/docs/en/user_guides/useful_tools.md#multi-stream-fusion). ## Train @@ -47,9 +76,8 @@ python tools/train.py ${CONFIG_FILE} [optional arguments] Example: train STGCN model on NTU60 dataset in a deterministic option with periodic validation. ```shell -python tools/train.py configs/skeleton/stgcn/stgcn_1xb16-80e_ntu60-xsub-keypoint.py \ - --work-dir work_dirs/stgcn_1xb16-80e_ntu60-xsub-keypoint \ - --cfg-options randomness.seed=0 randomness.deterministic=True +python tools/train.py configs/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py \ + --seed 0 --deterministic ``` For more details, you can refer to the **Training** part in the [Training and Test Tutorial](/docs/en/user_guides/4_train_test.md). @@ -65,9 +93,8 @@ python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments] Example: test STGCN model on NTU60 dataset and dump the result to a pickle file. ```shell -python tools/test.py configs/skeleton/stgcn/stgcn_1xb16-80e_ntu60-xsub-keypoint.py \ - checkpoints/SOME_CHECKPOINT.pth --eval top_k_accuracy mean_class_accuracy \ - --out result.pkl +python tools/test.py configs/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py \ + checkpoints/SOME_CHECKPOINT.pth --dump result.pkl ``` For more details, you can refer to the **Test** part in the [Training and Test Tutorial](/docs/en/user_guides/4_train_test.md). 
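Note 2 above gives the fusion weights (joint : bone = 1 : 1 for two streams, joint : joint-motion : bone : bone-motion = 2 : 1 : 2 : 1 for four streams) and defers to the multi-stream fusion tutorial; the repository tool for this is `tools/analysis_tools/report_accuracy.py`, which this patch also touches. The sketch below only illustrates the weighted late-fusion idea on the `result.pkl` files produced by the test command above; the per-sample key names (`pred_scores`, `gt_labels`) are assumptions about the dump format and may need adapting.

```python
# Minimal sketch of four-stream late fusion over dumped score files, assuming
# each pickle holds a list of per-sample dicts with class scores and a label.
import pickle

import numpy as np

result_files = ['joint.pkl', 'joint_motion.pkl', 'bone.pkl', 'bone_motion.pkl']
weights = [2, 1, 2, 1]  # joint : joint-motion : bone : bone-motion


def load_scores(path):
    """Return (scores, labels); the field names here are assumed, not official."""
    with open(path, 'rb') as f:
        results = pickle.load(f)
    scores = np.stack([np.asarray(r['pred_scores']) for r in results])
    labels = np.array([int(r['gt_labels']) for r in results])
    return scores, labels


fused, labels = None, None
for weight, path in zip(weights, result_files):
    scores, labels = load_scores(path)
    fused = weight * scores if fused is None else fused + weight * scores

top1 = float((fused.argmax(axis=1) == labels).mean())
print(f'four-stream fused top-1: {top1:.4f}')
```

For the two-stream numbers in the tables above, the same sketch applies with `result_files = ['joint.pkl', 'bone.pkl']` and `weights = [1, 1]`.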
diff --git a/configs/skeleton/stgcn/metafile.yml b/configs/skeleton/stgcn/metafile.yml index 76b4eec7bb..0480fd91b7 100644 --- a/configs/skeleton/stgcn/metafile.yml +++ b/configs/skeleton/stgcn/metafile.yml @@ -1,110 +1,311 @@ Collections: -- Name: STGCN - README: configs/skeleton/stgcn/README.md + - Name: STGCN + README: configs/skeleton/stgcn/README.md + Paper: + URL: https://arxiv.org/abs/1801.07455 + Title: 'Spatial Temporal Graph Convolutional Networks for Skeleton-Based Action Recognition' + Models: -- Config: configs/skeleton/stgcn/stgcn_1xb16-80e_ntu60-xsub-keypoint.py - In Collection: STGCN - Metadata: - Architecture: STGCN - Batch Size: 16 - Epochs: 80 - Parameters: 3088704 - Training Data: NTU60-XSub - Training Resources: 2 GPUs - Name: stgcn_1xb16-80e_ntu60-xsub-keypoint - Results: - Dataset: NTU60-XSub - Metrics: - Top 1 Accuracy: 86.91 - Task: Skeleton-based Action Recognition - Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_1xb16-80e_ntu60-xsub-keypoint/stgcn_1xb16-80e_ntu60-xsub-keypoint.log - Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_1xb16-80e_ntu60-xsub-keypoint/stgcn_1xb16-80e_ntu60-xsub-keypoint-e7bb9653.pth -- Config: configs/skeleton/stgcn/stgcn_1xb32-80e-ntu60_xsub-keypoint-3d.py - In Collection: STGCN - Metadata: - Architecture: STGCN - Batch Size: 32 - Epochs: 80 - Parameters: 3088704 - Training Data: NTU60-XSub - Training Resources: 1 GPU - Name: stgcn_1xb32-80e-ntu60_xsub-keypoint-3d - Results: - Dataset: NTU60-XSub - Metrics: - Top 1 Accuracy: 84.61 - Task: Skeleton-based Action Recognition - Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_1xb32-80e-ntu60_xsub-keypoint-3d/stgcn_1xb32-80e-ntu60_xsub-keypoint-3d.log - Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_1xb32-80e-ntu60_xsub-keypoint-3d/stgcn_1xb32-80e-ntu60_xsub-keypoint-3d-13e7ccf0.pth -- Config: configs/skeleton/stgcn/stgcn_8xb16-80e_babel60.py - In Collection: STGCN - Metadata: - Architecture: STGCN - Batch Size: 128 - Epochs: 80 - Parameters: 3088704 - Training Data: BABEL60 - Training Resources: 8 GPU - Name: stgcn_8xb16-80e_babel60 - Results: - Dataset: BABEL60 - Metrics: - Top 1 Accuracy: 42.39 - Mean Top 1 Accuracy: 28.28 - Task: Skeleton-based Action Recognition - Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-80e_babel60/stgcn_8xb16-80e_babel60.log - Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-80e_babel60/stgcn_8xb16-80e_babel60-3d206418.pth -- Config: configs/skeleton/stgcn/stgcn_8xb16-80e-babel60-wfl.py - In Collection: STGCN - Metadata: - Architecture: STGCN - Batch Size: 128 - Epochs: 80 - Parameters: 3088704 - Training Data: BABEL60 - Training Resources: 8 GPU - Name: stgcn_8xb16-80e-babel60-wfl - Results: - Dataset: BABEL60 - Metrics: - Top 1 Accuracy: 40.31 - Mean Top 1 Accuracy: 29.79 - Task: Skeleton-based Action Recognition - Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-80e-babel60-wfl/stgcn_8xb16-80e-babel60-wfl.log - Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-80e-babel60-wfl/stgcn_8xb16-80e-babel60-wfl-1a9102d7.pth -- Config: configs/skeleton/stgcn/stgcn_8xb16-80e_babel120.py - In Collection: STGCN - Metadata: - Architecture: STGCN - Batch Size: 128 - Epochs: 80 - Parameters: 3104320 - Training Data: BABEL120 - Training Resources: 8 GPU - Name: stgcn_8xb16-80e_babel120 - Results: - Dataset: BABEL120 - 
Metrics: - Top 1 Accuracy: 38.95 - Mean Top 1 Accuracy: 20.58 - Task: Skeleton-based Action Recognition - Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-80e_babel120/stgcn_8xb16-80e_babel120.log - Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-80e_babel120/stgcn_8xb16-80e_babel120-e41eb6d7.pth -- Config: configs/skeleton/stgcn/stgcn_8xb16-80e_babel120-wfl.py - In Collection: STGCN - Metadata: - Architecture: STGCN - Batch Size: 128 - Epochs: 80 - Parameters: 3104320 - Training Data: BABEL120 - Training Resources: 8 GPU - Name: stgcn_8xb16-80e_babel120-wfl - Results: - Dataset: BABEL120 - Metrics: - Top 1 Accuracy: 33.00 - Mean Top 1 Accuracy: 24.33 - Task: Skeleton-based Action Recognition - Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-80e_babel120-wfl/stgcn_8xb16-80e_babel120-wfl.log - Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-80e_babel120-wfl/stgcn_8xb16-80e_babel120-wfl-3f2c100d.pth + - Name: stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d + Config: configs/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py + In Collection: STGCN + Metadata: + Architecture: STGCN + Batch Size: 16 + Epochs: 80 + FLOPs: 38.2G + Parameters: 3.1M + Training Data: NTU60-XSub-2D + Training Resources: 8 GPUs + Results: + Dataset: NTU60-XSub-2D + Task: Skeleton-based Action Recognition + Metrics: + Top 1 Accuracy: 88.95 + Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.log + Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d_20221129-484a394a.pth + + - Name: stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d + Config: configs/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d.py + In Collection: STGCN + Metadata: + Architecture: STGCN + Batch Size: 16 + Epochs: 80 + FLOPs: 38.2G + Parameters: 3.1M + Training Data: NTU60-XSub-2D + Training Resources: 8 GPUs + Results: + Dataset: NTU60-XSub-2D + Task: Skeleton-based Action Recognition + Metrics: + Top 1 Accuracy: 91.69 + Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d.log + Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d_20221129-c4b44488.pth + + - Name: stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d + Config: configs/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d.py + In Collection: STGCN + Metadata: + Architecture: STGCN + Batch Size: 16 + Epochs: 80 + FLOPs: 38.2G + Parameters: 3.1M + Training Data: NTU60-XSub-2D + Training Resources: 8 GPUs + Results: + Dataset: NTU60-XSub-2D + Task: Skeleton-based Action Recognition + Metrics: + Top 1 Accuracy: 86.90 + Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d.log + Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d_20221129-f18eb408.pth + + - Name: 
stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d + Config: configs/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d.py + In Collection: STGCN + Metadata: + Architecture: STGCN + Batch Size: 16 + Epochs: 80 + FLOPs: 38.2G + Parameters: 3.1M + Training Data: NTU60-XSub-2D + Training Resources: 8 GPUs + Results: + Dataset: NTU60-XSub-2D + Task: Skeleton-based Action Recognition + Metrics: + Top 1 Accuracy: 87.86 + Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d.log + Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d_20221129-99c60e2d.pth + + - Name: stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d + Config: configs/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d.py + In Collection: STGCN + Metadata: + Architecture: STGCN + Batch Size: 16 + Epochs: 80 + FLOPs: 57.1G + Parameters: 3.1M + Training Data: NTU60-XSub-3D + Training Resources: 8 GPUs + Results: + Dataset: NTU60-XSub-3D + Task: Skeleton-based Action Recognition + Metrics: + Top 1 Accuracy: 88.11 + Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d.log + Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d_20221129-850308e1.pth + + - Name: stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d + Config: configs/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d.py + In Collection: STGCN + Metadata: + Architecture: STGCN + Batch Size: 16 + Epochs: 80 + FLOPs: 57.1G + Parameters: 3.1M + Training Data: NTU60-XSub-3D + Training Resources: 8 GPUs + Results: + Dataset: NTU60-XSub-3D + Task: Skeleton-based Action Recognition + Metrics: + Top 1 Accuracy: 88.76 + Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d.log + Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d_20221129-9c8d2970.pth + + - Name: stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d + Config: configs/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d.py + In Collection: STGCN + Metadata: + Architecture: STGCN + Batch Size: 16 + Epochs: 80 + FLOPs: 57.1G + Parameters: 3.1M + Training Data: NTU60-XSub-3D + Training Resources: 8 GPUs + Results: + Dataset: NTU60-XSub-3D + Task: Skeleton-based Action Recognition + Metrics: + Top 1 Accuracy: 86.06 + Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d.log + Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d_20221129-927648ea.pth + + - Name: stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d + Config: configs/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d.py + In Collection: STGCN + Metadata: + Architecture: STGCN + Batch Size: 16 
+ Epochs: 80 + FLOPs: 57.1G + Parameters: 3.1M + Training Data: NTU60-XSub-3D + Training Resources: 8 GPUs + Results: + Dataset: NTU60-XSub-3D + Task: Skeleton-based Action Recognition + Metrics: + Top 1 Accuracy: 85.49 + Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d.log + Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d_20221129-593162ca.pth + + - Name: stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-2d + Config: configs/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-2d.py + In Collection: STGCN + Metadata: + Architecture: STGCN + Batch Size: 16 + Epochs: 80 + FLOPs: 38.2G + Parameters: 3.1M + Training Data: NTU120-XSub-2D + Training Resources: 8 GPUs + Results: + Dataset: NTU120-XSub-2D + Task: Skeleton-based Action Recognition + Metrics: + Top 1 Accuracy: 83.19 + Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-2d/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-2d.log + Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-2d/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-2d_20221129-612416c6.pth + + - Name: stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-2d + Config: configs/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-2d.py + In Collection: STGCN + Metadata: + Architecture: STGCN + Batch Size: 16 + Epochs: 80 + FLOPs: 38.2G + Parameters: 3.1M + Training Data: NTU120-XSub-2D + Training Resources: 8 GPUs + Results: + Dataset: NTU120-XSub-2D + Task: Skeleton-based Action Recognition + Metrics: + Top 1 Accuracy: 83.36 + Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-2d/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-2d.log + Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-2d/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-2d_20221129-131e63c3.pth + + - Name: stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-2d + Config: configs/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-2d.py + In Collection: STGCN + Metadata: + Architecture: STGCN + Batch Size: 16 + Epochs: 80 + FLOPs: 38.2G + Parameters: 3.1M + Training Data: NTU120-XSub-2D + Training Resources: 8 GPUs + Results: + Dataset: NTU120-XSub-2D + Task: Skeleton-based Action Recognition + Metrics: + Top 1 Accuracy: 78.87 + Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-2d/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-2d.log + Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-2d/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-2d_20221129-7cb38ec2.pth + + - Name: stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-2d + Config: configs/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-2d.py + In Collection: STGCN + Metadata: + Architecture: STGCN + Batch Size: 16 + Epochs: 80 + FLOPs: 38.2G + Parameters: 3.1M + Training Data: NTU120-XSub-2D + Training Resources: 8 GPUs + Results: + Dataset: NTU120-XSub-2D + Task: Skeleton-based Action Recognition + 
Metrics: + Top 1 Accuracy: 79.55 + Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-2d/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-2d.log + Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-2d/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-2d_20221129-f5b19892.pth + + - Name: stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-3d + Config: configs/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-3d.py + In Collection: STGCN + Metadata: + Architecture: STGCN + Batch Size: 16 + Epochs: 80 + FLOPs: 57.1G + Parameters: 3.1M + Training Data: NTU120-XSub-3D + Training Resources: 8 GPUs + Results: + Dataset: NTU120-XSub-3D + Task: Skeleton-based Action Recognition + Metrics: + Top 1 Accuracy: 82.15 + Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-3d/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-3d.log + Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-3d/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-3d_20221129-0484f579.pth + + - Name: stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-3d + Config: configs/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-3d.py + In Collection: STGCN + Metadata: + Architecture: STGCN + Batch Size: 16 + Epochs: 80 + FLOPs: 57.1G + Parameters: 3.1M + Training Data: NTU120-XSub-3D + Training Resources: 8 GPUs + Results: + Dataset: NTU120-XSub-3D + Task: Skeleton-based Action Recognition + Metrics: + Top 1 Accuracy: 84.28 + Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-3d/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-3d.log + Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-3d/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-3d_20221129-bc007510.pth + + - Name: stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-3d + Config: configs/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-3d.py + In Collection: STGCN + Metadata: + Architecture: STGCN + Batch Size: 16 + Epochs: 80 + FLOPs: 57.1G + Parameters: 3.1M + Training Data: NTU120-XSub-3D + Training Resources: 8 GPUs + Results: + Dataset: NTU120-XSub-3D + Task: Skeleton-based Action Recognition + Metrics: + Top 1 Accuracy: 78.93 + Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-3d/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-3d.log + Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-3d/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-3d_20221129-5d54f525.pth + + - Name: stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-3d + Config: configs/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-3d.py + In Collection: STGCN + Metadata: + Architecture: STGCN + Batch Size: 16 + Epochs: 80 + FLOPs: 57.1G + Parameters: 3.1M + Training Data: NTU120-XSub-3D + Training Resources: 8 GPUs + Results: + Dataset: NTU120-XSub-3D + Task: Skeleton-based Action Recognition + Metrics: + Top 1 Accuracy: 80.02 + Training Log: 
https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-3d/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-3d.log + Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-3d/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-3d_20221129-3cb0e4e1.pth diff --git a/configs/skeleton/stgcn/stgcn_1xb16-80e-ntu60-xsub-keypoint-3d.py b/configs/skeleton/stgcn/stgcn_1xb16-80e-ntu60-xsub-keypoint-3d.py deleted file mode 100644 index 11a81e3be5..0000000000 --- a/configs/skeleton/stgcn/stgcn_1xb16-80e-ntu60-xsub-keypoint-3d.py +++ /dev/null @@ -1,69 +0,0 @@ -_base_ = ['../../_base_/models/stgcn.py', '../../_base_/default_runtime.py'] - -dataset_type = 'PoseDataset' -ann_file_train = 'data/ntu/nturgb+d_skeletons_60_3d/xsub/train.pkl' -ann_file_val = 'data/ntu/nturgb+d_skeletons_60_3d/xsub/val.pkl' -train_pipeline = [ - dict(type='PaddingWithLoop', clip_len=300), - dict(type='PoseDecode'), - dict(type='FormatGCNInput', input_format='NCTVM'), - dict(type='PackActionInputs') -] -test_pipeline = [ - dict(type='PaddingWithLoop', clip_len=300), - dict(type='PoseDecode'), - dict(type='FormatGCNInput', input_format='NCTVM'), - dict(type='PackActionInputs') -] - -train_dataloader = dict( - batch_size=16, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, ann_file=ann_file_train, pipeline=train_pipeline)) -val_dataloader = dict( - batch_size=16, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=False), - dataset=dict( - type=dataset_type, - ann_file=ann_file_val, - pipeline=test_pipeline, - test_mode=True)) -test_dataloader = dict( - batch_size=1, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=False), - dataset=dict( - type=dataset_type, - ann_file=ann_file_val, - pipeline=test_pipeline, - test_mode=True)) - -val_evaluator = dict(type='AccMetric') -test_evaluator = val_evaluator - -train_cfg = dict( - type='EpochBasedTrainLoop', max_epochs=80, val_begin=1, val_interval=5) -val_cfg = dict(type='ValLoop') -test_cfg = dict(type='TestLoop') - -param_scheduler = [ - dict( - type='MultiStepLR', - begin=0, - end=80, - by_epoch=True, - milestones=[10, 50], - gamma=0.1) -] - -optim_wrapper = dict( - optimizer=dict( - type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001, nesterov=True)) - -default_hooks = dict(checkpoint=dict(interval=5), logger=dict(interval=100)) diff --git a/configs/skeleton/stgcn/stgcn_1xb16-80e_ntu60-xsub-keypoint.py b/configs/skeleton/stgcn/stgcn_1xb16-80e_ntu60-xsub-keypoint.py deleted file mode 100644 index a963e0a5c5..0000000000 --- a/configs/skeleton/stgcn/stgcn_1xb16-80e_ntu60-xsub-keypoint.py +++ /dev/null @@ -1,76 +0,0 @@ -_base_ = ['../../_base_/models/stgcn.py', '../../_base_/default_runtime.py'] - -model = dict( - backbone=dict(graph_cfg=dict(layout='coco')), - data_preprocessor=dict( - type='ActionDataPreprocessor', - mean=[960., 540., 0.5], - std=[1920, 1080, 1.], - format_shape='NCTVM')) - -dataset_type = 'PoseDataset' -ann_file_train = 'data/posec3d/ntu60_xsub_train.pkl' -ann_file_val = 'data/posec3d/ntu60_xsub_val.pkl' -train_pipeline = [ - dict(type='PaddingWithLoop', clip_len=300), - dict(type='PoseDecode'), - dict(type='FormatGCNInput', input_format='NCTVM'), - dict(type='PackActionInputs') -] -test_pipeline = [ - dict(type='PaddingWithLoop', clip_len=300), - dict(type='PoseDecode'), - 
dict(type='FormatGCNInput', input_format='NCTVM'), - dict(type='PackActionInputs') -] -train_dataloader = dict( - batch_size=16, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, ann_file=ann_file_train, pipeline=train_pipeline)) -val_dataloader = dict( - batch_size=16, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=False), - dataset=dict( - type=dataset_type, - ann_file=ann_file_val, - pipeline=test_pipeline, - test_mode=True)) -test_dataloader = dict( - batch_size=1, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=False), - dataset=dict( - type=dataset_type, - ann_file=ann_file_val, - pipeline=test_pipeline, - test_mode=True)) - -val_evaluator = dict(type='AccMetric') -test_evaluator = val_evaluator - -train_cfg = dict( - type='EpochBasedTrainLoop', max_epochs=80, val_begin=1, val_interval=5) -val_cfg = dict(type='ValLoop') -test_cfg = dict(type='TestLoop') - -param_scheduler = [ - dict( - type='MultiStepLR', - begin=0, - end=80, - by_epoch=True, - milestones=[10, 50], - gamma=0.1) -] - -optim_wrapper = dict( - optimizer=dict( - type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001, nesterov=True)) - -default_hooks = dict(checkpoint=dict(interval=5), logger=dict(interval=100)) diff --git a/configs/skeleton/stgcn/stgcn_8xb16-80e-babel60-wfl.py b/configs/skeleton/stgcn/stgcn_8xb16-80e-babel60-wfl.py deleted file mode 100644 index e9677d3c71..0000000000 --- a/configs/skeleton/stgcn/stgcn_8xb16-80e-babel60-wfl.py +++ /dev/null @@ -1,13 +0,0 @@ -_base_ = './stgcn_80e_8xb16_babel60.py' - -samples_per_cls = [ - 518, 1993, 6260, 508, 208, 3006, 431, 724, 4527, 2131, 199, 1255, 487, 302, - 136, 571, 267, 646, 1180, 405, 731, 842, 1619, 271, 1198, 1012, 865, 462, - 526, 405, 487, 168, 271, 609, 503, 167, 415, 421, 283, 2069, 715, 196, 989, - 122, 599, 396, 245, 380, 236, 260, 325, 133, 206, 191, 394, 145, 277, 268, - 172, 146 -] - -model = dict( - cls_head=dict( - loss_cls=dict(type='CBFocalLoss', samples_per_cls=samples_per_cls))) diff --git a/configs/skeleton/stgcn/stgcn_8xb16-80e_babel120-wfl.py b/configs/skeleton/stgcn/stgcn_8xb16-80e_babel120-wfl.py deleted file mode 100644 index fa2720d6de..0000000000 --- a/configs/skeleton/stgcn/stgcn_8xb16-80e_babel120-wfl.py +++ /dev/null @@ -1,16 +0,0 @@ -_base_ = './stgcn_80e_8xb16_babel120.py' - -samples_per_cls = [ - 518, 1993, 6260, 508, 208, 3006, 431, 724, 4527, 2131, 199, 1255, 487, 302, - 136, 571, 267, 646, 1180, 405, 72, 731, 842, 1619, 271, 27, 1198, 1012, - 110, 865, 462, 526, 405, 487, 101, 24, 84, 64, 168, 271, 609, 503, 76, 167, - 415, 137, 421, 283, 2069, 715, 196, 66, 44, 989, 122, 43, 599, 396, 245, - 380, 34, 236, 260, 325, 127, 133, 119, 66, 125, 50, 206, 191, 394, 69, 98, - 145, 38, 21, 29, 64, 277, 65, 39, 31, 35, 85, 54, 80, 133, 66, 39, 64, 268, - 34, 172, 54, 33, 21, 110, 19, 40, 55, 146, 39, 37, 75, 101, 20, 46, 55, 43, - 21, 43, 87, 29, 36, 24, 37, 28, 39 -] - -model = dict( - cls_head=dict( - loss_cls=dict(type='CBFocalLoss', samples_per_cls=samples_per_cls))) diff --git a/configs/skeleton/stgcn/stgcn_8xb16-80e_babel120.py b/configs/skeleton/stgcn/stgcn_8xb16-80e_babel120.py deleted file mode 100644 index 0d07a41bd3..0000000000 --- a/configs/skeleton/stgcn/stgcn_8xb16-80e_babel120.py +++ /dev/null @@ -1,74 +0,0 @@ -_base_ = ['../../_base_/models/stgcn.py', '../../_base_/default_runtime.py'] - -model = dict(cls_head=dict(num_classes=120, num_person=1)) - 
-dataset_type = 'PoseDataset' -ann_file_train = 'data/babel/babel120_train.pkl' -ann_file_val = 'data/babel/babel120_val.pkl' -train_pipeline = [ - dict(type='PoseDecode'), - dict(type='FormatGCNInput', input_format='NCTVM', num_person=1), - dict(type='PackActionInputs') -] -test_pipeline = [ - dict(type='PoseDecode'), - dict(type='FormatGCNInput', input_format='NCTVM', num_person=1), - dict(type='PackActionInputs') -] - -train_dataloader = dict( - batch_size=16, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='RepeatDataset', - times=5, - dataset=dict( - type=dataset_type, - ann_file=ann_file_train, - pipeline=train_pipeline))) -val_dataloader = dict( - batch_size=16, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=False), - dataset=dict( - type=dataset_type, - ann_file=ann_file_val, - pipeline=test_pipeline, - test_mode=True)) -test_dataloader = dict( - batch_size=1, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=False), - dataset=dict( - type=dataset_type, - ann_file=ann_file_val, - pipeline=test_pipeline, - test_mode=True)) - -val_evaluator = dict(type='AccMetric') -test_evaluator = val_evaluator - -train_cfg = dict( - type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1) -val_cfg = dict(type='ValLoop') -test_cfg = dict(type='TestLoop') - -param_scheduler = [ - dict( - type='MultiStepLR', - begin=0, - end=16, - by_epoch=True, - milestones=[10, 14], - gamma=0.1) -] - -optim_wrapper = dict( - optimizer=dict( - type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001, nesterov=True)) - -default_hooks = dict(logger=dict(interval=100)) diff --git a/configs/skeleton/stgcn/stgcn_8xb16-80e_babel60.py b/configs/skeleton/stgcn/stgcn_8xb16-80e_babel60.py deleted file mode 100644 index 976e56e580..0000000000 --- a/configs/skeleton/stgcn/stgcn_8xb16-80e_babel60.py +++ /dev/null @@ -1,74 +0,0 @@ -_base_ = ['../../_base_/models/stgcn.py', '../../_base_/default_runtime.py'] - -model = dict(cls_head=dict(num_person=1)) - -dataset_type = 'PoseDataset' -ann_file_train = 'data/babel/babel60_train.pkl' -ann_file_val = 'data/babel/babel60_val.pkl' -train_pipeline = [ - dict(type='PoseDecode'), - dict(type='FormatGCNInput', input_format='NCTVM', num_person=1), - dict(type='PackActionInputs') -] -test_pipeline = [ - dict(type='PoseDecode'), - dict(type='FormatGCNInput', input_format='NCTVM', num_person=1), - dict(type='PackActionInputs') -] - -train_dataloader = dict( - batch_size=16, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='RepeatDataset', - times=5, - dataset=dict( - type=dataset_type, - ann_file=ann_file_train, - pipeline=train_pipeline))) -val_dataloader = dict( - batch_size=16, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=False), - dataset=dict( - type=dataset_type, - ann_file=ann_file_val, - pipeline=test_pipeline, - test_mode=True)) -test_dataloader = dict( - batch_size=1, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=False), - dataset=dict( - type=dataset_type, - ann_file=ann_file_val, - pipeline=test_pipeline, - test_mode=True)) - -val_evaluator = dict(type='AccMetric') -test_evaluator = val_evaluator - -train_cfg = dict( - type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1) -val_cfg = dict(type='ValLoop') -test_cfg = dict(type='TestLoop') - 
-param_scheduler = [ - dict( - type='MultiStepLR', - begin=0, - end=16, - by_epoch=True, - milestones=[10, 14], - gamma=0.1) -] - -optim_wrapper = dict( - optimizer=dict( - type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001, nesterov=True)) - -default_hooks = dict(logger=dict(interval=100)) diff --git a/configs/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-2d.py b/configs/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-2d.py new file mode 100644 index 0000000000..2ce7e3754f --- /dev/null +++ b/configs/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-2d.py @@ -0,0 +1,67 @@ +_base_ = 'stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-2d.py' + +dataset_type = 'PoseDataset' +ann_file = 'data/skeleton/ntu120_2d.pkl' +train_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['bm']), + dict(type='UniformSampleFrames', clip_len=100), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['bm']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['bm']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=10, + test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=train_pipeline, + split='xsub_train'))) +val_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=val_pipeline, + split='xsub_val', + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=test_pipeline, + split='xsub_val', + test_mode=True)) diff --git a/configs/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-3d.py b/configs/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-3d.py new file mode 100644 index 0000000000..636370939d --- /dev/null +++ b/configs/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-3d.py @@ -0,0 +1,67 @@ +_base_ = 'stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-3d.py' + +dataset_type = 'PoseDataset' +ann_file = 'data/skeleton/ntu120_3d.pkl' +train_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['bm']), + dict(type='UniformSampleFrames', clip_len=100), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['bm']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +test_pipeline = [ + 
dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['bm']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=10, + test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=train_pipeline, + split='xsub_train'))) +val_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=val_pipeline, + split='xsub_val', + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=test_pipeline, + split='xsub_val', + test_mode=True)) diff --git a/configs/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d.py b/configs/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d.py new file mode 100644 index 0000000000..6f09e2bb78 --- /dev/null +++ b/configs/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d.py @@ -0,0 +1,67 @@ +_base_ = 'stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py' + +dataset_type = 'PoseDataset' +ann_file = 'data/skeleton/ntu60_2d.pkl' +train_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['bm']), + dict(type='UniformSampleFrames', clip_len=100), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['bm']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['bm']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=10, + test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=train_pipeline, + split='xsub_train'))) +val_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=val_pipeline, + split='xsub_val', + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=test_pipeline, + split='xsub_val', + test_mode=True)) diff --git a/configs/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d.py b/configs/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d.py new file mode 100644 index 0000000000..09a84143a5 --- /dev/null +++ 
b/configs/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d.py @@ -0,0 +1,67 @@ +_base_ = 'stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d.py' + +dataset_type = 'PoseDataset' +ann_file = 'data/skeleton/ntu60_3d.pkl' +train_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['bm']), + dict(type='UniformSampleFrames', clip_len=100), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['bm']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['bm']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=10, + test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=train_pipeline, + split='xsub_train'))) +val_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=val_pipeline, + split='xsub_val', + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=test_pipeline, + split='xsub_val', + test_mode=True)) diff --git a/configs/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-2d.py b/configs/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-2d.py new file mode 100644 index 0000000000..43155203ee --- /dev/null +++ b/configs/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-2d.py @@ -0,0 +1,67 @@ +_base_ = 'stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-2d.py' + +dataset_type = 'PoseDataset' +ann_file = 'data/skeleton/ntu120_2d.pkl' +train_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['b']), + dict(type='UniformSampleFrames', clip_len=100), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['b']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['b']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=10, + test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=train_pipeline, + 
split='xsub_train'))) +val_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=val_pipeline, + split='xsub_val', + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=test_pipeline, + split='xsub_val', + test_mode=True)) diff --git a/configs/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-3d.py b/configs/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-3d.py new file mode 100644 index 0000000000..21b22ea00f --- /dev/null +++ b/configs/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-3d.py @@ -0,0 +1,67 @@ +_base_ = 'stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-3d.py' + +dataset_type = 'PoseDataset' +ann_file = 'data/skeleton/ntu120_3d.pkl' +train_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['b']), + dict(type='UniformSampleFrames', clip_len=100), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['b']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['b']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=10, + test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=train_pipeline, + split='xsub_train'))) +val_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=val_pipeline, + split='xsub_val', + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=test_pipeline, + split='xsub_val', + test_mode=True)) diff --git a/configs/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d.py b/configs/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d.py new file mode 100644 index 0000000000..bea47a7e40 --- /dev/null +++ b/configs/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d.py @@ -0,0 +1,67 @@ +_base_ = 'stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py' + +dataset_type = 'PoseDataset' +ann_file = 'data/skeleton/ntu60_2d.pkl' +train_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['b']), + dict(type='UniformSampleFrames', clip_len=100), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['b']), + dict( + 
type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['b']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=10, + test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=train_pipeline, + split='xsub_train'))) +val_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=val_pipeline, + split='xsub_val', + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=test_pipeline, + split='xsub_val', + test_mode=True)) diff --git a/configs/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d.py b/configs/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d.py new file mode 100644 index 0000000000..15c45f8c32 --- /dev/null +++ b/configs/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d.py @@ -0,0 +1,67 @@ +_base_ = 'stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d.py' + +dataset_type = 'PoseDataset' +ann_file = 'data/skeleton/ntu60_3d.pkl' +train_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['b']), + dict(type='UniformSampleFrames', clip_len=100), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['b']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['b']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=10, + test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=train_pipeline, + split='xsub_train'))) +val_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=val_pipeline, + split='xsub_val', + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=test_pipeline, + split='xsub_val', + test_mode=True)) diff --git 
a/configs/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-2d.py b/configs/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-2d.py new file mode 100644 index 0000000000..0bb5f9c860 --- /dev/null +++ b/configs/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-2d.py @@ -0,0 +1,67 @@ +_base_ = 'stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-2d.py' + +dataset_type = 'PoseDataset' +ann_file = 'data/skeleton/ntu120_2d.pkl' +train_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['jm']), + dict(type='UniformSampleFrames', clip_len=100), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['jm']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['jm']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=10, + test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=train_pipeline, + split='xsub_train'))) +val_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=val_pipeline, + split='xsub_val', + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=test_pipeline, + split='xsub_val', + test_mode=True)) diff --git a/configs/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-3d.py b/configs/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-3d.py new file mode 100644 index 0000000000..4dc6ad2bc7 --- /dev/null +++ b/configs/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-3d.py @@ -0,0 +1,67 @@ +_base_ = 'stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-3d.py' + +dataset_type = 'PoseDataset' +ann_file = 'data/skeleton/ntu120_3d.pkl' +train_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['jm']), + dict(type='UniformSampleFrames', clip_len=100), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['jm']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['jm']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=10, + test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] + +train_dataloader = 
dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=train_pipeline, + split='xsub_train'))) +val_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=val_pipeline, + split='xsub_val', + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=test_pipeline, + split='xsub_val', + test_mode=True)) diff --git a/configs/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d.py b/configs/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d.py new file mode 100644 index 0000000000..1102327bd9 --- /dev/null +++ b/configs/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d.py @@ -0,0 +1,67 @@ +_base_ = 'stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py' + +dataset_type = 'PoseDataset' +ann_file = 'data/skeleton/ntu60_2d.pkl' +train_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['jm']), + dict(type='UniformSampleFrames', clip_len=100), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['jm']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['jm']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=10, + test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=train_pipeline, + split='xsub_train'))) +val_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=val_pipeline, + split='xsub_val', + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=test_pipeline, + split='xsub_val', + test_mode=True)) diff --git a/configs/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d.py b/configs/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d.py new file mode 100644 index 0000000000..cacfdb73b1 --- /dev/null +++ b/configs/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d.py @@ -0,0 +1,67 @@ +_base_ = 'stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d.py' + +dataset_type = 'PoseDataset' +ann_file = 'data/skeleton/ntu60_3d.pkl' +train_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', 
dataset='nturgb+d', feats=['jm']), + dict(type='UniformSampleFrames', clip_len=100), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['jm']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['jm']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=10, + test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=train_pipeline, + split='xsub_train'))) +val_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=val_pipeline, + split='xsub_val', + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=test_pipeline, + split='xsub_val', + test_mode=True)) diff --git a/configs/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-2d.py b/configs/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-2d.py new file mode 100644 index 0000000000..d3367e7a12 --- /dev/null +++ b/configs/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-2d.py @@ -0,0 +1,102 @@ +_base_ = '../../_base_/default_runtime.py' + +model = dict( + type='RecognizerGCN', + backbone=dict( + type='STGCN', graph_cfg=dict(layout='coco', mode='stgcn_spatial')), + cls_head=dict(type='GCNHead', num_classes=120, in_channels=256)) + +dataset_type = 'PoseDataset' +ann_file = 'data/skeleton/ntu120_2d.pkl' +train_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['j']), + dict(type='UniformSampleFrames', clip_len=100), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['j']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['j']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=10, + test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=train_pipeline, + split='xsub_train'))) +val_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', 
shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=val_pipeline, + split='xsub_val', + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=test_pipeline, + split='xsub_val', + test_mode=True)) + +val_evaluator = [dict(type='AccMetric')] +test_evaluator = val_evaluator + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +param_scheduler = [ + dict( + type='CosineAnnealingLR', + eta_min=0, + T_max=16, + by_epoch=True, + convert_to_iter_based=True) +] + +optim_wrapper = dict( + optimizer=dict( + type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True)) + +default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (16 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=128) diff --git a/configs/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-3d.py b/configs/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-3d.py new file mode 100644 index 0000000000..b1c111cebb --- /dev/null +++ b/configs/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-3d.py @@ -0,0 +1,102 @@ +_base_ = '../../_base_/default_runtime.py' + +model = dict( + type='RecognizerGCN', + backbone=dict( + type='STGCN', graph_cfg=dict(layout='nturgb+d', mode='stgcn_spatial')), + cls_head=dict(type='GCNHead', num_classes=120, in_channels=256)) + +dataset_type = 'PoseDataset' +ann_file = 'data/skeleton/ntu120_3d.pkl' +train_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']), + dict(type='UniformSampleFrames', clip_len=100), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=10, + test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=train_pipeline, + split='xsub_train'))) +val_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=val_pipeline, + split='xsub_val', + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=test_pipeline, + split='xsub_val', + test_mode=True)) + 
+val_evaluator = [dict(type='AccMetric')] +test_evaluator = val_evaluator + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +param_scheduler = [ + dict( + type='CosineAnnealingLR', + eta_min=0, + T_max=16, + by_epoch=True, + convert_to_iter_based=True) +] + +optim_wrapper = dict( + optimizer=dict( + type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True)) + +default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (16 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=128) diff --git a/configs/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py b/configs/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py new file mode 100644 index 0000000000..22487d8a38 --- /dev/null +++ b/configs/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py @@ -0,0 +1,102 @@ +_base_ = '../../_base_/default_runtime.py' + +model = dict( + type='RecognizerGCN', + backbone=dict( + type='STGCN', graph_cfg=dict(layout='coco', mode='stgcn_spatial')), + cls_head=dict(type='GCNHead', num_classes=60, in_channels=256)) + +dataset_type = 'PoseDataset' +ann_file = 'data/skeleton/ntu60_2d.pkl' +train_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['j']), + dict(type='UniformSampleFrames', clip_len=100), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['j']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['j']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=10, + test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=train_pipeline, + split='xsub_train'))) +val_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=val_pipeline, + split='xsub_val', + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=test_pipeline, + split='xsub_val', + test_mode=True)) + +val_evaluator = [dict(type='AccMetric')] +test_evaluator = val_evaluator + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +param_scheduler = [ + dict( + type='CosineAnnealingLR', + eta_min=0, + T_max=16, + by_epoch=True, + convert_to_iter_based=True) +] + +optim_wrapper = dict( + optimizer=dict( + type='SGD', 
lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True)) + +default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (16 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=128) diff --git a/configs/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d.py b/configs/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d.py new file mode 100644 index 0000000000..2c672c54a0 --- /dev/null +++ b/configs/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d.py @@ -0,0 +1,102 @@ +_base_ = '../../_base_/default_runtime.py' + +model = dict( + type='RecognizerGCN', + backbone=dict( + type='STGCN', graph_cfg=dict(layout='nturgb+d', mode='stgcn_spatial')), + cls_head=dict(type='GCNHead', num_classes=60, in_channels=256)) + +dataset_type = 'PoseDataset' +ann_file = 'data/skeleton/ntu60_3d.pkl' +train_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']), + dict(type='UniformSampleFrames', clip_len=100), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=10, + test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=train_pipeline, + split='xsub_train'))) +val_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=val_pipeline, + split='xsub_val', + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=test_pipeline, + split='xsub_val', + test_mode=True)) + +val_evaluator = [dict(type='AccMetric')] +test_evaluator = val_evaluator + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +param_scheduler = [ + dict( + type='CosineAnnealingLR', + eta_min=0, + T_max=16, + by_epoch=True, + convert_to_iter_based=True) +] + +optim_wrapper = dict( + optimizer=dict( + type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True)) + +default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (16 samples per GPU). 
+auto_scale_lr = dict(enable=False, base_batch_size=128) diff --git a/docs/en/user_guides/useful_tools.md b/docs/en/user_guides/useful_tools.md index d9c207f745..2fe3b1977a 100644 --- a/docs/en/user_guides/useful_tools.md +++ b/docs/en/user_guides/useful_tools.md @@ -14,6 +14,7 @@ Apart from training/testing scripts, We provide lots of useful tools under the ` - [Evaluating a metric](#evaluating-a-metric) - [Print the entire config](#print-the-entire-config) - [Check videos](#check-videos) + - [Multi-Stream Fusion](#multi-stream-fusion) @@ -69,3 +70,23 @@ python tools/analysis_tools/print_config.py ${CONFIG} [-h] [--options ${OPTIONS ```shell python tools/analysis_tools/check_videos.py ${CONFIG} [-h] [--options OPTIONS [OPTIONS ...]] [--cfg-options CFG_OPTIONS [CFG_OPTIONS ...]] [--output-file OUTPUT_FILE] [--split SPLIT] [--decoder DECODER] [--num-processes NUM_PROCESSES] [--remove-corrupted-videos] ``` + +### Multi-Stream Fusion + +`tools/analysis_tools/report_accuracy.py` uses the dumped results (by setting `--dump res.pkl` when testing) to fuse the multi-stream prediction scores, i.e., late fusion. + +```shell +python tools/analysis_tools/report_accuracy.py [--preds ${RESULT_PKL_1 [RESULT_PKL_2 ...]}] [--coefficients ${COEFFICIENT_1 [COEFFICIENT_2, ...]}] [--apply-softmax] +``` + +Take joint-bone fusion as an example, which is a general practice in the task of skeleton-based action recognition. + +```shell +python tools/analysis_tools/report_accuracy.py --preds demo/fuse/joint.pkl demo/fuse/bone.pkl --coefficients 1.0 1.0 +``` + +```{note} +Mean Class Accuracy: 0.9180 +Top 1 Accuracy: 0.9333 +Top 5 Accuracy: 0.9833 +``` diff --git a/mmaction/datasets/transforms/__init__.py b/mmaction/datasets/transforms/__init__.py index 09e0111e4c..8d0648c435 100644 --- a/mmaction/datasets/transforms/__init__.py +++ b/mmaction/datasets/transforms/__init__.py @@ -1,7 +1,6 @@ # Copyright (c) OpenMMLab. All rights reserved. 
from .formatting import (FormatAudioShape, FormatGCNInput, FormatShape, - JointToBone, PackActionInputs, PackLocalizationInputs, - Transpose) + PackActionInputs, PackLocalizationInputs, Transpose) from .loading import (ArrayDecode, AudioDecode, AudioDecodeInit, AudioFeatureSelector, BuildPseudoClip, DecordDecode, DecordInit, DenseSampleFrames, @@ -11,12 +10,14 @@ PIMSInit, PyAVDecode, PyAVDecodeMotionVector, PyAVInit, RawFrameDecode, SampleAVAFrames, SampleFrames, UniformSample, UntrimmedSampleFrames) -from .pose_loading import (GeneratePoseTarget, LoadKineticsPose, - PaddingWithLoop, PoseDecode, UniformSampleFrames) +from .pose_transforms import (GeneratePoseTarget, GenSkeFeat, JointToBone, + LoadKineticsPose, MergeSkeFeat, PadTo, + PoseCompact, PoseDecode, PreNormalize2D, + PreNormalize3D, ToMotion, UniformSampleFrames) from .processing import (AudioAmplify, CenterCrop, ColorJitter, Flip, Fuse, - MelSpectrogram, MultiScaleCrop, PoseCompact, - RandomCrop, RandomRescale, RandomResizedCrop, Resize, - TenCrop, ThreeCrop) + MelSpectrogram, MultiScaleCrop, RandomCrop, + RandomRescale, RandomResizedCrop, Resize, TenCrop, + ThreeCrop) from .wrappers import ImgAug, PytorchVideoWrapper, TorchVisionWrapper __all__ = [ @@ -30,9 +31,10 @@ 'AudioAmplify', 'MelSpectrogram', 'AudioDecode', 'FormatAudioShape', 'LoadAudioFeature', 'AudioFeatureSelector', 'AudioDecodeInit', 'ImageDecode', 'BuildPseudoClip', 'RandomRescale', 'PIMSDecode', - 'PyAVDecodeMotionVector', 'UniformSample', 'UniformSampleFrames', - 'PoseDecode', 'LoadKineticsPose', 'GeneratePoseTarget', 'PIMSInit', - 'FormatGCNInput', 'PaddingWithLoop', 'ArrayDecode', 'JointToBone', - 'PackActionInputs', 'PackLocalizationInputs', 'ImgAug', - 'TorchVisionWrapper', 'PytorchVideoWrapper', 'PoseCompact' + 'PyAVDecodeMotionVector', 'UniformSampleFrames', 'PoseDecode', + 'LoadKineticsPose', 'GeneratePoseTarget', 'PIMSInit', 'FormatGCNInput', + 'PadTo', 'ArrayDecode', 'JointToBone', 'PackActionInputs', + 'PackLocalizationInputs', 'ImgAug', 'TorchVisionWrapper', + 'PytorchVideoWrapper', 'PoseCompact', 'PreNormalize3D', 'ToMotion', + 'MergeSkeFeat', 'GenSkeFeat', 'PreNormalize2D', 'UniformSample' ] diff --git a/mmaction/datasets/transforms/formatting.py b/mmaction/datasets/transforms/formatting.py index d29e4d5278..7477f51080 100644 --- a/mmaction/datasets/transforms/formatting.py +++ b/mmaction/datasets/transforms/formatting.py @@ -1,5 +1,5 @@ # Copyright (c) OpenMMLab. All rights reserved. -from typing import Sequence +from typing import Dict, Sequence import numpy as np import torch @@ -12,11 +12,22 @@ @TRANSFORMS.register_module() class PackActionInputs(BaseTransform): - """Pack the inputs data for the recognition. + """Pack the input data for the recognition. + + PackActionInputs first packs one of 'imgs', 'keypoint' and 'audios' into + the `packed_results['inputs']`, which are the three basic input modalities + for the task of rgb-based, skeleton-based and audio-based action + recognition, as well as spatio-temporal action detection in the case + of 'img'. Next, it prepares a `data_sample` for the task of action + recognition (only a single label of `torch.LongTensor` format, which is + saved in the `data_sample.gt_labels.item`) or spatio-temporal action + detection respectively. Then, it saves the meta keys defined in + the `meta_keys` in `data_sample.metainfo`, and packs the `data_sample` + into the `packed_results['data_samples']`. Args: meta_keys (Sequence[str]): The meta keys to saved in the - ``metainfo`` of the packed ``data_sample``. 
+ `metainfo` of the `data_sample`. Defaults to ``('img_shape', 'img_key', 'video_id', 'timestamp')``. """ @@ -32,17 +43,14 @@ def __init__( ) -> None: self.meta_keys = meta_keys - def transform(self, results: dict) -> dict: - """Method to pack the input data. + def transform(self, results: Dict) -> Dict: + """The transform function of :class:`PackActionInputs`. Args: - results (dict): Result dict from the data pipeline. + results (dict): The result dict. Returns: - dict: - - 'inputs' (torch.Tensor): The forward data of models. - - 'data_sample' (:obj:`ActionDataSample`): The annotation - info of the sample. + dict: The result dict. """ packed_results = dict() if 'imgs' in results: @@ -307,128 +315,68 @@ def __repr__(self): return repr_str -@TRANSFORMS.register_module() -class JointToBone(BaseTransform): - """Convert the joint information to bone information. - - Required keys are "keypoint" , - added or modified keys are "keypoint". - - Args: - dataset (str): Define the type of dataset: 'nturgb+d', 'openpose-18', - 'coco'. Default: 'nturgb+d'. - """ - - def __init__(self, dataset='nturgb+d'): - self.dataset = dataset - if self.dataset not in ['nturgb+d', 'openpose-18', 'coco']: - raise ValueError( - f'The dataset type {self.dataset} is not supported') - if self.dataset == 'nturgb+d': - self.pairs = [(0, 1), (1, 20), (2, 20), (3, 2), (4, 20), (5, 4), - (6, 5), (7, 6), (8, 20), (9, 8), (10, 9), (11, 10), - (12, 0), (13, 12), (14, 13), (15, 14), (16, 0), - (17, 16), (18, 17), (19, 18), (21, 22), (20, 20), - (22, 7), (23, 24), (24, 11)] - elif self.dataset == 'openpose-18': - self.pairs = ((0, 0), (1, 0), (2, 1), (3, 2), (4, 3), (5, 1), - (6, 5), (7, 6), (8, 2), (9, 8), (10, 9), (11, 5), - (12, 11), (13, 12), (14, 0), (15, 0), (16, 14), (17, - 15)) - elif self.dataset == 'coco': - self.pairs = ((0, 0), (1, 0), (2, 0), (3, 1), (4, 2), (5, 0), - (6, 0), (7, 5), (8, 6), (9, 7), (10, 8), (11, 0), - (12, 0), (13, 11), (14, 12), (15, 13), (16, 14)) - - def transform(self, results): - """Performs the Bone formatting. - - Args: - results (dict): The resulting dict to be modified and passed - to the next transform in pipeline. - """ - keypoint = results['keypoint'] - M, T, V, C = keypoint.shape - bone = np.zeros((M, T, V, C), dtype=np.float32) - - assert C in [2, 3] - for v1, v2 in self.pairs: - bone[..., v1, :] = keypoint[..., v1, :] - keypoint[..., v2, :] - if C == 3 and self.dataset in ['openpose-18', 'coco']: - score = (keypoint[..., v1, 2] + keypoint[..., v2, 2]) / 2 - bone[..., v1, 2] = score - - results['keypoint'] = bone - return results - - def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f"(dataset_type='{self.dataset}')" - return repr_str - - @TRANSFORMS.register_module() class FormatGCNInput(BaseTransform): - """Format final skeleton shape to the given ``input_format``. + """Format final skeleton shape. Required Keys: - - keypoint - - keypoint_score (optional) + - keypoint + - keypoint_score (optional) + - num_clips (optional) Modified Key: - - keypoint - - Added Key: - - - input_shape + - keypoint Args: - input_format (str): Define the final skeleton format. + num_person (int): The maximum number of people. Defaults to 2. + mode (str): The padding mode. Defaults to ``'zero'``. 
""" - def __init__(self, input_format: str, num_person: int = 2) -> None: - self.input_format = input_format - if self.input_format not in ['NCTVM']: - raise ValueError( - f'The input format {self.input_format} is invalid.') + def __init__(self, num_person: int = 2, mode: str = 'zero') -> None: self.num_person = num_person + assert mode in ['zero', 'loop'] + self.mode = mode - def transform(self, results: dict) -> dict: - """Performs the FormatShape formatting. + def transform(self, results: Dict) -> Dict: + """The transform function of :class:`FormatGCNInput`. Args: - results (dict): The resulting dict to be modified and passed - to the next transform in pipeline. + results (dict): The result dict. + + Returns: + dict: The result dict. """ keypoint = results['keypoint'] - if 'keypoint_score' in results: - keypoint_confidence = results['keypoint_score'] - keypoint_confidence = np.expand_dims(keypoint_confidence, -1) - keypoint_3d = np.concatenate((keypoint, keypoint_confidence), - axis=-1) - else: - keypoint_3d = keypoint + keypoint = np.concatenate( + (keypoint, results['keypoint_score'][..., None]), axis=-1) - keypoint_3d = np.transpose(keypoint_3d, - (3, 1, 2, 0)) # M T V C -> C T V M - - if keypoint_3d.shape[-1] < self.num_person: - pad_dim = self.num_person - keypoint_3d.shape[-1] + cur_num_person = keypoint.shape[0] + if cur_num_person < self.num_person: + pad_dim = self.num_person - cur_num_person pad = np.zeros( - keypoint_3d.shape[:-1] + (pad_dim, ), dtype=keypoint_3d.dtype) - keypoint_3d = np.concatenate((keypoint_3d, pad), axis=-1) - elif keypoint_3d.shape[-1] > self.num_person: - keypoint_3d = keypoint_3d[:, :, :, :self.num_person] + (pad_dim, ) + keypoint.shape[1:], dtype=keypoint.dtype) + keypoint = np.concatenate((keypoint, pad), axis=0) + if self.mode == 'loop' and cur_num_person == 1: + for i in range(1, self.num_person): + keypoint[i] = keypoint[0] + + elif cur_num_person > self.num_person: + keypoint = keypoint[:self.num_person] + + M, T, V, C = keypoint.shape + nc = results.get('num_clips', 1) + assert T % nc == 0 + keypoint = keypoint.reshape( + (M, nc, T // nc, V, C)).transpose(1, 0, 2, 3, 4) - results['keypoint'] = keypoint_3d - results['input_shape'] = keypoint_3d.shape + results['keypoint'] = np.ascontiguousarray(keypoint) return results - def __repr__(self): + def __repr__(self) -> str: repr_str = (f'{self.__class__.__name__}(' - f'input_format={self.input_format}, ' - f'num_person={self.num_person})') + f'num_person={self.num_person}, ' + f'mode={self.mode})') return repr_str diff --git a/mmaction/datasets/transforms/pose_loading.py b/mmaction/datasets/transforms/pose_loading.py deleted file mode 100644 index 58748eacb6..0000000000 --- a/mmaction/datasets/transforms/pose_loading.py +++ /dev/null @@ -1,683 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import copy as cp -import pickle - -import numpy as np -from mmcv.transforms import BaseTransform -from mmengine.fileio import FileClient -from scipy.stats import mode - -from mmaction.registry import TRANSFORMS -from .processing import Flip - - -@TRANSFORMS.register_module() -class UniformSampleFrames(BaseTransform): - """Uniformly sample frames from the video. - - To sample an n-frame clip from the video. UniformSampleFrames basically - divide the video into n segments of equal length and randomly sample one - frame from each segment. To make the testing results reproducible, a - random seed is set during testing, to make the sampling results - deterministic. 
- - Required keys are ``'total_frames'``, ``'start_index'`` , added or - modified keys are ``'frame_inds'``, ``'clip_len'``, - ``'frame_interval'`` and ``'num_clips'``. - - Args: - clip_len (int): Frames of each sampled output clip. - num_clips (int): Number of clips to be sampled. Defaults to 1. - test_mode (bool): Store True when building test or validation dataset. - Defaults to False. - seed (int): The random seed used during test time. Defaults to 255. - """ - - def __init__(self, clip_len, num_clips=1, test_mode=False, seed=255): - - self.clip_len = clip_len - self.num_clips = num_clips - self.test_mode = test_mode - self.seed = seed - - def _get_train_clips(self, num_frames, clip_len): - """Uniformly sample indices for training clips. - - Args: - num_frames (int): The number of frames. - clip_len (int): The length of the clip. - """ - - assert self.num_clips == 1 - if num_frames < clip_len: - start = np.random.randint(0, num_frames) - inds = np.arange(start, start + clip_len) - elif clip_len <= num_frames < 2 * clip_len: - basic = np.arange(clip_len) - inds = np.random.choice( - clip_len + 1, num_frames - clip_len, replace=False) - offset = np.zeros(clip_len + 1, dtype=np.int32) - offset[inds] = 1 - offset = np.cumsum(offset) - inds = basic + offset[:-1] - else: - bids = np.array( - [i * num_frames // clip_len for i in range(clip_len + 1)]) - bsize = np.diff(bids) - bst = bids[:clip_len] - offset = np.random.randint(bsize) - inds = bst + offset - return inds - - def _get_test_clips(self, num_frames, clip_len): - """Uniformly sample indices for testing clips. - - Args: - num_frames (int): The number of frames. - clip_len (int): The length of the clip. - """ - - np.random.seed(self.seed) - if num_frames < clip_len: - # Then we use a simple strategy - if num_frames < self.num_clips: - start_inds = list(range(self.num_clips)) - else: - start_inds = [ - i * num_frames // self.num_clips - for i in range(self.num_clips) - ] - inds = np.concatenate( - [np.arange(i, i + clip_len) for i in start_inds]) - elif clip_len <= num_frames < clip_len * 2: - all_inds = [] - for i in range(self.num_clips): - basic = np.arange(clip_len) - inds = np.random.choice( - clip_len + 1, num_frames - clip_len, replace=False) - offset = np.zeros(clip_len + 1, dtype=np.int32) - offset[inds] = 1 - offset = np.cumsum(offset) - inds = basic + offset[:-1] - all_inds.append(inds) - inds = np.concatenate(all_inds) - else: - bids = np.array( - [i * num_frames // clip_len for i in range(clip_len + 1)]) - bsize = np.diff(bids) - bst = bids[:clip_len] - all_inds = [] - for i in range(self.num_clips): - offset = np.random.randint(bsize) - all_inds.append(bst + offset) - inds = np.concatenate(all_inds) - return inds - - def transform(self, results): - """Perform the SampleFrames loading. - - Args: - results (dict): The resulting dict to be modified and passed - to the next transform in pipeline. 
- """ - num_frames = results['total_frames'] - - if self.test_mode: - inds = self._get_test_clips(num_frames, self.clip_len) - else: - inds = self._get_train_clips(num_frames, self.clip_len) - - inds = np.mod(inds, num_frames) - start_index = results['start_index'] - inds = inds + start_index - - results['frame_inds'] = inds.astype(np.int32) - results['clip_len'] = self.clip_len - results['frame_interval'] = None - results['num_clips'] = self.num_clips - return results - - def __repr__(self): - repr_str = (f'{self.__class__.__name__}(' - f'clip_len={self.clip_len}, ' - f'num_clips={self.num_clips}, ' - f'test_mode={self.test_mode}, ' - f'seed={self.seed})') - return repr_str - - -@TRANSFORMS.register_module() -class PoseDecode(BaseTransform): - """Load and decode pose with given indices. - - Required keys are "keypoint", "frame_inds" (optional), "keypoint_score" - (optional), added or modified keys are "keypoint", "keypoint_score" (if - applicable). - """ - - def transform(self, results): - """Perform the pose decoding. - - Args: - results (dict): The resulting dict to be modified and passed - to the next transform in pipeline. - """ - if 'total_frames' not in results: - results['total_frames'] = results['keypoint'].shape[1] - - if 'frame_inds' not in results: - results['frame_inds'] = np.arange(results['total_frames']) - - if results['frame_inds'].ndim != 1: - results['frame_inds'] = np.squeeze(results['frame_inds']) - - offset = results.get('offset', 0) - frame_inds = results['frame_inds'] + offset - - if 'keypoint_score' in results: - kpscore = results['keypoint_score'] - results['keypoint_score'] = kpscore[:, - frame_inds].astype(np.float32) - - if 'keypoint' in results: - results['keypoint'] = results['keypoint'][:, frame_inds].astype( - np.float32) - - return results - - def __repr__(self): - repr_str = f'{self.__class__.__name__}()' - return repr_str - - -@TRANSFORMS.register_module() -class LoadKineticsPose(BaseTransform): - """Load Kinetics Pose given filename (The format should be pickle) - - Required keys are "filename", "total_frames", "img_shape", "frame_inds", - "anno_inds" (for mmpose source, optional), added or modified keys are - "keypoint", "keypoint_score". - - Args: - io_backend (str): IO backend where frames are stored. Default: 'disk'. - squeeze (bool): Whether to remove frames with no human pose. - Default: True. - max_person (int): The max number of persons in a frame. Default: 10. - keypoint_weight (dict): The weight of keypoints. We set the confidence - score of a person as the weighted sum of confidence scores of each - joint. Persons with low confidence scores are dropped (if exceed - max_person). Default: dict(face=1, torso=2, limb=3). - source (str): The sources of the keypoints used. Choices are 'mmpose' - and 'openpose-18'. Default: 'mmpose'. - kwargs (dict, optional): Arguments for FileClient. 
- """ - - def __init__(self, - io_backend='disk', - squeeze=True, - max_person=100, - keypoint_weight=dict(face=1, torso=2, limb=3), - source='mmpose', - **kwargs): - - self.io_backend = io_backend - self.squeeze = squeeze - self.max_person = max_person - self.keypoint_weight = cp.deepcopy(keypoint_weight) - self.source = source - - if source == 'openpose-18': - self.kpsubset = dict( - face=[0, 14, 15, 16, 17], - torso=[1, 2, 8, 5, 11], - limb=[3, 4, 6, 7, 9, 10, 12, 13]) - elif source == 'mmpose': - self.kpsubset = dict( - face=[0, 1, 2, 3, 4], - torso=[5, 6, 11, 12], - limb=[7, 8, 9, 10, 13, 14, 15, 16]) - else: - raise NotImplementedError('Unknown source of Kinetics Pose') - - self.kwargs = kwargs - self.file_client = None - - def transform(self, results): - """Perform the kinetics pose decoding. - - Args: - results (dict): The resulting dict to be modified and passed - to the next transform in pipeline. - """ - assert 'filename' in results - filename = results.pop('filename') - - # only applicable to source == 'mmpose' - anno_inds = None - if 'anno_inds' in results: - assert self.source == 'mmpose' - anno_inds = results.pop('anno_inds') - results.pop('box_score', None) - - if self.file_client is None: - self.file_client = FileClient(self.io_backend, **self.kwargs) - - bytes = self.file_client.get(filename) - - # only the kp array is in the pickle file, each kp include x, y, score. - kps = pickle.loads(bytes) - - total_frames = results['total_frames'] - - frame_inds = results.pop('frame_inds') - - if anno_inds is not None: - kps = kps[anno_inds] - frame_inds = frame_inds[anno_inds] - - frame_inds = list(frame_inds) - - def mapinds(inds): - uni = np.unique(inds) - map_ = {x: i for i, x in enumerate(uni)} - inds = [map_[x] for x in inds] - return np.array(inds, dtype=np.int16) - - if self.squeeze: - frame_inds = mapinds(frame_inds) - total_frames = np.max(frame_inds) + 1 - - # write it back - results['total_frames'] = total_frames - - h, w = results['img_shape'] - if self.source == 'openpose-18': - kps[:, :, 0] *= w - kps[:, :, 1] *= h - - num_kp = kps.shape[1] - num_person = mode(frame_inds)[-1][0] - - new_kp = np.zeros([num_person, total_frames, num_kp, 2], - dtype=np.float16) - new_kpscore = np.zeros([num_person, total_frames, num_kp], - dtype=np.float16) - # 32768 is enough - num_person_frame = np.zeros([total_frames], dtype=np.int16) - - for frame_ind, kp in zip(frame_inds, kps): - person_ind = num_person_frame[frame_ind] - new_kp[person_ind, frame_ind] = kp[:, :2] - new_kpscore[person_ind, frame_ind] = kp[:, 2] - num_person_frame[frame_ind] += 1 - - kpgrp = self.kpsubset - weight = self.keypoint_weight - results['num_person'] = num_person - - if num_person > self.max_person: - for i in range(total_frames): - np_frame = num_person_frame[i] - val = new_kpscore[:np_frame, i] - - val = ( - np.sum(val[:, kpgrp['face']], 1) * weight['face'] + - np.sum(val[:, kpgrp['torso']], 1) * weight['torso'] + - np.sum(val[:, kpgrp['limb']], 1) * weight['limb']) - inds = sorted(range(np_frame), key=lambda x: -val[x]) - new_kpscore[:np_frame, i] = new_kpscore[inds, i] - new_kp[:np_frame, i] = new_kp[inds, i] - results['num_person'] = self.max_person - - results['keypoint'] = new_kp[:self.max_person] - results['keypoint_score'] = new_kpscore[:self.max_person] - return results - - def __repr__(self): - repr_str = (f'{self.__class__.__name__}(' - f'io_backend={self.io_backend}, ' - f'squeeze={self.squeeze}, ' - f'max_person={self.max_person}, ' - f'keypoint_weight={self.keypoint_weight}, ' - 
f'source={self.source}, ' - f'kwargs={self.kwargs})') - return repr_str - - -@TRANSFORMS.register_module() -class GeneratePoseTarget(BaseTransform): - """Generate pseudo heatmaps based on joint coordinates and confidence. - - Required keys are "keypoint", "img_shape", "keypoint_score" (optional), - added or modified keys are "imgs". - - Args: - sigma (float): The sigma of the generated gaussian map. Default: 0.6. - use_score (bool): Use the confidence score of keypoints as the maximum - of the gaussian maps. Default: True. - with_kp (bool): Generate pseudo heatmaps for keypoints. Default: True. - with_limb (bool): Generate pseudo heatmaps for limbs. At least one of - 'with_kp' and 'with_limb' should be True. Default: False. - skeletons (tuple[tuple]): The definition of human skeletons. - Default: ((0, 1), (0, 2), (1, 3), (2, 4), (0, 5), (5, 7), (7, 9), - (0, 6), (6, 8), (8, 10), (5, 11), (11, 13), (13, 15), - (6, 12), (12, 14), (14, 16), (11, 12)), - which is the definition of COCO-17p skeletons. - double (bool): Output both original heatmaps and flipped heatmaps. - Default: False. - left_kp (tuple[int]): Indexes of left keypoints, which is used when - flipping heatmaps. Default: (1, 3, 5, 7, 9, 11, 13, 15), - which is left keypoints in COCO-17p. - right_kp (tuple[int]): Indexes of right keypoints, which is used when - flipping heatmaps. Default: (2, 4, 6, 8, 10, 12, 14, 16), - which is right keypoints in COCO-17p. - """ - - def __init__(self, - sigma=0.6, - use_score=True, - with_kp=True, - with_limb=False, - skeletons=((0, 1), (0, 2), (1, 3), (2, 4), (0, 5), (5, 7), - (7, 9), (0, 6), (6, 8), (8, 10), (5, 11), (11, 13), - (13, 15), (6, 12), (12, 14), (14, 16), (11, 12)), - double=False, - left_kp=(1, 3, 5, 7, 9, 11, 13, 15), - right_kp=(2, 4, 6, 8, 10, 12, 14, 16)): - - self.sigma = sigma - self.use_score = use_score - self.with_kp = with_kp - self.with_limb = with_limb - self.double = double - - # an auxiliary const - self.eps = 1e-4 - - assert self.with_kp or self.with_limb, ( - 'At least one of "with_limb" ' - 'and "with_kp" should be set as True.') - self.left_kp = left_kp - self.right_kp = right_kp - self.skeletons = skeletons - - def generate_a_heatmap(self, img_h, img_w, centers, sigma, max_values): - """Generate pseudo heatmap for one keypoint in one frame. - - Args: - img_h (int): The height of the heatmap. - img_w (int): The width of the heatmap. - centers (np.ndarray): The coordinates of corresponding keypoints - (of multiple persons). - sigma (float): The sigma of generated gaussian. - max_values (np.ndarray): The max values of each keypoint. - - Returns: - np.ndarray: The generated pseudo heatmap. 
- """ - - heatmap = np.zeros([img_h, img_w], dtype=np.float32) - - for center, max_value in zip(centers, max_values): - mu_x, mu_y = center[0], center[1] - if max_value < self.eps: - continue - - st_x = max(int(mu_x - 3 * sigma), 0) - ed_x = min(int(mu_x + 3 * sigma) + 1, img_w) - st_y = max(int(mu_y - 3 * sigma), 0) - ed_y = min(int(mu_y + 3 * sigma) + 1, img_h) - x = np.arange(st_x, ed_x, 1, np.float32) - y = np.arange(st_y, ed_y, 1, np.float32) - - # if the keypoint not in the heatmap coordinate system - if not (len(x) and len(y)): - continue - y = y[:, None] - - patch = np.exp(-((x - mu_x)**2 + (y - mu_y)**2) / 2 / sigma**2) - patch = patch * max_value - heatmap[st_y:ed_y, - st_x:ed_x] = np.maximum(heatmap[st_y:ed_y, st_x:ed_x], - patch) - - return heatmap - - def generate_a_limb_heatmap(self, img_h, img_w, starts, ends, sigma, - start_values, end_values): - """Generate pseudo heatmap for one limb in one frame. - - Args: - img_h (int): The height of the heatmap. - img_w (int): The width of the heatmap. - starts (np.ndarray): The coordinates of one keypoint in the - corresponding limbs (of multiple persons). - ends (np.ndarray): The coordinates of the other keypoint in the - corresponding limbs (of multiple persons). - sigma (float): The sigma of generated gaussian. - start_values (np.ndarray): The max values of one keypoint in the - corresponding limbs. - end_values (np.ndarray): The max values of the other keypoint in - the corresponding limbs. - - Returns: - np.ndarray: The generated pseudo heatmap. - """ - - heatmap = np.zeros([img_h, img_w], dtype=np.float32) - - for start, end, start_value, end_value in zip(starts, ends, - start_values, - end_values): - value_coeff = min(start_value, end_value) - if value_coeff < self.eps: - continue - - min_x, max_x = min(start[0], end[0]), max(start[0], end[0]) - min_y, max_y = min(start[1], end[1]), max(start[1], end[1]) - - min_x = max(int(min_x - 3 * sigma), 0) - max_x = min(int(max_x + 3 * sigma) + 1, img_w) - min_y = max(int(min_y - 3 * sigma), 0) - max_y = min(int(max_y + 3 * sigma) + 1, img_h) - - x = np.arange(min_x, max_x, 1, np.float32) - y = np.arange(min_y, max_y, 1, np.float32) - - if not (len(x) and len(y)): - continue - - y = y[:, None] - x_0 = np.zeros_like(x) - y_0 = np.zeros_like(y) - - # distance to start keypoints - d2_start = ((x - start[0])**2 + (y - start[1])**2) - - # distance to end keypoints - d2_end = ((x - end[0])**2 + (y - end[1])**2) - - # the distance between start and end keypoints. - d2_ab = ((start[0] - end[0])**2 + (start[1] - end[1])**2) - - if d2_ab < 1: - full_map = self.generate_a_heatmap(img_h, img_w, [start], - sigma, [start_value]) - heatmap = np.maximum(heatmap, full_map) - continue - - coeff = (d2_start - d2_end + d2_ab) / 2. / d2_ab - - a_dominate = coeff <= 0 - b_dominate = coeff >= 1 - seg_dominate = 1 - a_dominate - b_dominate - - position = np.stack([x + y_0, y + x_0], axis=-1) - projection = start + np.stack([coeff, coeff], axis=-1) * ( - end - start) - d2_line = position - projection - d2_line = d2_line[:, :, 0]**2 + d2_line[:, :, 1]**2 - d2_seg = ( - a_dominate * d2_start + b_dominate * d2_end + - seg_dominate * d2_line) - - patch = np.exp(-d2_seg / 2. / sigma**2) - patch = patch * value_coeff - - heatmap[min_y:max_y, min_x:max_x] = np.maximum( - heatmap[min_y:max_y, min_x:max_x], patch) - - return heatmap - - def generate_heatmap(self, img_h, img_w, kps, sigma, max_values): - """Generate pseudo heatmap for all keypoints and limbs in one frame (if - needed). 
- - Args: - img_h (int): The height of the heatmap. - img_w (int): The width of the heatmap. - kps (np.ndarray): The coordinates of keypoints in this frame. - sigma (float): The sigma of generated gaussian. - max_values (np.ndarray): The confidence score of each keypoint. - - Returns: - np.ndarray: The generated pseudo heatmap. - """ - - heatmaps = [] - if self.with_kp: - num_kp = kps.shape[1] - for i in range(num_kp): - heatmap = self.generate_a_heatmap(img_h, img_w, kps[:, i], - sigma, max_values[:, i]) - heatmaps.append(heatmap) - - if self.with_limb: - for limb in self.skeletons: - start_idx, end_idx = limb - starts = kps[:, start_idx] - ends = kps[:, end_idx] - - start_values = max_values[:, start_idx] - end_values = max_values[:, end_idx] - heatmap = self.generate_a_limb_heatmap(img_h, img_w, starts, - ends, sigma, - start_values, - end_values) - heatmaps.append(heatmap) - - return np.stack(heatmaps, axis=-1) - - def gen_an_aug(self, results): - """Generate pseudo heatmaps for all frames. - - Args: - results (dict): The dictionary that contains all info of a sample. - - Returns: - list[np.ndarray]: The generated pseudo heatmaps. - """ - - all_kps = results['keypoint'] - kp_shape = all_kps.shape - - if 'keypoint_score' in results: - all_kpscores = results['keypoint_score'] - else: - all_kpscores = np.ones(kp_shape[:-1], dtype=np.float32) - - img_h, img_w = results['img_shape'] - num_frame = kp_shape[1] - - imgs = [] - for i in range(num_frame): - sigma = self.sigma - kps = all_kps[:, i] - kpscores = all_kpscores[:, i] - - max_values = np.ones(kpscores.shape, dtype=np.float32) - if self.use_score: - max_values = kpscores - - hmap = self.generate_heatmap(img_h, img_w, kps, sigma, max_values) - imgs.append(hmap) - - return imgs - - def transform(self, results): - """Generate pseudo heatmaps based on joint coordinates and confidence. - - Args: - results (dict): The resulting dict to be modified and passed - to the next transform in pipeline. - """ - if not self.double: - results['imgs'] = np.stack(self.gen_an_aug(results)) - else: - results_ = cp.deepcopy(results) - flip = Flip( - flip_ratio=1, left_kp=self.left_kp, right_kp=self.right_kp) - results_ = flip(results_) - results['imgs'] = np.concatenate( - [self.gen_an_aug(results), - self.gen_an_aug(results_)]) - return results - - def __repr__(self): - repr_str = (f'{self.__class__.__name__}(' - f'sigma={self.sigma}, ' - f'use_score={self.use_score}, ' - f'with_kp={self.with_kp}, ' - f'with_limb={self.with_limb}, ' - f'skeletons={self.skeletons}, ' - f'double={self.double}, ' - f'left_kp={self.left_kp}, ' - f'right_kp={self.right_kp})') - return repr_str - - -@TRANSFORMS.register_module() -class PaddingWithLoop(BaseTransform): - """Sample frames from the video. - - To sample an n-frame clip from the video, PaddingWithLoop samples - the frames from zero index, and loop the frames if the length of - video frames is less than the value of 'clip_len'. - - Required keys are "total_frames", added or modified keys - are "frame_inds", "clip_len", "frame_interval" and "num_clips". - - Args: - clip_len (int): Frames of each sampled output clip. - num_clips (int): Number of clips to be sampled. Default: 1. - """ - - def __init__(self, clip_len, num_clips=1): - - self.clip_len = clip_len - self.num_clips = num_clips - - def transform(self, results): - """Sample frames from the video. - - Args: - results (dict): The resulting dict to be modified and passed - to the next transform in pipeline. 
- """ - num_frames = results['total_frames'] - - start_index = results['start_index'] - inds = np.arange(start_index, start_index + self.clip_len) - inds = np.mod(inds, num_frames) - - results['frame_inds'] = inds.astype(np.int32) - results['clip_len'] = self.clip_len - results['frame_interval'] = None - results['num_clips'] = self.num_clips - return results - - def __repr__(self): - repr_str = (f'{self.__class__.__name__}(' - f'clip_len={self.clip_len}, ' - f'num_clips={self.num_clips})') - return repr_str diff --git a/mmaction/datasets/transforms/pose_transforms.py b/mmaction/datasets/transforms/pose_transforms.py new file mode 100644 index 0000000000..1740a18575 --- /dev/null +++ b/mmaction/datasets/transforms/pose_transforms.py @@ -0,0 +1,1289 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy as cp +import pickle +from typing import Dict, List, Tuple + +import numpy as np +from mmcv.transforms import BaseTransform, KeyMapper +from mmengine.dataset import Compose +from mmengine.fileio import FileClient +from scipy.stats import mode +from torch.nn.modules.utils import _pair + +from mmaction.registry import TRANSFORMS +from .processing import Flip, _combine_quadruple + + +@TRANSFORMS.register_module() +class LoadKineticsPose(BaseTransform): + """Load Kinetics Pose given filename (The format should be pickle) + + Required keys are "filename", "total_frames", "img_shape", "frame_inds", + "anno_inds" (for mmpose source, optional), added or modified keys are + "keypoint", "keypoint_score". + + Args: + io_backend (str): IO backend where frames are stored. Default: 'disk'. + squeeze (bool): Whether to remove frames with no human pose. + Default: True. + max_person (int): The max number of persons in a frame. Default: 10. + keypoint_weight (dict): The weight of keypoints. We set the confidence + score of a person as the weighted sum of confidence scores of each + joint. Persons with low confidence scores are dropped (if exceed + max_person). Default: dict(face=1, torso=2, limb=3). + source (str): The sources of the keypoints used. Choices are 'mmpose' + and 'openpose-18'. Default: 'mmpose'. + kwargs (dict, optional): Arguments for FileClient. + """ + + def __init__(self, + io_backend='disk', + squeeze=True, + max_person=100, + keypoint_weight=dict(face=1, torso=2, limb=3), + source='mmpose', + **kwargs): + + self.io_backend = io_backend + self.squeeze = squeeze + self.max_person = max_person + self.keypoint_weight = cp.deepcopy(keypoint_weight) + self.source = source + + if source == 'openpose-18': + self.kpsubset = dict( + face=[0, 14, 15, 16, 17], + torso=[1, 2, 8, 5, 11], + limb=[3, 4, 6, 7, 9, 10, 12, 13]) + elif source == 'mmpose': + self.kpsubset = dict( + face=[0, 1, 2, 3, 4], + torso=[5, 6, 11, 12], + limb=[7, 8, 9, 10, 13, 14, 15, 16]) + else: + raise NotImplementedError('Unknown source of Kinetics Pose') + + self.kwargs = kwargs + self.file_client = None + + def transform(self, results): + """Perform the kinetics pose decoding. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. 
+ """ + assert 'filename' in results + filename = results.pop('filename') + + # only applicable to source == 'mmpose' + anno_inds = None + if 'anno_inds' in results: + assert self.source == 'mmpose' + anno_inds = results.pop('anno_inds') + results.pop('box_score', None) + + if self.file_client is None: + self.file_client = FileClient(self.io_backend, **self.kwargs) + + bytes = self.file_client.get(filename) + + # only the kp array is in the pickle file, each kp include x, y, score. + kps = pickle.loads(bytes) + + total_frames = results['total_frames'] + + frame_inds = results.pop('frame_inds') + + if anno_inds is not None: + kps = kps[anno_inds] + frame_inds = frame_inds[anno_inds] + + frame_inds = list(frame_inds) + + def mapinds(inds): + uni = np.unique(inds) + map_ = {x: i for i, x in enumerate(uni)} + inds = [map_[x] for x in inds] + return np.array(inds, dtype=np.int16) + + if self.squeeze: + frame_inds = mapinds(frame_inds) + total_frames = np.max(frame_inds) + 1 + + # write it back + results['total_frames'] = total_frames + + h, w = results['img_shape'] + if self.source == 'openpose-18': + kps[:, :, 0] *= w + kps[:, :, 1] *= h + + num_kp = kps.shape[1] + num_person = mode(frame_inds)[-1][0] + + new_kp = np.zeros([num_person, total_frames, num_kp, 2], + dtype=np.float16) + new_kpscore = np.zeros([num_person, total_frames, num_kp], + dtype=np.float16) + # 32768 is enough + num_person_frame = np.zeros([total_frames], dtype=np.int16) + + for frame_ind, kp in zip(frame_inds, kps): + person_ind = num_person_frame[frame_ind] + new_kp[person_ind, frame_ind] = kp[:, :2] + new_kpscore[person_ind, frame_ind] = kp[:, 2] + num_person_frame[frame_ind] += 1 + + kpgrp = self.kpsubset + weight = self.keypoint_weight + results['num_person'] = num_person + + if num_person > self.max_person: + for i in range(total_frames): + np_frame = num_person_frame[i] + val = new_kpscore[:np_frame, i] + + val = ( + np.sum(val[:, kpgrp['face']], 1) * weight['face'] + + np.sum(val[:, kpgrp['torso']], 1) * weight['torso'] + + np.sum(val[:, kpgrp['limb']], 1) * weight['limb']) + inds = sorted(range(np_frame), key=lambda x: -val[x]) + new_kpscore[:np_frame, i] = new_kpscore[inds, i] + new_kp[:np_frame, i] = new_kp[inds, i] + results['num_person'] = self.max_person + + results['keypoint'] = new_kp[:self.max_person] + results['keypoint_score'] = new_kpscore[:self.max_person] + return results + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(' + f'io_backend={self.io_backend}, ' + f'squeeze={self.squeeze}, ' + f'max_person={self.max_person}, ' + f'keypoint_weight={self.keypoint_weight}, ' + f'source={self.source}, ' + f'kwargs={self.kwargs})') + return repr_str + + +@TRANSFORMS.register_module() +class GeneratePoseTarget(BaseTransform): + """Generate pseudo heatmaps based on joint coordinates and confidence. + + Required keys are "keypoint", "img_shape", "keypoint_score" (optional), + added or modified keys are "imgs". + + Args: + sigma (float): The sigma of the generated gaussian map. Default: 0.6. + use_score (bool): Use the confidence score of keypoints as the maximum + of the gaussian maps. Default: True. + with_kp (bool): Generate pseudo heatmaps for keypoints. Default: True. + with_limb (bool): Generate pseudo heatmaps for limbs. At least one of + 'with_kp' and 'with_limb' should be True. Default: False. + skeletons (tuple[tuple]): The definition of human skeletons. 
+ Default: ((0, 1), (0, 2), (1, 3), (2, 4), (0, 5), (5, 7), (7, 9), + (0, 6), (6, 8), (8, 10), (5, 11), (11, 13), (13, 15), + (6, 12), (12, 14), (14, 16), (11, 12)), + which is the definition of COCO-17p skeletons. + double (bool): Output both original heatmaps and flipped heatmaps. + Default: False. + left_kp (tuple[int]): Indexes of left keypoints, which is used when + flipping heatmaps. Default: (1, 3, 5, 7, 9, 11, 13, 15), + which is left keypoints in COCO-17p. + right_kp (tuple[int]): Indexes of right keypoints, which is used when + flipping heatmaps. Default: (2, 4, 6, 8, 10, 12, 14, 16), + which is right keypoints in COCO-17p. + """ + + def __init__(self, + sigma=0.6, + use_score=True, + with_kp=True, + with_limb=False, + skeletons=((0, 1), (0, 2), (1, 3), (2, 4), (0, 5), (5, 7), + (7, 9), (0, 6), (6, 8), (8, 10), (5, 11), (11, 13), + (13, 15), (6, 12), (12, 14), (14, 16), (11, 12)), + double=False, + left_kp=(1, 3, 5, 7, 9, 11, 13, 15), + right_kp=(2, 4, 6, 8, 10, 12, 14, 16)): + + self.sigma = sigma + self.use_score = use_score + self.with_kp = with_kp + self.with_limb = with_limb + self.double = double + + # an auxiliary const + self.eps = 1e-4 + + assert self.with_kp or self.with_limb, ( + 'At least one of "with_limb" ' + 'and "with_kp" should be set as True.') + self.left_kp = left_kp + self.right_kp = right_kp + self.skeletons = skeletons + + def generate_a_heatmap(self, img_h, img_w, centers, sigma, max_values): + """Generate pseudo heatmap for one keypoint in one frame. + + Args: + img_h (int): The height of the heatmap. + img_w (int): The width of the heatmap. + centers (np.ndarray): The coordinates of corresponding keypoints + (of multiple persons). + sigma (float): The sigma of generated gaussian. + max_values (np.ndarray): The max values of each keypoint. + + Returns: + np.ndarray: The generated pseudo heatmap. + """ + + heatmap = np.zeros([img_h, img_w], dtype=np.float32) + + for center, max_value in zip(centers, max_values): + mu_x, mu_y = center[0], center[1] + if max_value < self.eps: + continue + + st_x = max(int(mu_x - 3 * sigma), 0) + ed_x = min(int(mu_x + 3 * sigma) + 1, img_w) + st_y = max(int(mu_y - 3 * sigma), 0) + ed_y = min(int(mu_y + 3 * sigma) + 1, img_h) + x = np.arange(st_x, ed_x, 1, np.float32) + y = np.arange(st_y, ed_y, 1, np.float32) + + # if the keypoint not in the heatmap coordinate system + if not (len(x) and len(y)): + continue + y = y[:, None] + + patch = np.exp(-((x - mu_x)**2 + (y - mu_y)**2) / 2 / sigma**2) + patch = patch * max_value + heatmap[st_y:ed_y, + st_x:ed_x] = np.maximum(heatmap[st_y:ed_y, st_x:ed_x], + patch) + + return heatmap + + def generate_a_limb_heatmap(self, img_h, img_w, starts, ends, sigma, + start_values, end_values): + """Generate pseudo heatmap for one limb in one frame. + + Args: + img_h (int): The height of the heatmap. + img_w (int): The width of the heatmap. + starts (np.ndarray): The coordinates of one keypoint in the + corresponding limbs (of multiple persons). + ends (np.ndarray): The coordinates of the other keypoint in the + corresponding limbs (of multiple persons). + sigma (float): The sigma of generated gaussian. + start_values (np.ndarray): The max values of one keypoint in the + corresponding limbs. + end_values (np.ndarray): The max values of the other keypoint in + the corresponding limbs. + + Returns: + np.ndarray: The generated pseudo heatmap. 
+ """ + + heatmap = np.zeros([img_h, img_w], dtype=np.float32) + + for start, end, start_value, end_value in zip(starts, ends, + start_values, + end_values): + value_coeff = min(start_value, end_value) + if value_coeff < self.eps: + continue + + min_x, max_x = min(start[0], end[0]), max(start[0], end[0]) + min_y, max_y = min(start[1], end[1]), max(start[1], end[1]) + + min_x = max(int(min_x - 3 * sigma), 0) + max_x = min(int(max_x + 3 * sigma) + 1, img_w) + min_y = max(int(min_y - 3 * sigma), 0) + max_y = min(int(max_y + 3 * sigma) + 1, img_h) + + x = np.arange(min_x, max_x, 1, np.float32) + y = np.arange(min_y, max_y, 1, np.float32) + + if not (len(x) and len(y)): + continue + + y = y[:, None] + x_0 = np.zeros_like(x) + y_0 = np.zeros_like(y) + + # distance to start keypoints + d2_start = ((x - start[0])**2 + (y - start[1])**2) + + # distance to end keypoints + d2_end = ((x - end[0])**2 + (y - end[1])**2) + + # the distance between start and end keypoints. + d2_ab = ((start[0] - end[0])**2 + (start[1] - end[1])**2) + + if d2_ab < 1: + full_map = self.generate_a_heatmap(img_h, img_w, [start], + sigma, [start_value]) + heatmap = np.maximum(heatmap, full_map) + continue + + coeff = (d2_start - d2_end + d2_ab) / 2. / d2_ab + + a_dominate = coeff <= 0 + b_dominate = coeff >= 1 + seg_dominate = 1 - a_dominate - b_dominate + + position = np.stack([x + y_0, y + x_0], axis=-1) + projection = start + np.stack([coeff, coeff], axis=-1) * ( + end - start) + d2_line = position - projection + d2_line = d2_line[:, :, 0]**2 + d2_line[:, :, 1]**2 + d2_seg = ( + a_dominate * d2_start + b_dominate * d2_end + + seg_dominate * d2_line) + + patch = np.exp(-d2_seg / 2. / sigma**2) + patch = patch * value_coeff + + heatmap[min_y:max_y, min_x:max_x] = np.maximum( + heatmap[min_y:max_y, min_x:max_x], patch) + + return heatmap + + def generate_heatmap(self, img_h, img_w, kps, sigma, max_values): + """Generate pseudo heatmap for all keypoints and limbs in one frame (if + needed). + + Args: + img_h (int): The height of the heatmap. + img_w (int): The width of the heatmap. + kps (np.ndarray): The coordinates of keypoints in this frame. + sigma (float): The sigma of generated gaussian. + max_values (np.ndarray): The confidence score of each keypoint. + + Returns: + np.ndarray: The generated pseudo heatmap. + """ + + heatmaps = [] + if self.with_kp: + num_kp = kps.shape[1] + for i in range(num_kp): + heatmap = self.generate_a_heatmap(img_h, img_w, kps[:, i], + sigma, max_values[:, i]) + heatmaps.append(heatmap) + + if self.with_limb: + for limb in self.skeletons: + start_idx, end_idx = limb + starts = kps[:, start_idx] + ends = kps[:, end_idx] + + start_values = max_values[:, start_idx] + end_values = max_values[:, end_idx] + heatmap = self.generate_a_limb_heatmap(img_h, img_w, starts, + ends, sigma, + start_values, + end_values) + heatmaps.append(heatmap) + + return np.stack(heatmaps, axis=-1) + + def gen_an_aug(self, results): + """Generate pseudo heatmaps for all frames. + + Args: + results (dict): The dictionary that contains all info of a sample. + + Returns: + list[np.ndarray]: The generated pseudo heatmaps. 
+ """ + + all_kps = results['keypoint'] + kp_shape = all_kps.shape + + if 'keypoint_score' in results: + all_kpscores = results['keypoint_score'] + else: + all_kpscores = np.ones(kp_shape[:-1], dtype=np.float32) + + img_h, img_w = results['img_shape'] + num_frame = kp_shape[1] + + imgs = [] + for i in range(num_frame): + sigma = self.sigma + kps = all_kps[:, i] + kpscores = all_kpscores[:, i] + + max_values = np.ones(kpscores.shape, dtype=np.float32) + if self.use_score: + max_values = kpscores + + hmap = self.generate_heatmap(img_h, img_w, kps, sigma, max_values) + imgs.append(hmap) + + return imgs + + def transform(self, results): + """Generate pseudo heatmaps based on joint coordinates and confidence. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ + if not self.double: + results['imgs'] = np.stack(self.gen_an_aug(results)) + else: + results_ = cp.deepcopy(results) + flip = Flip( + flip_ratio=1, left_kp=self.left_kp, right_kp=self.right_kp) + results_ = flip(results_) + results['imgs'] = np.concatenate( + [self.gen_an_aug(results), + self.gen_an_aug(results_)]) + return results + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(' + f'sigma={self.sigma}, ' + f'use_score={self.use_score}, ' + f'with_kp={self.with_kp}, ' + f'with_limb={self.with_limb}, ' + f'skeletons={self.skeletons}, ' + f'double={self.double}, ' + f'left_kp={self.left_kp}, ' + f'right_kp={self.right_kp})') + return repr_str + + +@TRANSFORMS.register_module() +class PoseCompact(BaseTransform): + """Convert the coordinates of keypoints to make it more compact. + Specifically, it first find a tight bounding box that surrounds all joints + in each frame, then we expand the tight box by a given padding ratio. For + example, if 'padding == 0.25', then the expanded box has unchanged center, + and 1.25x width and height. + + Required keys in results are "img_shape", "keypoint", add or modified keys + are "img_shape", "keypoint", "crop_quadruple". + + Args: + padding (float): The padding size. Default: 0.25. + threshold (int): The threshold for the tight bounding box. If the width + or height of the tight bounding box is smaller than the threshold, + we do not perform the compact operation. Default: 10. + hw_ratio (float | tuple[float] | None): The hw_ratio of the expanded + box. Float indicates the specific ratio and tuple indicates a + ratio range. If set as None, it means there is no requirement on + hw_ratio. Default: None. + allow_imgpad (bool): Whether to allow expanding the box outside the + image to meet the hw_ratio requirement. Default: True. + + Returns: + type: Description of returned object. + """ + + def __init__(self, + padding=0.25, + threshold=10, + hw_ratio=None, + allow_imgpad=True): + + self.padding = padding + self.threshold = threshold + if hw_ratio is not None: + hw_ratio = _pair(hw_ratio) + + self.hw_ratio = hw_ratio + + self.allow_imgpad = allow_imgpad + assert self.padding >= 0 + + def transform(self, results): + """Convert the coordinates of keypoints to make it more compact. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. + """ + img_shape = results['img_shape'] + h, w = img_shape + kp = results['keypoint'] + + # Make NaN zero + kp[np.isnan(kp)] = 0. 
+ kp_x = kp[..., 0] + kp_y = kp[..., 1] + + min_x = np.min(kp_x[kp_x != 0], initial=np.Inf) + min_y = np.min(kp_y[kp_y != 0], initial=np.Inf) + max_x = np.max(kp_x[kp_x != 0], initial=-np.Inf) + max_y = np.max(kp_y[kp_y != 0], initial=-np.Inf) + + # The compact area is too small + if max_x - min_x < self.threshold or max_y - min_y < self.threshold: + return results + + center = ((max_x + min_x) / 2, (max_y + min_y) / 2) + half_width = (max_x - min_x) / 2 * (1 + self.padding) + half_height = (max_y - min_y) / 2 * (1 + self.padding) + + if self.hw_ratio is not None: + half_height = max(self.hw_ratio[0] * half_width, half_height) + half_width = max(1 / self.hw_ratio[1] * half_height, half_width) + + min_x, max_x = center[0] - half_width, center[0] + half_width + min_y, max_y = center[1] - half_height, center[1] + half_height + + # hot update + if not self.allow_imgpad: + min_x, min_y = int(max(0, min_x)), int(max(0, min_y)) + max_x, max_y = int(min(w, max_x)), int(min(h, max_y)) + else: + min_x, min_y = int(min_x), int(min_y) + max_x, max_y = int(max_x), int(max_y) + + kp_x[kp_x != 0] -= min_x + kp_y[kp_y != 0] -= min_y + + new_shape = (max_y - min_y, max_x - min_x) + results['img_shape'] = new_shape + + # the order is x, y, w, h (in [0, 1]), a tuple + crop_quadruple = results.get('crop_quadruple', (0., 0., 1., 1.)) + new_crop_quadruple = (min_x / w, min_y / h, (max_x - min_x) / w, + (max_y - min_y) / h) + crop_quadruple = _combine_quadruple(crop_quadruple, new_crop_quadruple) + results['crop_quadruple'] = crop_quadruple + return results + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(padding={self.padding}, ' + f'threshold={self.threshold}, ' + f'hw_ratio={self.hw_ratio}, ' + f'allow_imgpad={self.allow_imgpad})') + return repr_str + + +@TRANSFORMS.register_module() +class PreNormalize3D(BaseTransform): + """PreNormalize for NTURGB+D 3D keypoints (x, y, z). + + PreNormalize3D first subtracts the coordinates of each joint + from the coordinates of the 'spine' (joint #1 in ntu) of the first person + in the first frame. Subsequently, it performs a 3D rotation to fix the Z + axis parallel to the 3D vector from the 'hip' (joint #0) and the 'spine' + (joint #1) and the X axis toward the 3D vector from the 'right shoulder' + (joint #8) and the 'left shoulder' (joint #4). Codes adapted from + https://github.com/lshiwjx/2s-AGCN. + + Required Keys: + + - keypoint + - total_frames (optional) + + Modified Keys: + + - keypoint + + Added Keys: + + - body_center + + Args: + zaxis (list[int]): The target Z axis for the 3D rotation. + Defaults to ``[0, 1]``. + xaxis (list[int]): The target X axis for the 3D rotation. + Defaults to ``[8, 4]``. + align_spine (bool): Whether to perform a 3D rotation to + align the spine. Defaults to True. + align_shoulder (bool): Whether to perform a 3D rotation + to align the shoulder. Defaults to True. + align_center (bool): Whether to align the body center. + Defaults to True. 
+ """ + + def __init__(self, + zaxis: List[int] = [0, 1], + xaxis: List[int] = [8, 4], + align_spine: bool = True, + align_shoulder: bool = True, + align_center: bool = True) -> None: + self.zaxis = zaxis + self.xaxis = xaxis + self.align_center = align_center + self.align_spine = align_spine + self.align_shoulder = align_shoulder + + def unit_vector(self, vector: np.ndarray) -> np.ndarray: + """Returns the unit vector of the vector.""" + return vector / np.linalg.norm(vector) + + def angle_between(self, v1: np.ndarray, v2: np.ndarray) -> float: + """Returns the angle in radians between vectors 'v1' and 'v2'.""" + if np.abs(v1).sum() < 1e-6 or np.abs(v2).sum() < 1e-6: + return 0 + v1_u = self.unit_vector(v1) + v2_u = self.unit_vector(v2) + return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0)) + + def rotation_matrix(self, axis: np.ndarray, theta: float) -> np.ndarray: + """Returns the rotation matrix associated with counterclockwise + rotation about the given axis by theta radians.""" + if np.abs(axis).sum() < 1e-6 or np.abs(theta) < 1e-6: + return np.eye(3) + axis = np.asarray(axis) + axis = axis / np.sqrt(np.dot(axis, axis)) + a = np.cos(theta / 2.0) + b, c, d = -axis * np.sin(theta / 2.0) + aa, bb, cc, dd = a * a, b * b, c * c, d * d + bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d + return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)], + [2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)], + [2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]]) + + def transform(self, results: Dict) -> Dict: + """The transform function of :class:`PreNormalize3D`. + + Args: + results (dict): The result dict. + + Returns: + dict: The result dict. + """ + skeleton = results['keypoint'] + total_frames = results.get('total_frames', skeleton.shape[1]) + + M, T, V, C = skeleton.shape + assert T == total_frames + if skeleton.sum() == 0: + return results + + index0 = [ + i for i in range(T) if not np.all(np.isclose(skeleton[0, i], 0)) + ] + + assert M in [1, 2] + if M == 2: + index1 = [ + i for i in range(T) + if not np.all(np.isclose(skeleton[1, i], 0)) + ] + if len(index0) < len(index1): + skeleton = skeleton[:, np.array(index1)] + skeleton = skeleton[[1, 0]] + else: + skeleton = skeleton[:, np.array(index0)] + else: + skeleton = skeleton[:, np.array(index0)] + + T_new = skeleton.shape[1] + + if self.align_center: + if skeleton.shape[2] == 25: + main_body_center = skeleton[0, 0, 1].copy() + else: + main_body_center = skeleton[0, 0, -1].copy() + mask = ((skeleton != 0).sum(-1) > 0)[..., None] + skeleton = (skeleton - main_body_center) * mask + + if self.align_spine: + joint_bottom = skeleton[0, 0, self.zaxis[0]] + joint_top = skeleton[0, 0, self.zaxis[1]] + axis = np.cross(joint_top - joint_bottom, [0, 0, 1]) + angle = self.angle_between(joint_top - joint_bottom, [0, 0, 1]) + matrix_z = self.rotation_matrix(axis, angle) + skeleton = np.einsum('abcd,kd->abck', skeleton, matrix_z) + + if self.align_shoulder: + joint_rshoulder = skeleton[0, 0, self.xaxis[0]] + joint_lshoulder = skeleton[0, 0, self.xaxis[1]] + axis = np.cross(joint_rshoulder - joint_lshoulder, [1, 0, 0]) + angle = self.angle_between(joint_rshoulder - joint_lshoulder, + [1, 0, 0]) + matrix_x = self.rotation_matrix(axis, angle) + skeleton = np.einsum('abcd,kd->abck', skeleton, matrix_x) + + results['keypoint'] = skeleton + results['total_frames'] = T_new + results['body_center'] = main_body_center + return results + + def __repr__(self) -> str: + repr_str = (f'{self.__class__.__name__}(' + f'zaxis={self.zaxis}, ' + 
f'xaxis={self.xaxis}, ' + f'align_center={self.align_center}, ' + f'align_spine={self.align_spine}, ' + f'align_shoulder={self.align_shoulder})') + return repr_str + + +@TRANSFORMS.register_module() +class PreNormalize2D(BaseTransform): + """Normalize the range of keypoint values. + + Required Keys: + + - keypoint + - img_shape (optional) + + Modified Keys: + + - keypoint + + Args: + img_shape (tuple[int, int]): The resolution of the original video. + Defaults to ``(1080, 1920)``. + """ + + def __init__(self, img_shape: Tuple[int, int] = (1080, 1920)) -> None: + self.img_shape = img_shape + + def transform(self, results: Dict) -> Dict: + """The transform function of :class:`PreNormalize2D`. + + Args: + results (dict): The result dict. + + Returns: + dict: The result dict. + """ + h, w = results.get('img_shape', self.img_shape) + results['keypoint'][..., 0] = \ + (results['keypoint'][..., 0] - (w / 2)) / (w / 2) + results['keypoint'][..., 1] = \ + (results['keypoint'][..., 1] - (h / 2)) / (h / 2) + return results + + def __repr__(self) -> str: + repr_str = (f'{self.__class__.__name__}(' + f'img_shape={self.img_shape})') + return repr_str + + +@TRANSFORMS.register_module() +class JointToBone(BaseTransform): + """Convert the joint information to bone information. + + Required Keys: + + - keypoint + + Modified Keys: + + - keypoint + + Args: + dataset (str): Define the type of dataset: 'nturgb+d', 'openpose', + 'coco'. Defaults to ``'nturgb+d'``. + target (str): The target key for the bone information. + Defaults to ``'keypoint'``. + """ + + def __init__(self, + dataset: str = 'nturgb+d', + target: str = 'keypoint') -> None: + self.dataset = dataset + self.target = target + if self.dataset not in ['nturgb+d', 'openpose', 'coco']: + raise ValueError( + f'The dataset type {self.dataset} is not supported') + if self.dataset == 'nturgb+d': + self.pairs = [(0, 1), (1, 20), (2, 20), (3, 2), (4, 20), (5, 4), + (6, 5), (7, 6), (8, 20), (9, 8), (10, 9), (11, 10), + (12, 0), (13, 12), (14, 13), (15, 14), (16, 0), + (17, 16), (18, 17), (19, 18), (21, 22), (20, 20), + (22, 7), (23, 24), (24, 11)] + elif self.dataset == 'openpose': + self.pairs = ((0, 0), (1, 0), (2, 1), (3, 2), (4, 3), (5, 1), + (6, 5), (7, 6), (8, 2), (9, 8), (10, 9), (11, 5), + (12, 11), (13, 12), (14, 0), (15, 0), (16, 14), (17, + 15)) + elif self.dataset == 'coco': + self.pairs = ((0, 0), (1, 0), (2, 0), (3, 1), (4, 2), (5, 0), + (6, 0), (7, 5), (8, 6), (9, 7), (10, 8), (11, 0), + (12, 0), (13, 11), (14, 12), (15, 13), (16, 14)) + + def transform(self, results: Dict) -> Dict: + """The transform function of :class:`JointToBone`. + + Args: + results (dict): The result dict. + + Returns: + dict: The result dict. + """ + keypoint = results['keypoint'] + M, T, V, C = keypoint.shape + bone = np.zeros((M, T, V, C), dtype=np.float32) + + assert C in [2, 3] + for v1, v2 in self.pairs: + bone[..., v1, :] = keypoint[..., v1, :] - keypoint[..., v2, :] + if C == 3 and self.dataset in ['openpose', 'coco']: + score = (keypoint[..., v1, 2] + keypoint[..., v2, 2]) / 2 + bone[..., v1, 2] = score + + results[self.target] = bone + return results + + def __repr__(self) -> str: + repr_str = (f'{self.__class__.__name__}(' + f'dataset={self.dataset}, ' + f'target={self.target})') + return repr_str + + +@TRANSFORMS.register_module() +class ToMotion(BaseTransform): + """Convert the joint information or bone information to corresponding + motion information. 
+ + Required Keys: + + - keypoint + + Added Keys: + + - motion + + Args: + dataset (str): Define the type of dataset: 'nturgb+d', 'openpose', + 'coco'. Defaults to ``'nturgb+d'``. + source (str): The source key for the joint or bone information. + Defaults to ``'keypoint'``. + target (str): The target key for the motion information. + Defaults to ``'motion'``. + """ + + def __init__(self, + dataset: str = 'nturgb+d', + source: str = 'keypoint', + target: str = 'motion') -> None: + self.dataset = dataset + self.source = source + self.target = target + + def transform(self, results: Dict) -> Dict: + """The transform function of :class:`ToMotion`. + + Args: + results (dict): The result dict. + + Returns: + dict: The result dict. + """ + data = results[self.source] + M, T, V, C = data.shape + motion = np.zeros_like(data) + + assert C in [2, 3] + motion[:, :T - 1] = np.diff(data, axis=1) + if C == 3 and self.dataset in ['openpose', 'coco']: + score = (data[:, :T - 1, :, 2] + data[:, 1:, :, 2]) / 2 + motion[:, :T - 1, :, 2] = score + + results[self.target] = motion + + return results + + def __repr__(self) -> str: + repr_str = (f'{self.__class__.__name__}(' + f'dataset={self.dataset}, ' + f'source={self.source}, ' + f'target={self.target})') + return repr_str + + +@TRANSFORMS.register_module() +class MergeSkeFeat(BaseTransform): + """Merge multi-stream features. + + Args: + feat_list (list[str]): The list of the keys of features. + Defaults to ``['keypoint']``. + target (str): The target key for the merged multi-stream information. + Defaults to ``'keypoint'``. + axis (int): The axis along which the features will be joined. + Defaults to -1. + """ + + def __init__(self, + feat_list: List[str] = ['keypoint'], + target: str = 'keypoint', + axis: int = -1) -> None: + self.feat_list = feat_list + self.target = target + self.axis = axis + + def transform(self, results: Dict) -> Dict: + """The transform function of :class:`MergeSkeFeat`. + + Args: + results (dict): The result dict. + + Returns: + dict: The result dict. + """ + feats = [] + for name in self.feat_list: + feats.append(results.pop(name)) + feats = np.concatenate(feats, axis=self.axis) + results[self.target] = feats + return results + + def __repr__(self) -> str: + repr_str = (f'{self.__class__.__name__}(' + f'feat_list={self.feat_list}, ' + f'target={self.target}, ' + f'axis={self.axis})') + return repr_str + + +@TRANSFORMS.register_module() +class GenSkeFeat(BaseTransform): + """Unified interface for generating multi-stream skeleton features. + + Required Keys: + + - keypoint + - keypoint_score (optional) + + Args: + dataset (str): Define the type of dataset: 'nturgb+d', 'openpose', + 'coco'. Defaults to ``'nturgb+d'``. + feats (list[str]): The list of the keys of features. + Defaults to ``['j']``. + axis (int): The axis along which the features will be joined. + Defaults to -1. 
+ """ + + def __init__(self, + dataset: str = 'nturgb+d', + feats: List[str] = ['j'], + axis: int = -1) -> None: + self.dataset = dataset + self.feats = feats + self.axis = axis + ops = [] + if 'b' in feats or 'bm' in feats: + ops.append(JointToBone(dataset=dataset, target='b')) + ops.append(KeyMapper(remapping={'keypoint': 'j'})) + if 'jm' in feats: + ops.append(ToMotion(dataset=dataset, source='j', target='jm')) + if 'bm' in feats: + ops.append(ToMotion(dataset=dataset, source='b', target='bm')) + ops.append(MergeSkeFeat(feat_list=feats, axis=axis)) + self.ops = Compose(ops) + + def transform(self, results: Dict) -> Dict: + """The transform function of :class:`GenSkeFeat`. + + Args: + results (dict): The result dict. + + Returns: + dict: The result dict. + """ + if 'keypoint_score' in results and 'keypoint' in results: + assert self.dataset != 'nturgb+d' + assert results['keypoint'].shape[ + -1] == 2, 'Only 2D keypoints have keypoint_score. ' + keypoint = results.pop('keypoint') + keypoint_score = results.pop('keypoint_score') + results['keypoint'] = np.concatenate( + [keypoint, keypoint_score[..., None]], -1) + return self.ops(results) + + def __repr__(self) -> str: + repr_str = (f'{self.__class__.__name__}(' + f'dataset={self.dataset}, ' + f'feats={self.feats}, ' + f'axis={self.axis})') + return repr_str + + +@TRANSFORMS.register_module() +class UniformSampleFrames(BaseTransform): + """Uniformly sample frames from the video. + + To sample an n-frame clip from the video. UniformSampleFrames basically + divide the video into n segments of equal length and randomly sample one + frame from each segment. To make the testing results reproducible, a + random seed is set during testing, to make the sampling results + deterministic. + + Required Keys: + + - total_frames + - start_index (optional) + + Added Keys: + + - frame_inds + - frame_interval + - num_clips + - clip_len + + Args: + clip_len (int): Frames of each sampled output clip. + num_clips (int): Number of clips to be sampled. Defaults to 1. + test_mode (bool): Store True when building test or validation dataset. + Defaults to False. + seed (int): The random seed used during test time. Defaults to 255. + """ + + def __init__(self, + clip_len: int, + num_clips: int = 1, + test_mode: bool = False, + seed: int = 255) -> None: + self.clip_len = clip_len + self.num_clips = num_clips + self.test_mode = test_mode + self.seed = seed + + def _get_train_clips(self, num_frames: int, clip_len: int) -> np.ndarray: + """Uniformly sample indices for training clips. + + Args: + num_frames (int): The number of frames. + clip_len (int): The length of the clip. + + Returns: + np.ndarray: The sampled indices for training clips. + """ + all_inds = [] + for clip_idx in range(self.num_clips): + if num_frames < clip_len: + start = np.random.randint(0, num_frames) + inds = np.arange(start, start + clip_len) + elif clip_len <= num_frames < 2 * clip_len: + basic = np.arange(clip_len) + inds = np.random.choice( + clip_len + 1, num_frames - clip_len, replace=False) + offset = np.zeros(clip_len + 1, dtype=np.int32) + offset[inds] = 1 + offset = np.cumsum(offset) + inds = basic + offset[:-1] + else: + bids = np.array( + [i * num_frames // clip_len for i in range(clip_len + 1)]) + bsize = np.diff(bids) + bst = bids[:clip_len] + offset = np.random.randint(bsize) + inds = bst + offset + + all_inds.append(inds) + + return np.concatenate(all_inds) + + def _get_test_clips(self, num_frames: int, clip_len: int) -> np.ndarray: + """Uniformly sample indices for testing clips. 
+ + Args: + num_frames (int): The number of frames. + clip_len (int): The length of the clip. + + Returns: + np.ndarray: The sampled indices for testing clips. + """ + + np.random.seed(self.seed) + all_inds = [] + for i in range(self.num_clips): + if num_frames < clip_len: + start_ind = i if num_frames < self.num_clips \ + else i * num_frames // self.num_clips + inds = np.arange(start_ind, start_ind + clip_len) + elif clip_len <= num_frames < clip_len * 2: + basic = np.arange(clip_len) + inds = np.random.choice( + clip_len + 1, num_frames - clip_len, replace=False) + offset = np.zeros(clip_len + 1, dtype=np.int64) + offset[inds] = 1 + offset = np.cumsum(offset) + inds = basic + offset[:-1] + else: + bids = np.array( + [i * num_frames // clip_len for i in range(clip_len + 1)]) + bsize = np.diff(bids) + bst = bids[:clip_len] + offset = np.random.randint(bsize) + inds = bst + offset + + all_inds.append(inds) + + return np.concatenate(all_inds) + + def transform(self, results: Dict) -> Dict: + """The transform function of :class:`UniformSampleFrames`. + + Args: + results (dict): The result dict. + + Returns: + dict: The result dict. + """ + num_frames = results['total_frames'] + + if self.test_mode: + inds = self._get_test_clips(num_frames, self.clip_len) + else: + inds = self._get_train_clips(num_frames, self.clip_len) + + inds = np.mod(inds, num_frames) + start_index = results.get('start_index', 0) + inds = inds + start_index + + if 'keypoint' in results: + kp = results['keypoint'] + assert num_frames == kp.shape[1] + num_person = kp.shape[0] + num_persons = [num_person] * num_frames + for i in range(num_frames): + j = num_person - 1 + while j >= 0 and np.all(np.abs(kp[j, i]) < 1e-5): + j -= 1 + num_persons[i] = j + 1 + transitional = [False] * num_frames + for i in range(1, num_frames - 1): + if num_persons[i] != num_persons[i - 1]: + transitional[i] = transitional[i - 1] = True + if num_persons[i] != num_persons[i + 1]: + transitional[i] = transitional[i + 1] = True + inds_int = inds.astype(np.int) + coeff = np.array([transitional[i] for i in inds_int]) + inds = (coeff * inds_int + (1 - coeff) * inds).astype(np.float32) + + results['frame_inds'] = inds.astype(np.int32) + results['clip_len'] = self.clip_len + results['frame_interval'] = None + results['num_clips'] = self.num_clips + return results + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(' + f'clip_len={self.clip_len}, ' + f'num_clips={self.num_clips}, ' + f'test_mode={self.test_mode}, ' + f'seed={self.seed})') + return repr_str + + +@TRANSFORMS.register_module() +class PadTo(BaseTransform): + """Sample frames from the video. + + To sample an n-frame clip from the video, PadTo samples + the frames from zero index, and loop or zero pad the frames + if the length of video frames is less than the value of `length`. + + Required Keys: + + - keypoint + - total_frames + - start_index (optional) + + Modified Keys: + + - keypoint + - total_frames + + Args: + length (int): The maximum length of the sampled output clip. + mode (str): The padding mode. Defaults to ``'loop'``. + """ + + def __init__(self, length: int, mode: str = 'loop') -> None: + self.length = length + assert mode in ['loop', 'zero'] + self.mode = mode + + def transform(self, results: Dict) -> Dict: + """The transform function of :class:`PadTo`. + + Args: + results (dict): The result dict. + + Returns: + dict: The result dict. 
+ """ + total_frames = results['total_frames'] + assert total_frames <= self.length + start_index = results.get('start_index', 0) + inds = np.arange(start_index, start_index + self.length) + inds = np.mod(inds, total_frames) + + keypoint = results['keypoint'][:, inds].copy() + if self.mode == 'zero': + keypoint[:, total_frames:] = 0 + + results['keypoint'] = keypoint + results['total_frames'] = self.length + return results + + def __repr__(self) -> str: + repr_str = (f'{self.__class__.__name__}(' + f'length={self.length}, ' + f'mode={self.mode})') + return repr_str + + +@TRANSFORMS.register_module() +class PoseDecode(BaseTransform): + """Load and decode pose with given indices. + + Required Keys: + + - keypoint + - total_frames (optional) + - frame_inds (optional) + - offset (optional) + - keypoint_score (optional) + + Modified Keys: + + - keypoint + - keypoint_score (optional) + """ + + def transform(self, results: Dict) -> Dict: + """The transform function of :class:`PoseDecode`. + + Args: + results (dict): The result dict. + + Returns: + dict: The result dict. + """ + if 'total_frames' not in results: + results['total_frames'] = results['keypoint'].shape[1] + + if 'frame_inds' not in results: + results['frame_inds'] = np.arange(results['total_frames']) + + if results['frame_inds'].ndim != 1: + results['frame_inds'] = np.squeeze(results['frame_inds']) + + offset = results.get('offset', 0) + frame_inds = results['frame_inds'] + offset + + results['keypoint'] = results['keypoint'][:, frame_inds].astype( + np.float32) + + if 'keypoint_score' in results: + kpscore = results['keypoint_score'] + results['keypoint_score'] = kpscore[:, + frame_inds].astype(np.float32) + + return results + + def __repr__(self) -> str: + repr_str = f'{self.__class__.__name__}()' + return repr_str diff --git a/mmaction/datasets/transforms/processing.py b/mmaction/datasets/transforms/processing.py index d34bc93327..13637dcf38 100644 --- a/mmaction/datasets/transforms/processing.py +++ b/mmaction/datasets/transforms/processing.py @@ -57,115 +57,6 @@ def _init_lazy_if_proper(results, lazy): assert 'lazy' not in results, 'Use Fuse after lazy operations' -@TRANSFORMS.register_module() -class PoseCompact(BaseTransform): - """Convert the coordinates of keypoints to make it more compact. - Specifically, it first find a tight bounding box that surrounds all joints - in each frame, then we expand the tight box by a given padding ratio. For - example, if 'padding == 0.25', then the expanded box has unchanged center, - and 1.25x width and height. - - Required keys in results are "img_shape", "keypoint", add or modified keys - are "img_shape", "keypoint", "crop_quadruple". - - Args: - padding (float): The padding size. Default: 0.25. - threshold (int): The threshold for the tight bounding box. If the width - or height of the tight bounding box is smaller than the threshold, - we do not perform the compact operation. Default: 10. - hw_ratio (float | tuple[float] | None): The hw_ratio of the expanded - box. Float indicates the specific ratio and tuple indicates a - ratio range. If set as None, it means there is no requirement on - hw_ratio. Default: None. - allow_imgpad (bool): Whether to allow expanding the box outside the - image to meet the hw_ratio requirement. Default: True. - - Returns: - type: Description of returned object. 
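# A minimal sketch of the PoseDecode indexing defined above: keypoints (and,
# when present, keypoint scores) are gathered along the temporal axis using
# `frame_inds` shifted by an optional `offset`. Shapes are illustrative only.
import numpy as np

kp = np.random.randn(2, 16, 17, 2).astype(np.float32)   # (M, T, V, C)
frame_inds = np.array([0, 4, 8, 12])
offset = 0
decoded = kp[:, frame_inds + offset]                     # (M, 4, 17, 2)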
- """ - - def __init__(self, - padding=0.25, - threshold=10, - hw_ratio=None, - allow_imgpad=True): - - self.padding = padding - self.threshold = threshold - if hw_ratio is not None: - hw_ratio = _pair(hw_ratio) - - self.hw_ratio = hw_ratio - - self.allow_imgpad = allow_imgpad - assert self.padding >= 0 - - def transform(self, results): - """Convert the coordinates of keypoints to make it more compact. - - Args: - results (dict): The resulting dict to be modified and passed - to the next transform in pipeline. - """ - img_shape = results['img_shape'] - h, w = img_shape - kp = results['keypoint'] - - # Make NaN zero - kp[np.isnan(kp)] = 0. - kp_x = kp[..., 0] - kp_y = kp[..., 1] - - min_x = np.min(kp_x[kp_x != 0], initial=np.Inf) - min_y = np.min(kp_y[kp_y != 0], initial=np.Inf) - max_x = np.max(kp_x[kp_x != 0], initial=-np.Inf) - max_y = np.max(kp_y[kp_y != 0], initial=-np.Inf) - - # The compact area is too small - if max_x - min_x < self.threshold or max_y - min_y < self.threshold: - return results - - center = ((max_x + min_x) / 2, (max_y + min_y) / 2) - half_width = (max_x - min_x) / 2 * (1 + self.padding) - half_height = (max_y - min_y) / 2 * (1 + self.padding) - - if self.hw_ratio is not None: - half_height = max(self.hw_ratio[0] * half_width, half_height) - half_width = max(1 / self.hw_ratio[1] * half_height, half_width) - - min_x, max_x = center[0] - half_width, center[0] + half_width - min_y, max_y = center[1] - half_height, center[1] + half_height - - # hot update - if not self.allow_imgpad: - min_x, min_y = int(max(0, min_x)), int(max(0, min_y)) - max_x, max_y = int(min(w, max_x)), int(min(h, max_y)) - else: - min_x, min_y = int(min_x), int(min_y) - max_x, max_y = int(max_x), int(max_y) - - kp_x[kp_x != 0] -= min_x - kp_y[kp_y != 0] -= min_y - - new_shape = (max_y - min_y, max_x - min_x) - results['img_shape'] = new_shape - - # the order is x, y, w, h (in [0, 1]), a tuple - crop_quadruple = results.get('crop_quadruple', (0., 0., 1., 1.)) - new_crop_quadruple = (min_x / w, min_y / h, (max_x - min_x) / w, - (max_y - min_y) / h) - crop_quadruple = _combine_quadruple(crop_quadruple, new_crop_quadruple) - results['crop_quadruple'] = crop_quadruple - return results - - def __repr__(self): - repr_str = (f'{self.__class__.__name__}(padding={self.padding}, ' - f'threshold={self.threshold}, ' - f'hw_ratio={self.hw_ratio}, ' - f'allow_imgpad={self.allow_imgpad})') - return repr_str - - @TRANSFORMS.register_module() class Fuse(BaseTransform): """Fuse lazy operations. diff --git a/mmaction/models/backbones/stgcn.py b/mmaction/models/backbones/stgcn.py index 8aae64676c..9fb2469674 100644 --- a/mmaction/models/backbones/stgcn.py +++ b/mmaction/models/backbones/stgcn.py @@ -1,286 +1,231 @@ # Copyright (c) OpenMMLab. All rights reserved. -from typing import Tuple +import copy as cp +from typing import Dict, List, Optional, Union import torch import torch.nn as nn -from mmengine.logging import MMLogger -from mmengine.model.weight_init import constant_init, kaiming_init, normal_init -from mmengine.runner import load_checkpoint -from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm +from mmengine.model import BaseModule, ModuleList from mmaction.registry import MODELS -from ..utils import Graph +from ..utils import Graph, unit_gcn, unit_tcn +EPS = 1e-4 -class STGCNBlock(nn.Module): - """Applies a spatial temporal graph convolution over an input graph - sequence. + +class STGCNBlock(BaseModule): + """The basic block of ST-GCN. 
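# A minimal worked example of the box expansion performed by PoseCompact
# above, assuming a tight keypoint box spanning x in [10, 90] and
# padding=0.25 (the numbers are illustrative, not taken from the patch).
min_x, max_x, padding = 10.0, 90.0, 0.25
center = (max_x + min_x) / 2                              # 50.0
half_width = (max_x - min_x) / 2 * (1 + padding)          # 40 * 1.25 = 50.0
min_x, max_x = center - half_width, center + half_width   # (0.0, 100.0)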
Args: - in_channels (int): Number of channels in the input sequence data. - out_channels (int): Number of channels produced by the convolution. - kernel_size (Tuple[int]): Size of the temporal convolving kernel and - graph convolving kernel. - stride (int, optional): Stride of the temporal convolution. - Default: 1. - dropout (float, optional): Dropout rate of the final output. - Default: 0. - residual (bool, optional): If True, applies a residual mechanism. - Default: True. + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + A (torch.Tensor): The adjacency matrix defined in the graph + with shape of `(num_subsets, num_nodes, num_nodes)`. + stride (int): Stride of the temporal convolution. Defaults to 1. + residual (bool): Whether to use residual connection. Defaults to True. """ def __init__(self, in_channels: int, out_channels: int, - kernel_size: Tuple[int], + A: torch.Tensor, stride: int = 1, - dropout: float = 0, - residual: bool = True) -> None: + residual: bool = True, + **kwargs) -> None: super().__init__() - assert len(kernel_size) == 2 - assert kernel_size[0] % 2 == 1 - padding = ((kernel_size[0] - 1) // 2, 0) + gcn_kwargs = {k[4:]: v for k, v in kwargs.items() if k[:4] == 'gcn_'} + tcn_kwargs = {k[4:]: v for k, v in kwargs.items() if k[:4] == 'tcn_'} + kwargs = { + k: v + for k, v in kwargs.items() if k[:4] not in ['gcn_', 'tcn_'] + } + assert len(kwargs) == 0, f'Invalid arguments: {kwargs}' + + tcn_type = tcn_kwargs.pop('type', 'unit_tcn') + assert tcn_type in ['unit_tcn'] + gcn_type = gcn_kwargs.pop('type', 'unit_gcn') + assert gcn_type in ['unit_gcn'] - self.gcn = ConvTemporalGraphical(in_channels, out_channels, - kernel_size[1]) - self.tcn = nn.Sequential( - nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True), - nn.Conv2d(out_channels, out_channels, (kernel_size[0], 1), - (stride, 1), padding), nn.BatchNorm2d(out_channels), - nn.Dropout(dropout, inplace=True)) + self.gcn = unit_gcn(in_channels, out_channels, A, **gcn_kwargs) + + if tcn_type == 'unit_tcn': + self.tcn = unit_tcn( + out_channels, out_channels, 9, stride=stride, **tcn_kwargs) + + self.relu = nn.ReLU() if not residual: self.residual = lambda x: 0 elif (in_channels == out_channels) and (stride == 1): self.residual = lambda x: x else: - self.residual = nn.Sequential( - nn.Conv2d( - in_channels, - out_channels, - kernel_size=1, - stride=(stride, 1)), nn.BatchNorm2d(out_channels)) + self.residual = unit_tcn( + in_channels, out_channels, kernel_size=1, stride=stride) - self.relu = nn.ReLU(inplace=True) - - def forward(self, x: torch.Tensor, adj_mat: torch.Tensor) -> tuple: - """Defines the computation performed at every call. - - Args: - x (torch.Tensor): Input graph sequence in - :math:`(N, in_channels, T_{in}, V)` format. - adj_mat (torch.Tensor): Input graph adjacency matrix in - :math:`(K, V, V)` format. - - Returns: - tuple: A tuple of output graph sequence and graph adjacency matrix. - - - x (torch.Tensor): Output graph sequence in - :math:`(N, out_channels, T_{out}, V)` format. - - adj_mat (torch.Tensor): graph adjacency matrix for - output data in :math:`(K, V, V)` format. - - where - :math:`N` is the batch size, - :math:`K` is the spatial kernel size, as - :math:`K == kernel_size[1]`, - :math:`T_{in}/T_{out}` is a length of input/output sequence, - :math:`V` is the number of graph nodes. 
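# A minimal sketch of the prefix routing used by STGCNBlock above: keyword
# arguments starting with 'gcn_' / 'tcn_' are stripped of their prefix and
# forwarded to the gcn / tcn sub-modules. The concrete keys below are just
# plausible examples of options accepted by unit_gcn / unit_tcn.
kwargs = dict(gcn_adaptive='importance', gcn_with_res=True, tcn_dropout=0.1)
gcn_kwargs = {k[4:]: v for k, v in kwargs.items() if k[:4] == 'gcn_'}
tcn_kwargs = {k[4:]: v for k, v in kwargs.items() if k[:4] == 'tcn_'}
# gcn_kwargs == {'adaptive': 'importance', 'with_res': True}
# tcn_kwargs == {'dropout': 0.1}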
- """ + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Defines the computation performed at every call.""" res = self.residual(x) - x, adj_mat = self.gcn(x, adj_mat) - x = self.relu(self.tcn(x) + res) - - return x, adj_mat - - -class ConvTemporalGraphical(nn.Module): - """The basic module for applying a graph convolution. - - Args: - in_channels (int): Number of channels in the input sequence data. - out_channels (int): Number of channels produced by the convolution. - kernel_size (int): Size of the graph convolution kernel. - t_kernel_size (int): Size of the temporal convolution kernel. - t_stride (int, optional): Stride of the temporal convolution. - Default: 1. - t_padding (int, optional): Temporal zero-padding added to both sides - of the input. Default: 0. - t_dilation (int, optional): Spacing between temporal kernel elements. - Default: 1. - bias (bool, optional): If True, adds a learnable bias to the - output. Default: True. - """ - - def __init__(self, - in_channels: int, - out_channels: int, - kernel_size: int, - t_kernel_size: int = 1, - t_stride: int = 1, - t_padding: int = 0, - t_dilation: int = 1, - bias: bool = True) -> None: - super().__init__() - - self.kernel_size = kernel_size - self.conv = nn.Conv2d( - in_channels, - out_channels * kernel_size, - kernel_size=(t_kernel_size, 1), - padding=(t_padding, 0), - stride=(t_stride, 1), - dilation=(t_dilation, 1), - bias=bias) - - def forward(self, x: torch.Tensor, adj_mat: torch.Tensor) -> tuple: - """Defines the computation performed at every call. - - Args: - x (torch.Tensor): Input graph sequence in - :math:`(N, in_channels, T_{in}, V)` format - adj_mat (torch.Tensor): Input graph adjacency matrix in - :math:`(K, V, V)` format. - - Returns: - tuple: A tuple of output graph sequence and graph adjacency matrix. - - - x (Tensor): Output graph sequence in - :math:`(N, out_channels, T_{out}, V)` format. - - adj_mat (Tensor): graph adjacency matrix for output data in - :math:`(K, V, V)` format. - - where - :math:`N` is a batch size, - :math:`K` is the spatial kernel size, as :math:`K == kernel_size[1] - `, - :math:`T_{in}/T_{out}` is a length of input/output sequence, - :math:`V` is the number of graph nodes. - """ - assert adj_mat.size(0) == self.kernel_size - - x = self.conv(x) - - n, kc, t, v = x.size() - x = x.view(n, self.kernel_size, kc // self.kernel_size, t, v) - x = torch.einsum('nkctv,kvw->nctw', (x, adj_mat)).contiguous() - - return x, adj_mat + x = self.tcn(self.gcn(x)) + res + return self.relu(x) @MODELS.register_module() -class STGCN(nn.Module): - """Backbone of spatial temporal graph convolutional networks. +class STGCN(BaseModule): + """ STGCN + A PyTorch implement of : `Spatial Temporal Graph Convolutional + Networks for Skeleton-Based Action Recognition` - + https://arxiv.org/abs/1801.07455 Args: - in_channels (int): Number of channels of the input data. - graph_cfg (dict): The arguments for building the graph. - edge_importance_weighting (bool): If ``True``, add a learnable - importance weighting to the edges of the graph. Defaults to True. - data_bn (bool): If ``True``, adds data normalization to the inputs. - Defaults to True. - pretrained (str, optional): Path of pretrained model. - **kwargs: Keyword parameters passed to graph convolution units. 
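# A minimal shape check for the 'nkctv,kvw->nctw' contraction used by the
# graph convolutions in this file: per-subset features are aggregated over
# the graph nodes for each of the K adjacency subsets. Sizes are illustrative.
import torch

N, K, C, T, V = 2, 3, 64, 16, 17
x = torch.randn(N, K, C, T, V)
A = torch.randn(K, V, V)
out = torch.einsum('nkctv,kvw->nctw', (x, A))
assert out.shape == (N, C, T, V)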
- - Shape: - - Input: :math:`(N, in_channels, T_{in}, V_{in}, M_{in})` - - Output: :math:`(N, num_class)` where - :math:`N` is a batch size, - :math:`T_{in}` is a length of input sequence, - :math:`V_{in}` is the number of graph nodes, - :math:`M_{in}` is the number of instance in a frame. + graph_cfg (dict): Config for building the graph. + in_channels (int): Number of input channels. Defaults to 3. + base_channels (int): Number of base channels. Defaults to 64. + data_bn_type (str): Type of the data bn layer. Defaults to ``'VC'``. + ch_ratio (int): Inflation ratio of the number of channels. + Defaults to 2. + num_person (int): Maximum number of people. Only used when + data_bn_type == 'MVC'. Defaults to 2. + num_stages (int): Total number of stages. Defaults to 10. + inflate_stages (list[int]): Stages to inflate the number of channels. + Defaults to ``[5, 8]``. + down_stages (list[int]): Stages to perform downsampling in + the time dimension. Defaults to ``[5, 8]``. + stage_cfgs (dict): Extra config dict for each stage. + Defaults to ``dict()``. + init_cfg (dict or list[dict], optional): Config to control + the initialization. Defaults to None. + + Examples: + >>> import torch + >>> from mmaction.models import STGCN + >>> + >>> mode = 'stgcn_spatial' + >>> batch_size, num_person, num_frames = 2, 2, 150 + >>> + >>> # openpose-18 layout + >>> num_joints = 18 + >>> model = STGCN(graph_cfg=dict(layout='openpose', mode=mode)) + >>> model.init_weights() + >>> inputs = torch.randn(batch_size, num_person, + >>> num_frames, num_joints, 3) + >>> output = model(inputs) + >>> print(output.shape) + >>> + >>> # nturgb+d layout + >>> num_joints = 25 + >>> model = STGCN(graph_cfg=dict(layout='nturgb+d', mode=mode)) + >>> model.init_weights() + >>> inputs = torch.randn(batch_size, num_person, + >>> num_frames, num_joints, 3) + >>> output = model(inputs) + >>> print(output.shape) + >>> + >>> # coco layout + >>> num_joints = 17 + >>> model = STGCN(graph_cfg=dict(layout='coco', mode=mode)) + >>> model.init_weights() + >>> inputs = torch.randn(batch_size, num_person, + >>> num_frames, num_joints, 3) + >>> output = model(inputs) + >>> print(output.shape) + >>> + >>> # custom settings + >>> # add additional residual connection for the first four gcns + >>> stage_cfgs = {'gcn_with_res': [True] * 4 + [False] * 6} + >>> model = STGCN(graph_cfg=dict(layout='coco', mode=mode), + >>> num_stages=10, stage_cfgs=stage_cfgs) + >>> model.init_weights() + >>> output = model(inputs) + >>> print(output.shape) + torch.Size([2, 2, 256, 38, 18]) + torch.Size([2, 2, 256, 38, 25]) + torch.Size([2, 2, 256, 38, 17]) + torch.Size([2, 2, 256, 38, 17]) """ def __init__(self, - in_channels: int, - graph_cfg: dict, - edge_importance_weighting: bool = True, - data_bn: bool = True, - pretrained: str = None, - **kwargs) -> None: - super().__init__() + graph_cfg: Dict, + in_channels: int = 3, + base_channels: int = 64, + data_bn_type: str = 'VC', + ch_ratio: int = 2, + num_person: int = 2, + num_stages: int = 10, + inflate_stages: List[int] = [5, 8], + down_stages: List[int] = [5, 8], + stage_cfgs: Dict = dict(), + init_cfg: Optional[Union[Dict, List[Dict]]] = None) -> None: + super().__init__(init_cfg=init_cfg) - # load graph self.graph = Graph(**graph_cfg) A = torch.tensor( self.graph.A, dtype=torch.float32, requires_grad=False) - self.register_buffer('A', A) + self.data_bn_type = data_bn_type - # build networks - spatial_kernel_size = A.size(0) - temporal_kernel_size = 9 - kernel_size = (temporal_kernel_size, spatial_kernel_size) - 
self.data_bn = nn.BatchNorm1d(in_channels * - A.size(1)) if data_bn else nn.Identity() - - kwargs0 = {k: v for k, v in kwargs.items() if k != 'dropout'} - self.st_gcn_networks = nn.ModuleList(( - STGCNBlock( - in_channels, 64, kernel_size, 1, residual=False, **kwargs0), - STGCNBlock(64, 64, kernel_size, 1, **kwargs), - STGCNBlock(64, 64, kernel_size, 1, **kwargs), - STGCNBlock(64, 64, kernel_size, 1, **kwargs), - STGCNBlock(64, 128, kernel_size, 2, **kwargs), - STGCNBlock(128, 128, kernel_size, 1, **kwargs), - STGCNBlock(128, 128, kernel_size, 1, **kwargs), - STGCNBlock(128, 256, kernel_size, 2, **kwargs), - STGCNBlock(256, 256, kernel_size, 1, **kwargs), - STGCNBlock(256, 256, kernel_size, 1, **kwargs), - )) - - # initialize parameters for edge importance weighting - if edge_importance_weighting: - self.edge_importance = nn.ParameterList([ - nn.Parameter(torch.ones(self.A.size())) - for i in self.st_gcn_networks - ]) + if data_bn_type == 'MVC': + self.data_bn = nn.BatchNorm1d(num_person * in_channels * A.size(1)) + elif data_bn_type == 'VC': + self.data_bn = nn.BatchNorm1d(in_channels * A.size(1)) else: - self.edge_importance = [1 for _ in self.st_gcn_networks] - - self.pretrained = pretrained - - def init_weights(self) -> None: - """Initiate the parameters either from existing checkpoint or from - scratch.""" - if isinstance(self.pretrained, str): - logger = MMLogger.get_current_instance() - logger.info(f'load model from: {self.pretrained}') - - load_checkpoint(self, self.pretrained, strict=False, logger=logger) - - elif self.pretrained is None: - for m in self.modules(): - if isinstance(m, nn.Conv2d): - kaiming_init(m) - elif isinstance(m, nn.Linear): - normal_init(m) - elif isinstance(m, _BatchNorm): - constant_init(m, 1) - else: - raise TypeError('pretrained must be a str or None') + self.data_bn = nn.Identity() + + lw_kwargs = [cp.deepcopy(stage_cfgs) for i in range(num_stages)] + for k, v in stage_cfgs.items(): + if isinstance(v, (tuple, list)) and len(v) == num_stages: + for i in range(num_stages): + lw_kwargs[i][k] = v[i] + lw_kwargs[0].pop('tcn_dropout', None) + + self.in_channels = in_channels + self.base_channels = base_channels + self.ch_ratio = ch_ratio + self.inflate_stages = inflate_stages + self.down_stages = down_stages + + modules = [] + if self.in_channels != self.base_channels: + modules = [ + STGCNBlock( + in_channels, + base_channels, + A.clone(), + 1, + residual=False, + **lw_kwargs[0]) + ] + + inflate_times = 0 + for i in range(2, num_stages + 1): + stride = 1 + (i in down_stages) + in_channels = base_channels + if i in inflate_stages: + inflate_times += 1 + out_channels = int(self.base_channels * + self.ch_ratio**inflate_times + EPS) + base_channels = out_channels + modules.append( + STGCNBlock(in_channels, out_channels, A.clone(), stride, + **lw_kwargs[i - 1])) + + if self.in_channels == self.base_channels: + num_stages -= 1 + + self.num_stages = num_stages + self.gcn = ModuleList(modules) def forward(self, x: torch.Tensor) -> torch.Tensor: - """Defines the computation performed at every call. - - Args: - x (torch.Tensor): The input data. - - Returns: - torch.Tensor: The output of the module. 
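# A minimal sketch of the per-stage channel/stride schedule built above,
# assuming the defaults base_channels=64, ch_ratio=2,
# inflate_stages=down_stages=[5, 8] and num_stages=10.
EPS = 1e-4
base_channels, ch_ratio = 64, 2
inflate_stages, down_stages = [5, 8], [5, 8]
inflate_times = 0
for i in range(2, 11):                       # stages 2..10
    stride = 1 + (i in down_stages)          # 2 at stages 5 and 8, else 1
    if i in inflate_stages:
        inflate_times += 1
    out_channels = int(base_channels * ch_ratio**inflate_times + EPS)
    # stages 2-4 -> 64, stages 5-7 -> 128, stages 8-10 -> 256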
- """ - # data normalization - x = x.float() - n, c, t, v, m = x.size() # bs 3 300 25(17) 2 - x = x.permute(0, 4, 3, 1, 2).contiguous() # N M V C T - x = x.view(n * m, v * c, t) - x = self.data_bn(x) - x = x.view(n, m, v, c, t) + """Defines the computation performed at every call.""" + N, M, T, V, C = x.size() x = x.permute(0, 1, 3, 4, 2).contiguous() - x = x.view(n * m, c, t, v) # bsx2 3 300 25(17) + if self.data_bn_type == 'MVC': + x = self.data_bn(x.view(N, M * V * C, T)) + else: + x = self.data_bn(x.view(N * M, V * C, T)) + x = x.view(N, M, V, C, T).permute(0, 1, 3, 4, + 2).contiguous().view(N * M, C, T, V) - # forward - for gcn, importance in zip(self.st_gcn_networks, self.edge_importance): - x, _ = gcn(x, self.A * importance) + for i in range(self.num_stages): + x = self.gcn[i](x) + x = x.reshape((N, M) + x.shape[1:]) return x diff --git a/mmaction/models/heads/__init__.py b/mmaction/models/heads/__init__.py index c803fc8561..9890d5aa5a 100644 --- a/mmaction/models/heads/__init__.py +++ b/mmaction/models/heads/__init__.py @@ -1,9 +1,9 @@ # Copyright (c) OpenMMLab. All rights reserved. from .base import BaseHead +from .gcn_head import GCNHead from .i3d_head import I3DHead from .mvit_head import MViTHead from .slowfast_head import SlowFastHead -from .stgcn_head import STGCNHead from .timesformer_head import TimeSformerHead from .tpn_head import TPNHead from .trn_head import TRNHead @@ -14,6 +14,6 @@ __all__ = [ 'TSNHead', 'I3DHead', 'BaseHead', 'TSMHead', 'SlowFastHead', 'TPNHead', - 'X3DHead', 'TRNHead', 'TimeSformerHead', 'STGCNHead', 'TSNAudioHead', + 'X3DHead', 'TRNHead', 'TimeSformerHead', 'GCNHead', 'TSNAudioHead', 'MViTHead' ] diff --git a/mmaction/models/heads/base.py b/mmaction/models/heads/base.py index 9c505801d7..0f3e0785f0 100644 --- a/mmaction/models/heads/base.py +++ b/mmaction/models/heads/base.py @@ -80,12 +80,6 @@ def __init__(self, assert _topk > 0, 'Top-k should be larger than 0' self.topk = topk - @abstractmethod - def init_weights(self) -> None: - """Initiate the parameters either from existing checkpoint or from - scratch.""" - raise NotImplementedError - @abstractmethod def forward(self, x, **kwargs) -> Tensor: """Defines the computation performed at every call.""" diff --git a/mmaction/models/heads/gcn_head.py b/mmaction/models/heads/gcn_head.py new file mode 100644 index 0000000000..d43af575df --- /dev/null +++ b/mmaction/models/heads/gcn_head.py @@ -0,0 +1,71 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List, Union + +import torch +import torch.nn as nn + +from mmaction.registry import MODELS +from .base import BaseHead + + +@MODELS.register_module() +class GCNHead(BaseHead): + """The classification head for GCN. + + Args: + num_classes (int): Number of classes to be classified. + in_channels (int): Number of channels in input feature. + loss_cls (dict): Config for building loss. + Defaults to ``dict(type='CrossEntropyLoss')``. + dropout (float): Probability of dropout layer. Defaults to 0. + init_cfg (dict or list[dict]): Config to control the initialization. + Defaults to ``dict(type='Normal', layer='Linear', std=0.01)``. 
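# A minimal shape walk-through for the GCNHead below, assuming N=2 sequences,
# M=2 persons, C=256 channels, T=38 frames, V=17 joints; the 60-class linear
# layer is only an example, not a value taken from the patch.
import torch
import torch.nn as nn

N, M, C, T, V = 2, 2, 256, 38, 17
x = torch.randn(N, M, C, T, V)
x = x.view(N * M, C, T, V)
x = nn.AdaptiveAvgPool2d(1)(x)        # (N * M, C, 1, 1): pool over T and V
x = x.view(N, M, C).mean(dim=1)       # (N, C): average over persons
scores = nn.Linear(C, 60)(x)          # (N, 60) classification scores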
+ """ + + def __init__(self, + num_classes: int, + in_channels: int, + loss_cls: Dict = dict(type='CrossEntropyLoss'), + dropout: float = 0., + average_clips: str = 'prob', + init_cfg: Union[Dict, List[Dict]] = dict( + type='Normal', layer='Linear', std=0.01), + **kwargs) -> None: + super().__init__( + num_classes, + in_channels, + loss_cls=loss_cls, + average_clips=average_clips, + init_cfg=init_cfg, + **kwargs) + self.dropout_ratio = dropout + if self.dropout_ratio != 0: + self.dropout = nn.Dropout(p=self.dropout_ratio) + else: + self.dropout = None + + self.pool = nn.AdaptiveAvgPool2d(1) + self.fc = nn.Linear(self.in_channels, self.num_classes) + + def forward(self, x: torch.Tensor, **kwargs) -> torch.Tensor: + """Forward features from the upstream network. + + Args: + x (torch.Tensor): Features from the upstream network. + + Returns: + torch.Tensor: Classification scores with shape (B, num_classes). + """ + + N, M, C, T, V = x.shape + x = x.view(N * M, C, T, V) + x = self.pool(x) + x = x.view(N, M, C) + x = x.mean(dim=1) + assert x.shape[1] == self.in_channels + + if self.dropout is not None: + x = self.dropout(x) + + cls_scores = self.fc(x) + return cls_scores diff --git a/mmaction/models/heads/stgcn_head.py b/mmaction/models/heads/stgcn_head.py deleted file mode 100644 index 428f9461d3..0000000000 --- a/mmaction/models/heads/stgcn_head.py +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch.nn as nn -from mmengine.model.weight_init import normal_init -from torch import Tensor - -from mmaction.registry import MODELS -from mmaction.utils import ConfigType -from .base import BaseHead - - -@MODELS.register_module() -class STGCNHead(BaseHead): - """The classification head for STGCN. - - Args: - num_classes (int): Number of classes to be classified. - in_channels (int): Number of channels in input feature. - loss_cls (dict or ConfigDict): Config for building loss. - Default: dict(type='CrossEntropyLoss') - spatial_type (str): Pooling type in spatial dimension. Default: 'avg'. - num_person (int): Number of person. Default: 2. - init_std (float): Std value for Initiation. Default: 0.01. - """ - - def __init__(self, - num_classes: int, - in_channels: int, - loss_cls: ConfigType = dict(type='CrossEntropyLoss'), - spatial_type: str = 'avg', - num_person: int = 2, - init_std: float = 0.01, - **kwargs) -> None: - super().__init__(num_classes, in_channels, loss_cls, **kwargs) - - self.spatial_type = spatial_type - self.num_person = num_person - self.init_std = init_std - - self.pool = None - if self.spatial_type == 'avg': - self.pool = nn.AdaptiveAvgPool2d((1, 1)) - elif self.spatial_type == 'max': - self.pool = nn.AdaptiveMaxPool2d((1, 1)) - else: - raise NotImplementedError - - self.fc = nn.Conv2d(self.in_channels, self.num_classes, kernel_size=1) - - def init_weights(self) -> None: - """Initialize the model network weights.""" - normal_init(self.fc, std=self.init_std) - - def forward(self, x: Tensor, **kwargs) -> Tensor: - """Forward features from the upstream network. - - Args: - x (Tensor): Features from the upstream network. - - Returns: - Tensor: Classification scores with shape(k, num_classes). - """ - - # global pooling - assert self.pool is not None, 'pool must be implemented.' 
- x = self.pool(x) - x = x.view(x.shape[0] // self.num_person, self.num_person, -1, 1, - 1).mean(dim=1) - - # prediction - x = self.fc(x) - x = x.view(x.shape[0], -1) - - return x diff --git a/mmaction/models/recognizers/recognizer_gcn.py b/mmaction/models/recognizers/recognizer_gcn.py index fa9866cdea..041aa5ea8a 100644 --- a/mmaction/models/recognizers/recognizer_gcn.py +++ b/mmaction/models/recognizers/recognizer_gcn.py @@ -1,5 +1,7 @@ # Copyright (c) OpenMMLab. All rights reserved. -from torch import Tensor +from typing import Tuple + +import torch from mmaction.registry import MODELS from .base import BaseRecognizer @@ -7,27 +9,32 @@ @MODELS.register_module() class RecognizerGCN(BaseRecognizer): - """GCN recognizer model framework.""" + """GCN-based recognizer for skeleton-based action recognition.""" def extract_feat(self, - inputs: Tensor, + inputs: torch.Tensor, stage: str = 'backbone', - **kwargs) -> tuple: - """Extract features of different stages. + **kwargs) -> Tuple: + """Extract features at the given stage. Args: - inputs (Tensor): The input data. - stage (str): Which stage to output the feature. - Defaults to ``backbone``. + inputs (torch.Tensor): The input skeleton with shape of + `(B, num_clips, num_person, clip_len, num_joints, 3 or 2)`. + stage (str): The stage to output the features. + Defaults to ``'backbone'``. Returns: - Tensor: The extracted features. - dict: A dict recording the kwargs for downstream - pipeline. This will be an empty dict in GCN recognizer. + tuple: THe extracted features and a dict recording the kwargs + for downstream pipeline, which is an empty dict for the + GCN-based recognizer. """ # Record the kwargs required by `loss` and `predict` loss_predict_kwargs = dict() + + bs, nc = inputs.shape[:2] + inputs = inputs.reshape((bs * nc, ) + inputs.shape[2:]) + x = self.backbone(inputs) if stage == 'backbone': diff --git a/mmaction/models/utils/__init__.py b/mmaction/models/utils/__init__.py index 865ccbea99..0e8537b4bc 100644 --- a/mmaction/models/utils/__init__.py +++ b/mmaction/models/utils/__init__.py @@ -1,6 +1,7 @@ # Copyright (c) OpenMMLab. All rights reserved. from .blending_utils import (BaseMiniBatchBlending, CutmixBlending, MixupBlending, RandomBatchAugment) +from .gcn_utils import * # noqa: F401,F403 from .graph import Graph __all__ = [ diff --git a/mmaction/models/utils/gcn_utils.py b/mmaction/models/utils/gcn_utils.py new file mode 100644 index 0000000000..1e83d03da3 --- /dev/null +++ b/mmaction/models/utils/gcn_utils.py @@ -0,0 +1,162 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, List, Optional, Union + +import torch +import torch.nn as nn +from mmcv.cnn import build_activation_layer, build_norm_layer +from mmengine.model import BaseModule, Sequential + + +class unit_gcn(BaseModule): + """The basic unit of graph convolutional network. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + A (torch.Tensor): The adjacency matrix defined in the graph + with shape of `(num_subsets, num_nodes, num_nodes)`. + adaptive (str): The strategy for adapting the weights of the + adjacency matrix. Defaults to ``'importance'``. + conv_pos (str): The position of the 1x1 2D conv. + Defaults to ``'pre'``. + with_res (bool): Whether to use residual connection. + Defaults to False. + norm (str): The name of norm layer. Defaults to ``'BN'``. + act (str): The name of activation layer. Defaults to ``'Relu'``. + init_cfg (dict or list[dict], optional): Initialization config dict. 
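# A minimal sketch of the clip folding done in RecognizerGCN.extract_feat
# above: the batch and num_clips axes are merged before the backbone call,
# assuming inputs of shape (B, num_clips, M, T, V, C). Sizes are illustrative.
import torch

inputs = torch.randn(2, 4, 2, 100, 17, 3)    # (B, nc, M, T, V, C)
bs, nc = inputs.shape[:2]
inputs = inputs.reshape((bs * nc, ) + inputs.shape[2:])
assert inputs.shape == (8, 2, 100, 17, 3)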
+ Defaults to None. + """ + + def __init__(self, + in_channels: int, + out_channels: int, + A: torch.Tensor, + adaptive: str = 'importance', + conv_pos: str = 'pre', + with_res: bool = False, + norm: str = 'BN', + act: str = 'ReLU', + init_cfg: Optional[Union[Dict, List[Dict]]] = None) -> None: + super().__init__(init_cfg=init_cfg) + self.in_channels = in_channels + self.out_channels = out_channels + self.num_subsets = A.size(0) + + assert adaptive in [None, 'init', 'offset', 'importance'] + self.adaptive = adaptive + assert conv_pos in ['pre', 'post'] + self.conv_pos = conv_pos + self.with_res = with_res + + self.norm_cfg = norm if isinstance(norm, dict) else dict(type=norm) + self.act_cfg = act if isinstance(act, dict) else dict(type=act) + self.bn = build_norm_layer(self.norm_cfg, out_channels)[1] + self.act = build_activation_layer(self.act_cfg) + + if self.adaptive == 'init': + self.A = nn.Parameter(A.clone()) + else: + self.register_buffer('A', A) + + if self.adaptive in ['offset', 'importance']: + self.PA = nn.Parameter(A.clone()) + if self.adaptive == 'offset': + nn.init.uniform_(self.PA, -1e-6, 1e-6) + elif self.adaptive == 'importance': + nn.init.constant_(self.PA, 1) + + if self.conv_pos == 'pre': + self.conv = nn.Conv2d(in_channels, out_channels * A.size(0), 1) + elif self.conv_pos == 'post': + self.conv = nn.Conv2d(A.size(0) * in_channels, out_channels, 1) + + if self.with_res: + if in_channels != out_channels: + self.down = Sequential( + nn.Conv2d(in_channels, out_channels, 1), + build_norm_layer(self.norm_cfg, out_channels)[1]) + else: + self.down = lambda x: x + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Defines the computation performed at every call.""" + n, c, t, v = x.shape + res = self.down(x) if self.with_res else 0 + + A_switch = {None: self.A, 'init': self.A} + if hasattr(self, 'PA'): + A_switch.update({ + 'offset': self.A + self.PA, + 'importance': self.A * self.PA + }) + A = A_switch[self.adaptive] + + if self.conv_pos == 'pre': + x = self.conv(x) + x = x.view(n, self.num_subsets, -1, t, v) + x = torch.einsum('nkctv,kvw->nctw', (x, A)).contiguous() + elif self.conv_pos == 'post': + x = torch.einsum('nctv,kvw->nkctw', (x, A)).contiguous() + x = x.view(n, -1, t, v) + x = self.conv(x) + + return self.act(self.bn(x) + res) + + +class unit_tcn(BaseModule): + """The basic unit of temporal convolutional network. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + kernel_size (int): Size of the temporal convolution kernel. + Defaults to 9. + stride (int): Stride of the temporal convolution. Defaults to 1. + dilation (int): Spacing between temporal kernel elements. + Defaults to 1. + norm (str): The name of norm layer. Defaults to ``'BN'``. + dropout (float): Dropout probability. Defaults to 0. + init_cfg (dict or list[dict]): Initialization config dict. Defaults to + ``[ + dict(type='Constant', layer='BatchNorm2d', val=1), + dict(type='Kaiming', layer='Conv2d', mode='fan_out') + ]``. 
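# A minimal sketch of the adaptive adjacency options used by unit_gcn above:
# 'offset' adds a small learnable perturbation to A, while 'importance'
# rescales A element-wise. Sizes and initial values mirror the code above.
import torch

A = torch.rand(3, 17, 17)                          # (num_subsets, V, V)
PA_offset = torch.zeros_like(A).uniform_(-1e-6, 1e-6)
PA_importance = torch.ones_like(A)
A_offset = A + PA_offset                           # adaptive='offset'
A_importance = A * PA_importance                   # adaptive='importance'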
+ """ + + def __init__( + self, + in_channels: int, + out_channels: int, + kernel_size: int = 9, + stride: int = 1, + dilation: int = 1, + norm: str = 'BN', + dropout: float = 0, + init_cfg: Union[Dict, List[Dict]] = [ + dict(type='Constant', layer='BatchNorm2d', val=1), + dict(type='Kaiming', layer='Conv2d', mode='fan_out') + ] + ) -> None: + super().__init__(init_cfg=init_cfg) + + self.in_channels = in_channels + self.out_channels = out_channels + self.norm_cfg = norm if isinstance(norm, dict) else dict(type=norm) + pad = (kernel_size + (kernel_size - 1) * (dilation - 1) - 1) // 2 + + self.conv = nn.Conv2d( + in_channels, + out_channels, + kernel_size=(kernel_size, 1), + padding=(pad, 0), + stride=(stride, 1), + dilation=(dilation, 1)) + self.bn = build_norm_layer(self.norm_cfg, out_channels)[1] \ + if norm is not None else nn.Identity() + + self.drop = nn.Dropout(dropout, inplace=True) + self.stride = stride + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Defines the computation performed at every call.""" + return self.drop(self.bn(self.conv(x))) diff --git a/mmaction/models/utils/graph.py b/mmaction/models/utils/graph.py index 4aaac07d03..7575640fc8 100644 --- a/mmaction/models/utils/graph.py +++ b/mmaction/models/utils/graph.py @@ -1,216 +1,207 @@ # Copyright (c) OpenMMLab. All rights reserved. -from typing import List, Tuple +from typing import List, Tuple, Union import numpy as np +import torch -def get_hop_distance(num_node: int, - edges: List[Tuple[int, int]], - max_hop: int = 1) -> np.ndarray: - """Get n-hop distance matrix by edges. +def k_adjacency(A: Union[torch.Tensor, np.ndarray], + k: int, + with_self: bool = False, + self_factor: float = 1) -> np.ndarray: + """Construct k-adjacency matrix. + + Args: + A (torch.Tensor or np.ndarray): The adjacency matrix. + k (int): The number of hops. + with_self (bool): Whether to add self-loops to the + k-adjacency matrix. The self-loops is critical + for learning the relationships between the current + joint and its k-hop neighbors. Defaults to False. + self_factor (float): The scale factor to the added + identity matrix. Defaults to 1. + + Returns: + np.ndarray: The k-adjacency matrix. + """ + # A is a 2D square array + if isinstance(A, torch.Tensor): + A = A.data.cpu().numpy() + assert isinstance(A, np.ndarray) + Iden = np.eye(len(A), dtype=A.dtype) + if k == 0: + return Iden + Ak = np.minimum(np.linalg.matrix_power(A + Iden, k), 1) - np.minimum( + np.linalg.matrix_power(A + Iden, k - 1), 1) + if with_self: + Ak += (self_factor * Iden) + return Ak + + +def edge2mat(edges: List[Tuple[int, int]], num_node: int) -> np.ndarray: + """Get adjacency matrix from edges. Args: - num_node (int): The number of nodes of the graph. edges (list[tuple[int, int]]): The edges of the graph. - max_hop (int): The maximal distance between two connected nodes. - Defaults to 1. + num_node (int): The number of nodes of the graph. Returns: - hop_dis (np.ndarray): The n-hop distance matrix. + np.ndarray: The adjacency matrix. 
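# A minimal sketch of k_adjacency above on a 3-node path graph 0-1-2: the
# k-hop matrix keeps exactly the pairs whose shortest distance is k (here k=2).
import numpy as np

A = np.array([[0, 1, 0],
              [1, 0, 1],
              [0, 1, 0]], dtype=np.float32)
Iden = np.eye(3, dtype=A.dtype)
A2 = np.minimum(np.linalg.matrix_power(A + Iden, 2), 1) - \
    np.minimum(np.linalg.matrix_power(A + Iden, 1), 1)
# A2 connects only nodes 0 and 2, which are exactly two hops apart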
""" - adj_mat = np.zeros((num_node, num_node)) + A = np.zeros((num_node, num_node)) for i, j in edges: - adj_mat[i, j] = 1 - adj_mat[j, i] = 1 - - # compute hop steps - hop_dis = np.zeros((num_node, num_node)) + np.inf - transfer_mat = [ - np.linalg.matrix_power(adj_mat, d) for d in range(max_hop + 1) - ] - arrive_mat = (np.stack(transfer_mat) > 0) - for d in range(max_hop, -1, -1): - hop_dis[arrive_mat[d]] = d - return hop_dis + A[j, i] = 1 + return A -def normalize_digraph(adj_matrix: np.ndarray) -> np.ndarray: - """Normalize the digraph. +def normalize_digraph(A: np.ndarray, dim: int = 0) -> np.ndarray: + """Normalize the digraph according to the given dimension. Args: - adj_matrix (np.ndarray): The adjacency matrix. + A (np.ndarray): The adjacency matrix. + dim (int): The dimension to perform normalization. + Defaults to 0. Returns: - norm_matrix (np.ndarray): The normalized adjacency matrix. + np.ndarray: The normalized adjacency matrix. """ - Dl = np.sum(adj_matrix, 0) - num_nodes = adj_matrix.shape[0] - Dn = np.zeros((num_nodes, num_nodes)) - for i in range(num_nodes): + # A is a 2D square array + Dl = np.sum(A, dim) + h, w = A.shape + Dn = np.zeros((w, w)) + + for i in range(w): if Dl[i] > 0: Dn[i, i] = Dl[i]**(-1) - norm_matrix = np.dot(adj_matrix, Dn) - return norm_matrix + AD = np.dot(A, Dn) + return AD -def edge2mat(edges: List[Tuple[int, int]], num_node: int) -> np.ndarray: - """Get adjacency matrix from edges. + +def get_hop_distance(num_node: int, + edges: List[Tuple[int, int]], + max_hop: int = 1) -> np.ndarray: + """Get n-hop distance matrix by edges. Args: - edges (list[tuple[int, int]]): The edges of the graph. num_node (int): The number of nodes of the graph. + edges (list[tuple[int, int]]): The edges of the graph. + max_hop (int): The maximal distance between two connected nodes. + Defaults to 1. Returns: - np.ndarray: The adjacency matrix. + np.ndarray: The n-hop distance matrix. """ - A = np.zeros((num_node, num_node)) + A = np.eye(num_node) + for i, j in edges: + A[i, j] = 1 A[j, i] = 1 - return A + + # compute hop steps + hop_dis = np.zeros((num_node, num_node)) + np.inf + transfer_mat = [np.linalg.matrix_power(A, d) for d in range(max_hop + 1)] + arrive_mat = (np.stack(transfer_mat) > 0) + for d in range(max_hop, -1, -1): + hop_dis[arrive_mat[d]] = d + return hop_dis class Graph: - """The Graph to model the different layout of skeletons. + """The Graph to model the skeletons. Args: - layout (str): Must be one of the following candidates - - openpose: 18 or 25 joints. For more information, please refer to: - https://github.com/CMU-Perceptual-Computing-Lab/openpose#output - - ntu-rgb+d: 25 joints. For more information, please refer to: - https://github.com/shahroudy/NTURGB-D - - coco: 17 joints. For more information, please refer to: - https://cocodataset.org/ - - strategy (str): Must be one of the follow candidates - - uniform: Uniform Labeling - - distance: Distance Partitioning - - spatial: Spatial Configuration - For more information, please refer to the section 'Partition - Strategies' in the paper (https://arxiv.org/abs/1801.07455). - - max_hop (int): The maximal distance between two connected nodes. - Defaults to 1. - dilation (int): controls the spacing between the kernel points. - Defaults to 1. + layout (str): must be one of the following candidates: + 'openpose', 'nturgb+d', 'coco'. Defaults to ``'coco'``. + mode (str): must be one of the following candidates: + 'stgcn_spatial', 'spatial'. Defaults to ``'spatial'``. 
+ max_hop (int): the maximal distance between two connected + nodes. Defaults to 1. """ def __init__(self, - layout: str = 'openpose-18', - strategy: str = 'uniform', - max_hop: int = 1, - dilation: int = 1) -> None: + layout: str = 'coco', + mode: str = 'spatial', + max_hop: int = 1) -> None: + self.max_hop = max_hop - self.dilation = dilation + self.layout = layout + self.mode = mode + + assert layout in ['openpose', 'nturgb+d', 'coco'] + + self.set_layout(layout) + self.hop_dis = get_hop_distance(self.num_node, self.inward, max_hop) - assert layout in ['openpose-18', 'openpose-25', 'ntu-rgb+d', 'coco'] - assert strategy in ['uniform', 'distance', 'spatial', 'agcn'] - self.get_edge(layout) - self.hop_dis = get_hop_distance( - self.num_node, self.edge, max_hop=max_hop) - self.get_adjacency(strategy) + assert hasattr(self, mode), f'Do Not Exist This Mode: {mode}' + self.A = getattr(self, mode)() def __str__(self): return self.A - def get_edge(self, layout: str) -> None: - """This method returns the edge pairs of the layout.""" + def set_layout(self, layout: str) -> None: + """Initialize the layout of candidates.""" - if layout == 'openpose-18': + if layout == 'openpose': self.num_node = 18 - self_link = [(i, i) for i in range(self.num_node)] - neighbor_link = [(4, 3), (3, 2), (7, 6), (6, 5), - (13, 12), (12, 11), (10, 9), (9, 8), (11, 5), - (8, 2), (5, 1), (2, 1), (0, 1), (15, 0), (14, 0), - (17, 15), (16, 14)] - self.edge = self_link + neighbor_link + self.inward = [(4, 3), (3, 2), (7, 6), (6, 5), (13, 12), (12, 11), + (10, 9), (9, 8), (11, 5), (8, 2), (5, 1), (2, 1), + (0, 1), (15, 0), (14, 0), (17, 15), (16, 14)] self.center = 1 - elif layout == 'openpose-25': + elif layout == 'nturgb+d': self.num_node = 25 - self_link = [(i, i) for i in range(self.num_node)] - neighbor_link = [(4, 3), (3, 2), (7, 6), (6, 5), (23, 22), - (22, 11), (24, 11), (11, 10), (10, 9), (9, 8), - (20, 19), (19, 14), (21, 14), (14, 13), (13, 12), - (12, 8), (8, 1), (5, 1), (2, 1), (0, 1), (15, 0), - (16, 0), (17, 15), (18, 16)] - self.self_link = self_link - self.neighbor_link = neighbor_link - self.edge = self_link + neighbor_link - self.center = 1 - elif layout == 'ntu-rgb+d': - self.num_node = 25 - self_link = [(i, i) for i in range(self.num_node)] - neighbor_1base = [(1, 2), (2, 21), (3, 21), - (4, 3), (5, 21), (6, 5), (7, 6), (8, 7), (9, 21), - (10, 9), (11, 10), (12, 11), (13, 1), (14, 13), - (15, 14), (16, 15), (17, 1), (18, 17), (19, 18), - (20, 19), (22, 23), (23, 8), (24, 25), (25, 12)] - neighbor_link = [(i - 1, j - 1) for (i, j) in neighbor_1base] - self.self_link = self_link - self.neighbor_link = neighbor_link - self.edge = self_link + neighbor_link + neighbor_base = [(1, 2), (2, 21), (3, 21), (4, 3), (5, 21), (6, 5), + (7, 6), (8, 7), (9, 21), (10, 9), (11, 10), + (12, 11), (13, 1), (14, 13), (15, 14), (16, 15), + (17, 1), (18, 17), (19, 18), (20, 19), (22, 8), + (23, 8), (24, 12), (25, 12)] + self.inward = [(i - 1, j - 1) for (i, j) in neighbor_base] self.center = 21 - 1 elif layout == 'coco': self.num_node = 17 - self_link = [(i, i) for i in range(self.num_node)] - neighbor_1base = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], - [6, 12], [7, 13], [6, 7], [8, 6], [9, 7], - [10, 8], [11, 9], [2, 3], [2, 1], [3, 1], [4, 2], - [5, 3], [4, 6], [5, 7]] - neighbor_link = [(i - 1, j - 1) for (i, j) in neighbor_1base] - self.edge = self_link + neighbor_link + self.inward = [(15, 13), (13, 11), (16, 14), (14, 12), (11, 5), + (12, 6), (9, 7), (7, 5), (10, 8), (8, 6), (5, 0), + (6, 0), (1, 0), (3, 1), (2, 
0), (4, 2)] self.center = 0 else: - raise ValueError(f'{layout} is not supported.') - - def get_adjacency(self, strategy: str): - """This method returns the adjacency matrix according to strategy.""" - - valid_hop = range(0, self.max_hop + 1, self.dilation) - adjacency = np.zeros((self.num_node, self.num_node)) - for hop in valid_hop: - adjacency[self.hop_dis == hop] = 1 - normalize_adjacency = normalize_digraph(adjacency) - - if strategy == 'uniform': - A = np.zeros((1, self.num_node, self.num_node)) - A[0] = normalize_adjacency - self.A = A - elif strategy == 'distance': - A = np.zeros((len(valid_hop), self.num_node, self.num_node)) - for i, hop in enumerate(valid_hop): - A[i][self.hop_dis == hop] = normalize_adjacency[self.hop_dis == - hop] - self.A = A - elif strategy == 'spatial': - A = [] - for hop in valid_hop: - a_root = np.zeros((self.num_node, self.num_node)) - a_close = np.zeros((self.num_node, self.num_node)) - a_further = np.zeros((self.num_node, self.num_node)) - for i in range(self.num_node): - for j in range(self.num_node): - if self.hop_dis[j, i] == hop: - if self.hop_dis[j, self.center] == self.hop_dis[ - i, self.center]: - a_root[j, i] = normalize_adjacency[j, i] - elif self.hop_dis[j, self.center] > self.hop_dis[ - i, self.center]: - a_close[j, i] = normalize_adjacency[j, i] - else: - a_further[j, i] = normalize_adjacency[j, i] - if hop == 0: - A.append(a_root) - else: - A.append(a_root + a_close) - A.append(a_further) - A = np.stack(A) - self.A = A - elif strategy == 'agcn': - link_mat = edge2mat(self.self_link, self.num_node) - In = normalize_digraph(edge2mat(self.neighbor_link, self.num_node)) - outward = [(j, i) for (i, j) in self.neighbor_link] - Out = normalize_digraph(edge2mat(outward, self.num_node)) - A = np.stack((link_mat, In, Out)) - self.A = A - else: - raise ValueError('Do Not Exist This Strategy') + raise ValueError(f'Do Not Exist This Layout: {layout}') + self.self_link = [(i, i) for i in range(self.num_node)] + self.outward = [(j, i) for (i, j) in self.inward] + self.neighbor = self.inward + self.outward + + def stgcn_spatial(self) -> np.ndarray: + """ST-GCN spatial mode.""" + adj = np.zeros((self.num_node, self.num_node)) + adj[self.hop_dis <= self.max_hop] = 1 + normalize_adj = normalize_digraph(adj) + hop_dis = self.hop_dis + center = self.center + + A = [] + for hop in range(self.max_hop + 1): + a_close = np.zeros((self.num_node, self.num_node)) + a_further = np.zeros((self.num_node, self.num_node)) + for i in range(self.num_node): + for j in range(self.num_node): + if hop_dis[j, i] == hop: + if hop_dis[j, center] >= hop_dis[i, center]: + a_close[j, i] = normalize_adj[j, i] + else: + a_further[j, i] = normalize_adj[j, i] + A.append(a_close) + if hop > 0: + A.append(a_further) + return np.stack(A) + + def spatial(self) -> np.ndarray: + """Standard spatial mode.""" + Iden = edge2mat(self.self_link, self.num_node) + In = normalize_digraph(edge2mat(self.inward, self.num_node)) + Out = normalize_digraph(edge2mat(self.outward, self.num_node)) + A = np.stack((Iden, In, Out)) + return A + + def binary_adj(self) -> np.ndarray: + """Construct an adjacency matrix for an undirected graph.""" + A = edge2mat(self.neighbor, self.num_node) + return A[None] diff --git a/tests/datasets/transforms/test_formating.py b/tests/datasets/transforms/test_formating.py index fb93e7c397..842d2dbf27 100644 --- a/tests/datasets/transforms/test_formating.py +++ b/tests/datasets/transforms/test_formating.py @@ -7,9 +7,11 @@ import torch from mmengine.structures import InstanceData, 
LabelData from mmengine.testing import assert_dict_has_keys +from numpy.testing import assert_array_equal from mmaction.datasets.transforms import (FormatAudioShape, FormatGCNInput, - FormatShape, Transpose) + FormatShape, PackActionInputs, + Transpose) from mmaction.registry import TRANSFORMS from mmaction.structures import ActionDataSample from mmaction.utils import register_all_modules @@ -21,16 +23,14 @@ class TestPackActionInputs(unittest.TestCase): def test_transform(self): # keypoint input - data = dict( - keypoint=np.random.randn(3, 300, 17, 2), - label=[1], - filename='test.txt') - - cfg = dict(type='PackActionInputs') - transform = TRANSFORMS.build(cfg) - results = transform(copy.deepcopy(data)) + results = dict(keypoint=np.random.randn(1, 2, 300, 17, 3), label=1) + transform = PackActionInputs() + results = transform(results) self.assertIn('inputs', results) + self.assertIn('data_samples', results) self.assertIsInstance(results['inputs'], torch.Tensor) + self.assertEqual(results['data_samples'].gt_labels.item, + torch.LongTensor([1])) # audio input data = dict( @@ -216,31 +216,35 @@ def test_format_audio_shape(): def test_format_gcn_input(): - with pytest.raises(ValueError): - # invalid input format - FormatGCNInput('XXXX') - - # 'NCTVM' input format - results = dict( - keypoint=np.random.randn(2, 300, 17, 2), - keypoint_score=np.random.randn(2, 300, 17)) - format_shape = FormatGCNInput('NCTVM', num_person=2) - assert format_shape(results)['input_shape'] == (3, 300, 17, 2) - assert repr(format_shape) == format_shape.__class__.__name__ + \ - '(input_format=NCTVM, num_person=%d)' % 2 + with pytest.raises(AssertionError): + FormatGCNInput(mode='invalid') - # test real num_person < 2 results = dict( - keypoint=np.random.randn(1, 300, 17, 2), - keypoint_score=np.random.randn(1, 300, 17)) - assert format_shape(results)['input_shape'] == (3, 300, 17, 2) - assert repr(format_shape) == format_shape.__class__.__name__ + \ - '(input_format=NCTVM, num_person=%d)' % 2 - - # test real num_person > 2 - results = dict( - keypoint=np.random.randn(3, 300, 17, 2), - keypoint_score=np.random.randn(3, 300, 17)) - assert format_shape(results)['input_shape'] == (3, 300, 17, 2) - assert repr(format_shape) == format_shape.__class__.__name__ + \ - '(input_format=NCTVM, num_person=%d)' % 2 + keypoint=np.random.randn(2, 10, 17, 2), + keypoint_score=np.random.randn(2, 10, 17)) + format_shape = FormatGCNInput(num_person=2, mode='zero') + results = format_shape(results) + assert results['keypoint'].shape == (1, 2, 10, 17, 3) + assert repr(format_shape) == 'FormatGCNInput(num_person=2, mode=zero)' + + results = dict(keypoint=np.random.randn(2, 40, 25, 3), num_clips=4) + format_shape = FormatGCNInput(num_person=2, mode='zero') + results = format_shape(results) + assert results['keypoint'].shape == (4, 2, 10, 25, 3) + + results = dict(keypoint=np.random.randn(1, 10, 25, 3)) + format_shape = FormatGCNInput(num_person=2, mode='zero') + results = format_shape(results) + assert results['keypoint'].shape == (1, 2, 10, 25, 3) + assert_array_equal(results['keypoint'][:, 1], np.zeros((1, 10, 25, 3))) + + results = dict(keypoint=np.random.randn(1, 10, 25, 3)) + format_shape = FormatGCNInput(num_person=2, mode='loop') + results = format_shape(results) + assert results['keypoint'].shape == (1, 2, 10, 25, 3) + assert_array_equal(results['keypoint'][:, 1], results['keypoint'][:, 0]) + + results = dict(keypoint=np.random.randn(3, 10, 25, 3)) + format_shape = FormatGCNInput(num_person=2, mode='zero') + results = 
format_shape(results) + assert results['keypoint'].shape == (1, 2, 10, 25, 3) diff --git a/tests/datasets/transforms/test_pose_loading.py b/tests/datasets/transforms/test_pose_transforms.py similarity index 60% rename from tests/datasets/transforms/test_pose_loading.py rename to tests/datasets/transforms/test_pose_transforms.py index eeb2dad84c..d65d450124 100644 --- a/tests/datasets/transforms/test_pose_loading.py +++ b/tests/datasets/transforms/test_pose_transforms.py @@ -1,4 +1,5 @@ # Copyright (c) OpenMMLab. All rights reserved. +import copy import copy as cp import os.path as osp import tempfile @@ -7,120 +8,18 @@ import numpy as np import pytest from mmengine import dump +from mmengine.testing import assert_dict_has_keys from numpy.testing import assert_array_almost_equal, assert_array_equal -from mmaction.datasets.transforms import (GeneratePoseTarget, LoadKineticsPose, - PaddingWithLoop, PoseDecode, +from mmaction.datasets.transforms import (GeneratePoseTarget, GenSkeFeat, + JointToBone, LoadKineticsPose, + MergeSkeFeat, PadTo, PoseCompact, + PoseDecode, PreNormalize2D, + PreNormalize3D, ToMotion, UniformSampleFrames) -class TestPoseLoading: - - @staticmethod - def test_uniform_sample_frames(): - results = dict(total_frames=64, start_index=0) - sampling = UniformSampleFrames( - clip_len=8, num_clips=1, test_mode=True, seed=0) - - assert str(sampling) == ('UniformSampleFrames(clip_len=8, ' - 'num_clips=1, test_mode=True, seed=0)') - sampling_results = sampling(results) - assert sampling_results['clip_len'] == 8 - assert sampling_results['frame_interval'] is None - assert sampling_results['num_clips'] == 1 - assert_array_equal(sampling_results['frame_inds'], - np.array([4, 15, 21, 24, 35, 43, 51, 63])) - - results = dict(total_frames=15, start_index=0) - sampling = UniformSampleFrames( - clip_len=8, num_clips=1, test_mode=True, seed=0) - sampling_results = sampling(results) - assert sampling_results['clip_len'] == 8 - assert sampling_results['frame_interval'] is None - assert sampling_results['num_clips'] == 1 - assert_array_equal(sampling_results['frame_inds'], - np.array([0, 2, 4, 6, 8, 9, 11, 13])) - - results = dict(total_frames=7, start_index=0) - sampling = UniformSampleFrames( - clip_len=8, num_clips=1, test_mode=True, seed=0) - sampling_results = sampling(results) - assert sampling_results['clip_len'] == 8 - assert sampling_results['frame_interval'] is None - assert sampling_results['num_clips'] == 1 - assert_array_equal(sampling_results['frame_inds'], - np.array([0, 1, 2, 3, 4, 5, 6, 0])) - - results = dict(total_frames=7, start_index=0) - sampling = UniformSampleFrames( - clip_len=8, num_clips=8, test_mode=True, seed=0) - sampling_results = sampling(results) - assert sampling_results['clip_len'] == 8 - assert sampling_results['frame_interval'] is None - assert sampling_results['num_clips'] == 8 - assert len(sampling_results['frame_inds']) == 64 - - results = dict(total_frames=64, start_index=0) - sampling = UniformSampleFrames( - clip_len=8, num_clips=4, test_mode=True, seed=0) - sampling_results = sampling(results) - assert sampling_results['clip_len'] == 8 - assert sampling_results['frame_interval'] is None - assert sampling_results['num_clips'] == 4 - assert_array_equal( - sampling_results['frame_inds'], - np.array([ - 4, 15, 21, 24, 35, 43, 51, 63, 1, 11, 21, 26, 36, 47, 54, 56, - 0, 12, 18, 25, 38, 47, 55, 62, 0, 9, 21, 25, 37, 40, 49, 60 - ])) - - results = dict(total_frames=64, start_index=0) - sampling = UniformSampleFrames( - clip_len=8, num_clips=1, 
test_mode=False, seed=0) - sampling_results = sampling(results) - assert sampling_results['clip_len'] == 8 - assert sampling_results['frame_interval'] is None - assert sampling_results['num_clips'] == 1 - assert len(sampling_results['frame_inds']) == 8 - - results = dict(total_frames=7, start_index=0) - sampling = UniformSampleFrames( - clip_len=8, num_clips=1, test_mode=False, seed=0) - sampling_results = sampling(results) - assert sampling_results['clip_len'] == 8 - assert sampling_results['frame_interval'] is None - assert sampling_results['num_clips'] == 1 - assert len(sampling_results['frame_inds']) == 8 - - results = dict(total_frames=15, start_index=0) - sampling = UniformSampleFrames( - clip_len=8, num_clips=1, test_mode=False, seed=0) - sampling_results = sampling(results) - assert sampling_results['clip_len'] == 8 - assert sampling_results['frame_interval'] is None - assert sampling_results['num_clips'] == 1 - assert len(sampling_results['frame_inds']) == 8 - - @staticmethod - def test_pose_decode(): - kp = np.random.random([1, 16, 17, 2]) - kpscore = np.random.random([1, 16, 17]) - frame_inds = np.array([2, 4, 6, 8, 10]) - results = dict( - keypoint=kp, keypoint_score=kpscore, frame_inds=frame_inds) - pose_decode = PoseDecode() - assert str(pose_decode) == ('PoseDecode()') - decode_results = pose_decode(results) - assert_array_almost_equal(decode_results['keypoint'], kp[:, - frame_inds]) - assert_array_almost_equal(decode_results['keypoint_score'], - kpscore[:, frame_inds]) - - results = dict(keypoint=kp, keypoint_score=kpscore, total_frames=16) - pose_decode = PoseDecode() - decode_results = pose_decode(results) - assert_array_almost_equal(decode_results['keypoint'], kp) - assert_array_almost_equal(decode_results['keypoint_score'], kpscore) +class TestPoseTransforms: @staticmethod def test_load_kinetics_pose(): @@ -164,7 +63,7 @@ def get_mode(arr): 'source=openpose-18, kwargs={})') return_results = load_kinetics_pose(inp) assert return_results['keypoint'].shape[:-1] == \ - return_results['keypoint_score'].shape + return_results['keypoint_score'].shape num_person = return_results['keypoint'].shape[0] num_frame = return_results['keypoint'].shape[1] @@ -177,7 +76,7 @@ def get_mode(arr): squeeze=False, max_person=100, source='openpose-18') return_results = load_kinetics_pose(inp) assert return_results['keypoint'].shape[:-1] == \ - return_results['keypoint_score'].shape + return_results['keypoint_score'].shape num_person = return_results['keypoint'].shape[0] num_frame = return_results['keypoint'].shape[1] @@ -191,7 +90,7 @@ def get_mode(arr): squeeze=True, max_person=100, source='mmpose') return_results = load_kinetics_pose(inp) assert return_results['keypoint'].shape[:-1] == \ - return_results['keypoint_score'].shape + return_results['keypoint_score'].shape num_person = return_results['keypoint'].shape[0] num_frame = return_results['keypoint'].shape[1] @@ -205,7 +104,7 @@ def get_mode(arr): squeeze=True, max_person=2, source='mmpose') return_results = load_kinetics_pose(inp) assert return_results['keypoint'].shape[:-1] == \ - return_results['keypoint_score'].shape + return_results['keypoint_score'].shape num_person = return_results['keypoint'].shape[0] num_frame = return_results['keypoint'].shape[1] @@ -358,19 +257,333 @@ def test_generate_pose_target(): assert_array_almost_equal(return_results['imgs'], 0) @staticmethod - def test_padding_with_loop(): - results = dict(total_frames=3, start_index=0) - sampling = PaddingWithLoop(clip_len=6) + def test_pose_compact(): + results = {} + 
results['img_shape'] = (100, 100) + fake_kp = np.zeros([1, 4, 2, 2]) + fake_kp[:, :, 0] = [10, 10] + fake_kp[:, :, 1] = [90, 90] + results['keypoint'] = fake_kp + + pose_compact = PoseCompact( + padding=0, threshold=0, hw_ratio=None, allow_imgpad=False) + inp = copy.deepcopy(results) + ret = pose_compact(inp) + assert ret['img_shape'] == (80, 80) + assert str(pose_compact) == ( + 'PoseCompact(padding=0, threshold=0, hw_ratio=None, ' + 'allow_imgpad=False)') + + pose_compact = PoseCompact( + padding=0.3, threshold=0, hw_ratio=None, allow_imgpad=False) + inp = copy.deepcopy(results) + ret = pose_compact(inp) + assert ret['img_shape'] == (100, 100) + + pose_compact = PoseCompact( + padding=0.3, threshold=0, hw_ratio=None, allow_imgpad=True) + inp = copy.deepcopy(results) + ret = pose_compact(inp) + assert ret['img_shape'] == (104, 104) + + pose_compact = PoseCompact( + padding=0, threshold=100, hw_ratio=None, allow_imgpad=False) + inp = copy.deepcopy(results) + ret = pose_compact(inp) + assert ret['img_shape'] == (100, 100) + + pose_compact = PoseCompact( + padding=0, threshold=0, hw_ratio=0.75, allow_imgpad=True) + inp = copy.deepcopy(results) + ret = pose_compact(inp) + assert ret['img_shape'] == (80, 106) + + @staticmethod + def test_pre_normalize3d(): + target_keys = ['keypoint', 'total_frames', 'body_center'] + + results = dict(keypoint=np.random.randn(2, 40, 25, 3), total_frames=40) + + pre_normalize3d = PreNormalize3D( + align_center=True, align_spine=True, align_shoulder=False) + + inp = copy.deepcopy(results) + ret1 = pre_normalize3d(inp) + + inp = copy.deepcopy(ret1) + ret2 = pre_normalize3d(inp) + + assert_array_equal(ret2['body_center'], np.zeros(3)) + assert_array_equal(ret1['keypoint'], ret2['keypoint']) + + pre_normalize3d = PreNormalize3D( + align_center=True, align_spine=False, align_shoulder=True) + + inp = copy.deepcopy(results) + ret3 = pre_normalize3d(inp) + + inp = copy.deepcopy(ret3) + ret4 = pre_normalize3d(inp) + + assert_array_equal(ret4['body_center'], np.zeros(3)) + assert_array_equal(ret3['keypoint'], ret4['keypoint']) + + assert assert_dict_has_keys(ret1, target_keys) + assert repr(pre_normalize3d) == 'PreNormalize3D(zaxis=[0, 1], ' \ + 'xaxis=[8, 4], align_center=True, ' \ + 'align_spine=False, ' \ + 'align_shoulder=True)' + + @staticmethod + def test_pre_normalize2d(): + + def check_pose_normalize(origin_kps, target_kps, h, w): + target_kps[..., 0] = target_kps[..., 0] * w / 2 + w / 2 + target_kps[..., 1] = target_kps[..., 1] * h / 2 + h / 2 + assert_array_almost_equal(origin_kps, target_kps, decimal=4) + + results = dict( + keypoint=np.random.randn(1, 40, 17, 2), img_shape=(480, 854)) + pre_normalize_2d = PreNormalize2D(img_shape=(1080, 1920)) + inp = copy.deepcopy(results) + ret1 = pre_normalize_2d(inp) + check_pose_normalize( + results['keypoint'], ret1['keypoint'], h=480, w=854) + + results = dict(keypoint=np.random.randn(1, 40, 17, 2)) + pre_normalize_2d = PreNormalize2D(img_shape=(1080, 1920)) + inp = copy.deepcopy(results) + ret2 = pre_normalize_2d(inp) + check_pose_normalize( + results['keypoint'], ret2['keypoint'], h=1080, w=1920) + + assert repr(pre_normalize_2d) == \ + 'PreNormalize2D(img_shape=(1080, 1920))' + + @staticmethod + def test_joint_to_bone(): + with pytest.raises(ValueError): + JointToBone(dataset='invalid') + + with pytest.raises(AssertionError): + JointToBone()(dict(keypoint=np.random.randn(2, 15, 25, 4))) + + results = dict(keypoint=np.random.randn(2, 15, 25, 3)) + joint_to_bone = JointToBone(dataset='nturgb+d') + center_index = 20 + 
results = joint_to_bone(results) + assert_array_equal(results['keypoint'][..., center_index, :], + np.zeros((2, 15, 3))) + + results = dict(keypoint=np.random.randn(2, 15, 18, 3)) + joint_to_bone = JointToBone(dataset='openpose') + center_index = 0 + center_score = results['keypoint'][..., center_index, 2] + results = joint_to_bone(results) + assert_array_equal(results['keypoint'][..., center_index, :2], + np.zeros((2, 15, 2))) + assert_array_almost_equal(results['keypoint'][..., center_index, 2], + center_score) + + results = dict(keypoint=np.random.randn(2, 15, 17, 3)) + joint_to_bone = JointToBone(dataset='coco') + center_index = 0 + center_score = results['keypoint'][..., center_index, 2] + results = joint_to_bone(results) + assert_array_equal(results['keypoint'][..., center_index, :2], + np.zeros((2, 15, 2))) + assert_array_almost_equal(results['keypoint'][..., center_index, 2], + center_score) + + results = dict(keypoint=np.random.randn(2, 15, 17, 3)) + joint_to_bone = JointToBone(dataset='coco', target='bone') + results = joint_to_bone(results) + assert assert_dict_has_keys(results, ['keypoint', 'bone']) + assert repr(joint_to_bone) == 'JointToBone(dataset=coco, target=bone)' + + @staticmethod + def test_to_motion(): + with pytest.raises(AssertionError): + ToMotion()(dict(keypoint=np.random.randn(2, 15, 25, 4))) + + with pytest.raises(KeyError): + ToMotion(source='j')(dict(keypoint=np.random.randn(2, 15, 25, 4))) + + results = dict(keypoint=np.random.randn(2, 15, 25, 3)) + to_motion = ToMotion() + results = to_motion(results) + assert_array_equal(results['motion'][:, -1, :, :], np.zeros( + (2, 25, 3))) + assert assert_dict_has_keys(results, ['keypoint', 'motion']) + assert repr(to_motion) == 'ToMotion(dataset=nturgb+d, ' \ + 'source=keypoint, target=motion)' + + @staticmethod + def test_merge_ske_feat(): + with pytest.raises(KeyError): + MergeSkeFeat()(dict(b=np.random.randn(2, 15, 25, 3))) + + results = dict( + j=np.random.randn(2, 10, 25, 3), b=np.random.randn(2, 10, 25, 3)) + merge_ske_feat = MergeSkeFeat(feat_list=['j', 'b']) + results = merge_ske_feat(results) + + assert assert_dict_has_keys(results, ['keypoint']) + assert results['keypoint'].shape == (2, 10, 25, 6) + assert repr(merge_ske_feat) == "MergeSkeFeat(feat_list=['j', 'b'], " \ + 'target=keypoint, axis=-1)' + + @staticmethod + def test_gen_ske_feat(): + results = dict(keypoint=np.random.randn(1, 10, 25, 3)) + + gen_ske_feat = GenSkeFeat(dataset='nturgb+d', feats=['j']) + inp = copy.deepcopy(results) + ret1 = gen_ske_feat(inp) + assert_array_equal(ret1['keypoint'], results['keypoint']) + + gen_ske_feat = GenSkeFeat( + dataset='nturgb+d', feats=['j', 'b', 'jm', 'bm']) + inp = copy.deepcopy(results) + ret2 = gen_ske_feat(inp) + assert ret2['keypoint'].shape == (1, 10, 25, 12) + + results = dict( + keypoint=np.random.randn(1, 10, 17, 2), + keypoint_score=np.random.randn(1, 10, 17)) + gen_ske_feat = GenSkeFeat(dataset='coco', feats=['j', 'b', 'jm', 'bm']) + results = gen_ske_feat(results) + assert results['keypoint'].shape == (1, 10, 17, 12) + assert assert_dict_has_keys(results, ['keypoint']) + assert not assert_dict_has_keys(results, ['j', 'b', 'jm', 'bm']) + assert repr(gen_ske_feat) == 'GenSkeFeat(dataset=coco, ' \ + "feats=['j', 'b', 'jm', 'bm'], axis=-1)" + + @staticmethod + def test_uniform_sample_frames(): + results = dict(total_frames=64, start_index=0) + sampling = UniformSampleFrames( + clip_len=8, num_clips=1, test_mode=True, seed=0) + + assert repr(sampling) == ('UniformSampleFrames(clip_len=8, ' + 
'num_clips=1, test_mode=True, seed=0)') sampling_results = sampling(results) - assert sampling_results['clip_len'] == 6 + assert sampling_results['clip_len'] == 8 assert sampling_results['frame_interval'] is None assert sampling_results['num_clips'] == 1 assert_array_equal(sampling_results['frame_inds'], - np.array([0, 1, 2, 0, 1, 2])) + np.array([4, 15, 21, 24, 35, 43, 51, 63])) + + results = dict(total_frames=15, start_index=0) + sampling = UniformSampleFrames( + clip_len=8, num_clips=1, test_mode=True, seed=0) + sampling_results = sampling(results) + assert sampling_results['clip_len'] == 8 + assert sampling_results['frame_interval'] is None + assert sampling_results['num_clips'] == 1 + assert_array_equal(sampling_results['frame_inds'], + np.array([0, 2, 4, 6, 8, 9, 11, 13])) + + results = dict(total_frames=7, start_index=0) + sampling = UniformSampleFrames( + clip_len=8, num_clips=1, test_mode=True, seed=0) + sampling_results = sampling(results) + assert sampling_results['clip_len'] == 8 + assert sampling_results['frame_interval'] is None + assert sampling_results['num_clips'] == 1 + assert_array_equal(sampling_results['frame_inds'], + np.array([0, 1, 2, 3, 4, 5, 6, 0])) + + results = dict(total_frames=7, start_index=0) + sampling = UniformSampleFrames( + clip_len=8, num_clips=8, test_mode=True, seed=0) + sampling_results = sampling(results) + assert sampling_results['clip_len'] == 8 + assert sampling_results['frame_interval'] is None + assert sampling_results['num_clips'] == 8 + assert len(sampling_results['frame_inds']) == 64 + + results = dict(total_frames=64, start_index=0) + sampling = UniformSampleFrames( + clip_len=8, num_clips=4, test_mode=True, seed=0) + sampling_results = sampling(results) + assert sampling_results['clip_len'] == 8 + assert sampling_results['frame_interval'] is None + assert sampling_results['num_clips'] == 4 + assert_array_equal( + sampling_results['frame_inds'], + np.array([ + 4, 15, 21, 24, 35, 43, 51, 63, 1, 11, 21, 26, 36, 47, 54, 56, + 0, 12, 18, 25, 38, 47, 55, 62, 0, 9, 21, 25, 37, 40, 49, 60 + ])) + + results = dict(total_frames=64, start_index=0) + sampling = UniformSampleFrames( + clip_len=8, num_clips=1, test_mode=False, seed=0) + sampling_results = sampling(results) + assert sampling_results['clip_len'] == 8 + assert sampling_results['frame_interval'] is None + assert sampling_results['num_clips'] == 1 + assert len(sampling_results['frame_inds']) == 8 + results = dict(total_frames=7, start_index=0) + sampling = UniformSampleFrames( + clip_len=8, num_clips=1, test_mode=False, seed=0) + sampling_results = sampling(results) + assert sampling_results['clip_len'] == 8 + assert sampling_results['frame_interval'] is None + assert sampling_results['num_clips'] == 1 + assert len(sampling_results['frame_inds']) == 8 -def check_pose_normalize(origin_keypoints, result_keypoints, norm_cfg): - target_keypoints = result_keypoints.copy() - target_keypoints *= (norm_cfg['max_value'] - norm_cfg['min_value']) - target_keypoints += norm_cfg['mean'] - assert_array_almost_equal(origin_keypoints, target_keypoints, decimal=4) + results = dict(total_frames=15, start_index=0) + sampling = UniformSampleFrames( + clip_len=8, num_clips=1, test_mode=False, seed=0) + sampling_results = sampling(results) + assert sampling_results['clip_len'] == 8 + assert sampling_results['frame_interval'] is None + assert sampling_results['num_clips'] == 1 + assert len(sampling_results['frame_inds']) == 8 + + @staticmethod + def test_pad_to(): + with pytest.raises(AssertionError): + 
PadTo(length=4, mode='invalid') + + results = dict( + keypoint=np.random.randn(2, 3, 17, 3), + total_frames=3, + start_index=0) + + inp = copy.deepcopy(results) + pad_to = PadTo(length=6, mode='loop') + ret1 = pad_to(inp) + kp = ret1['keypoint'] + assert_array_equal(kp[:, :3], kp[:, 3:]) + + inp = copy.deepcopy(results) + pad_to = PadTo(length=6, mode='zero') + ret2 = pad_to(inp) + kp = ret2['keypoint'] + assert ret2['total_frames'] == 6 + assert_array_equal(kp[:, 3:], np.zeros((2, 3, 17, 3))) + + @staticmethod + def test_pose_decode(): + kp = np.random.random([1, 16, 17, 2]) + kpscore = np.random.random([1, 16, 17]) + frame_inds = np.array([2, 4, 6, 8, 10]) + results = dict( + keypoint=kp, keypoint_score=kpscore, frame_inds=frame_inds) + pose_decode = PoseDecode() + assert repr(pose_decode) == 'PoseDecode()' + decode_results = pose_decode(results) + assert_array_almost_equal(decode_results['keypoint'], kp[:, + frame_inds]) + assert_array_almost_equal(decode_results['keypoint_score'], + kpscore[:, frame_inds]) + + results = dict(keypoint=kp, keypoint_score=kpscore, total_frames=16) + pose_decode = PoseDecode() + decode_results = pose_decode(results) + assert_array_almost_equal(decode_results['keypoint'], kp) + assert_array_almost_equal(decode_results['keypoint_score'], kpscore) diff --git a/tests/datasets/transforms/test_processing.py b/tests/datasets/transforms/test_processing.py index 9db87d6fd1..028f5d7129 100644 --- a/tests/datasets/transforms/test_processing.py +++ b/tests/datasets/transforms/test_processing.py @@ -10,9 +10,8 @@ from mmaction.datasets.transforms import (AudioAmplify, CenterCrop, ColorJitter, Flip, Fuse, MelSpectrogram, MultiScaleCrop, - PoseCompact, RandomCrop, - RandomResizedCrop, Resize, TenCrop, - ThreeCrop) + RandomCrop, RandomResizedCrop, + Resize, TenCrop, ThreeCrop) def check_crop(origin_imgs, result_imgs, result_bbox, num_crops=1): @@ -71,51 +70,6 @@ def check_flip(origin_imgs, result_imgs, flip_type): return True -class TestPoseCompact: - - @staticmethod - def test_pose_compact(): - results = {} - results['img_shape'] = (100, 100) - fake_kp = np.zeros([1, 4, 2, 2]) - fake_kp[:, :, 0] = [10, 10] - fake_kp[:, :, 1] = [90, 90] - results['keypoint'] = fake_kp - - pose_compact = PoseCompact( - padding=0, threshold=0, hw_ratio=None, allow_imgpad=False) - inp = copy.deepcopy(results) - ret = pose_compact(inp) - assert ret['img_shape'] == (80, 80) - assert str(pose_compact) == ( - 'PoseCompact(padding=0, threshold=0, hw_ratio=None, ' - 'allow_imgpad=False)') - - pose_compact = PoseCompact( - padding=0.3, threshold=0, hw_ratio=None, allow_imgpad=False) - inp = copy.deepcopy(results) - ret = pose_compact(inp) - assert ret['img_shape'] == (100, 100) - - pose_compact = PoseCompact( - padding=0.3, threshold=0, hw_ratio=None, allow_imgpad=True) - inp = copy.deepcopy(results) - ret = pose_compact(inp) - assert ret['img_shape'] == (104, 104) - - pose_compact = PoseCompact( - padding=0, threshold=100, hw_ratio=None, allow_imgpad=False) - inp = copy.deepcopy(results) - ret = pose_compact(inp) - assert ret['img_shape'] == (100, 100) - - pose_compact = PoseCompact( - padding=0, threshold=0, hw_ratio=0.75, allow_imgpad=True) - inp = copy.deepcopy(results) - ret = pose_compact(inp) - assert ret['img_shape'] == (80, 106) - - class TestAudio: @staticmethod @@ -704,7 +658,6 @@ def test_random_crop_lazy(): @staticmethod def test_random_resized_crop_lazy(): - target_keys = ['imgs', 'crop_bbox', 'img_shape', 'lazy'] # There will be a slight difference because of rounding eps = 0.01 @@ 
-888,7 +841,7 @@ def test_resize_lazy(): [341 / 320, 256 / 240], dtype=np.float32)) assert resize_results_fuse['img_shape'] == (256, 341) - assert repr(resize) == (f'{resize.__class__.__name__ }' + assert repr(resize) == (f'{resize.__class__.__name__}' f'(scale={(341, 256)}, keep_ratio={False}, ' + f'interpolation=bilinear, lazy={True})') diff --git a/tests/models/backbones/test_agcn.py b/tests/models/backbones/test_agcn.py deleted file mode 100644 index f774ee6ac6..0000000000 --- a/tests/models/backbones/test_agcn.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch - -from mmaction.models import AGCN -from mmaction.testing import generate_backbone_demo_inputs - - -def test_AGCN_backbone(): - """Test AGCN backbone.""" - # test ntu-rgb+d layout, agcn strategy - input_shape = (1, 3, 300, 25, 2) - skeletons = generate_backbone_demo_inputs(input_shape) - - agcn = AGCN( - in_channels=3, graph_cfg=dict(layout='ntu-rgb+d', strategy='agcn')) - agcn.init_weights() - agcn.train() - feat = agcn(skeletons) - assert feat.shape == torch.Size([2, 256, 75, 25]) diff --git a/tests/models/backbones/test_stgcn.py b/tests/models/backbones/test_stgcn.py index 7ba4ec2ab9..31ee57484e 100644 --- a/tests/models/backbones/test_stgcn.py +++ b/tests/models/backbones/test_stgcn.py @@ -2,124 +2,45 @@ import torch from mmaction.models import STGCN -from mmaction.testing import generate_backbone_demo_inputs def test_stgcn_backbone(): """Test STGCN backbone.""" - # test coco layout, spatial strategy - input_shape = (1, 3, 300, 17, 2) - skeletons = generate_backbone_demo_inputs(input_shape) - stgcn = STGCN( - in_channels=3, - edge_importance_weighting=True, - graph_cfg=dict(layout='coco', strategy='spatial')) - stgcn.init_weights() - stgcn.train() - feat = stgcn(skeletons) - assert feat.shape == torch.Size([2, 256, 75, 17]) - - # test openpose-18 layout, spatial strategy - input_shape = (1, 3, 300, 18, 2) - skeletons = generate_backbone_demo_inputs(input_shape) - - stgcn = STGCN( - in_channels=3, - edge_importance_weighting=True, - graph_cfg=dict(layout='openpose-18', strategy='spatial')) - stgcn.init_weights() - stgcn.train() - feat = stgcn(skeletons) - assert feat.shape == torch.Size([2, 256, 75, 18]) - - # test ntu-rgb+d layout, spatial strategy - input_shape = (1, 3, 300, 25, 2) - skeletons = generate_backbone_demo_inputs(input_shape) - - stgcn = STGCN( - in_channels=3, - edge_importance_weighting=True, - graph_cfg=dict(layout='ntu-rgb+d', strategy='spatial')) - stgcn.init_weights() - stgcn.train() - feat = stgcn(skeletons) - assert feat.shape == torch.Size([2, 256, 75, 25]) - - # test coco layout, uniform strategy - input_shape = (1, 3, 300, 17, 2) - skeletons = generate_backbone_demo_inputs(input_shape) - - stgcn = STGCN( - in_channels=3, - edge_importance_weighting=True, - graph_cfg=dict(layout='coco', strategy='uniform')) - stgcn.init_weights() - stgcn.train() - feat = stgcn(skeletons) - assert feat.shape == torch.Size([2, 256, 75, 17]) - - # test openpose-18 layout, uniform strategy - input_shape = (1, 3, 300, 18, 2) - skeletons = generate_backbone_demo_inputs(input_shape) - - stgcn = STGCN( - in_channels=3, - edge_importance_weighting=True, - graph_cfg=dict(layout='openpose-18', strategy='uniform')) - stgcn.init_weights() - stgcn.train() - feat = stgcn(skeletons) - assert feat.shape == torch.Size([2, 256, 75, 18]) - - # test ntu-rgb+d layout, uniform strategy - input_shape = (1, 3, 300, 25, 2) - skeletons = generate_backbone_demo_inputs(input_shape) - - stgcn = STGCN( 
- in_channels=3, - edge_importance_weighting=True, - graph_cfg=dict(layout='ntu-rgb+d', strategy='uniform')) - stgcn.init_weights() - stgcn.train() - feat = stgcn(skeletons) - assert feat.shape == torch.Size([2, 256, 75, 25]) - - # test coco layout, distance strategy - input_shape = (1, 3, 300, 17, 2) - skeletons = generate_backbone_demo_inputs(input_shape) - - stgcn = STGCN( - in_channels=3, - edge_importance_weighting=True, - graph_cfg=dict(layout='coco', strategy='distance')) - stgcn.init_weights() - stgcn.train() - feat = stgcn(skeletons) - assert feat.shape == torch.Size([2, 256, 75, 17]) - - # test openpose-18 layout, distance strategy - input_shape = (1, 3, 300, 18, 2) - skeletons = generate_backbone_demo_inputs(input_shape) - - stgcn = STGCN( - in_channels=3, - edge_importance_weighting=True, - graph_cfg=dict(layout='openpose-18', strategy='distance')) - stgcn.init_weights() - stgcn.train() - feat = stgcn(skeletons) - assert feat.shape == torch.Size([2, 256, 75, 18]) - - # test ntu-rgb+d layout, distance strategy - input_shape = (1, 3, 300, 25, 2) - skeletons = generate_backbone_demo_inputs(input_shape) - - stgcn = STGCN( - in_channels=3, - edge_importance_weighting=True, - graph_cfg=dict(layout='ntu-rgb+d', strategy='distance')) - stgcn.init_weights() - stgcn.train() - feat = stgcn(skeletons) - assert feat.shape == torch.Size([2, 256, 75, 25]) + mode = 'stgcn_spatial' + batch_size, num_person, num_frames = 2, 2, 150 + + # openpose-18 layout + num_joints = 18 + model = STGCN(graph_cfg=dict(layout='openpose', mode=mode)) + model.init_weights() + inputs = torch.randn(batch_size, num_person, num_frames, num_joints, 3) + output = model(inputs) + assert output.shape == torch.Size([2, 2, 256, 38, 18]) + + # nturgb+d layout + num_joints = 25 + model = STGCN(graph_cfg=dict(layout='nturgb+d', mode=mode)) + model.init_weights() + inputs = torch.randn(batch_size, num_person, num_frames, num_joints, 3) + output = model(inputs) + assert output.shape == torch.Size([2, 2, 256, 38, 25]) + + # coco layout + num_joints = 17 + model = STGCN(graph_cfg=dict(layout='coco', mode=mode)) + model.init_weights() + inputs = torch.randn(batch_size, num_person, num_frames, num_joints, 3) + output = model(inputs) + assert output.shape == torch.Size([2, 2, 256, 38, 17]) + + # custom settings + # add additional residual connection for the first four gcns + stage_cfgs = {'gcn_with_res': [True] * 4 + [False] * 6} + model = STGCN( + graph_cfg=dict(layout='coco', mode=mode), + num_stages=10, + stage_cfgs=stage_cfgs) + model.init_weights() + output = model(inputs) + assert output.shape == torch.Size([2, 2, 256, 38, 17]) diff --git a/tests/models/heads/test_gcn_head.py b/tests/models/heads/test_gcn_head.py new file mode 100644 index 0000000000..8204a9a43f --- /dev/null +++ b/tests/models/heads/test_gcn_head.py @@ -0,0 +1,27 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import pytest +import torch + +from mmaction.models import GCNHead + + +def test_gcn_head(): + """Test GCNHead.""" + with pytest.raises(AssertionError): + GCNHead(4, 5)(torch.rand((1, 2, 6, 75, 17))) + + gcn_head = GCNHead(num_classes=60, in_channels=256) + gcn_head.init_weights() + feat = torch.rand(1, 2, 256, 75, 25) + cls_scores = gcn_head(feat) + assert gcn_head.num_classes == 60 + assert gcn_head.in_channels == 256 + assert cls_scores.shape == torch.Size([1, 60]) + + gcn_head = GCNHead(num_classes=60, in_channels=256, dropout=0.1) + gcn_head.init_weights() + feat = torch.rand(1, 2, 256, 75, 25) + cls_scores = gcn_head(feat) + assert gcn_head.num_classes == 60 + assert gcn_head.in_channels == 256 + assert cls_scores.shape == torch.Size([1, 60]) diff --git a/tests/models/heads/test_stgcn_head.py b/tests/models/heads/test_stgcn_head.py deleted file mode 100644 index 583c2d7b98..0000000000 --- a/tests/models/heads/test_stgcn_head.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import pytest -import torch - -from mmaction.models import STGCNHead - - -def test_stgcn_head(): - """Test loss method, layer construction, attributes and forward function in - stgcn head.""" - with pytest.raises(NotImplementedError): - # spatial_type not in ['avg', 'max'] - stgcn_head = STGCNHead( - num_classes=60, in_channels=256, spatial_type='min') - stgcn_head.init_weights() - - # spatial_type='avg' - stgcn_head = STGCNHead(num_classes=60, in_channels=256, spatial_type='avg') - stgcn_head.init_weights() - - assert stgcn_head.num_classes == 60 - assert stgcn_head.in_channels == 256 - - input_shape = (2, 256, 75, 17) - feat = torch.rand(input_shape) - - cls_scores = stgcn_head(feat) - assert cls_scores.shape == torch.Size([1, 60]) - - # spatial_type='max' - stgcn_head = STGCNHead(num_classes=60, in_channels=256, spatial_type='max') - stgcn_head.init_weights() - - assert stgcn_head.num_classes == 60 - assert stgcn_head.in_channels == 256 - - input_shape = (2, 256, 75, 17) - feat = torch.rand(input_shape) - - cls_scores = stgcn_head(feat) - assert cls_scores.shape == torch.Size([1, 60]) diff --git a/tests/models/recognizers/test_recognizer_gcn.py b/tests/models/recognizers/test_recognizer_gcn.py index 36314bc618..dc52de3926 100644 --- a/tests/models/recognizers/test_recognizer_gcn.py +++ b/tests/models/recognizers/test_recognizer_gcn.py @@ -1,57 +1,55 @@ # Copyright (c) OpenMMLab. All rights reserved. 
+from unittest.mock import MagicMock + import torch from mmaction.registry import MODELS -from mmaction.testing import (generate_recognizer_demo_inputs, - get_skeletongcn_cfg) +from mmaction.structures import ActionDataSample +from mmaction.testing import get_skeletongcn_cfg from mmaction.utils import register_all_modules -def test_recognizer_gcn(): - register_all_modules() - config = get_skeletongcn_cfg( - 'stgcn/stgcn_1xb16-80e_ntu60-xsub-keypoint.py') - """ - TODO - with pytest.raises(TypeError): - # "pretrained" must be a str or None - config.model['backbone']['pretrained'] = ['None'] - recognizer = MODELS.build(config.model) - """ - - config.model['backbone']['pretrained'] = None - recognizer = MODELS.build(config.model) - - input_shape = (1, 3, 300, 17, 2) - demo_inputs = generate_recognizer_demo_inputs(input_shape, 'skeleton') - - skeletons = demo_inputs['imgs'] - gt_labels = demo_inputs['gt_labels'] +def train_test_step(cfg, input_shape): + recognizer = MODELS.build(cfg.model) + num_classes = cfg.model.cls_head.num_classes + data_batch = { + 'inputs': [torch.randn(input_shape)], + 'data_samples': [ActionDataSample().set_gt_labels(2)] + } - losses = recognizer(skeletons, gt_labels) - assert isinstance(losses, torch.Tensor) + # test train_step + optim_wrapper = MagicMock() + loss_vars = recognizer.train_step(data_batch, optim_wrapper) + assert 'loss' in loss_vars + assert 'loss_cls' in loss_vars + optim_wrapper.update_params.assert_called_once() - # Test forward test + # test test_step with torch.no_grad(): - skeleton_list = [skeleton[None, :] for skeleton in skeletons] - for one_skeleton in skeleton_list: - recognizer(one_skeleton, None, return_loss=False) - - # test stgcn without edge importance weighting - config.model['backbone']['edge_importance_weighting'] = False - recognizer = MODELS.build(config.model) - - input_shape = (1, 3, 300, 17, 2) - demo_inputs = generate_recognizer_demo_inputs(input_shape, 'skeleton') + predictions = recognizer.test_step(data_batch) + score = predictions[0].pred_scores.item + assert len(predictions) == 1 + assert score.shape == torch.Size([num_classes]) + assert torch.min(score) >= 0 + assert torch.max(score) <= 1 + + # test when average_clips is None + recognizer.cls_head.average_clips = None + num_clips = 3 + input_shape = (num_clips, *input_shape[1:]) + data_batch['inputs'] = [torch.randn(input_shape)] + with torch.no_grad(): + predictions = recognizer.test_step(data_batch) + score = predictions[0].pred_scores.item + assert len(predictions) == 1 + assert score.shape == torch.Size([num_clips, num_classes]) - skeletons = demo_inputs['imgs'] - gt_labels = demo_inputs['gt_labels'] + return loss_vars, predictions - losses = recognizer(skeletons, gt_labels) - assert isinstance(losses, torch.Tensor) - # Test forward test - with torch.no_grad(): - skeleton_list = [skeleton[None, :] for skeleton in skeletons] - for one_skeleton in skeleton_list: - recognizer(one_skeleton, None, return_loss=False) +def test_stgcn(): + register_all_modules() + config = get_skeletongcn_cfg( + 'stgcn/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py') + input_shape = (1, 2, 30, 17, 3) # N M T V C + train_test_step(config, input_shape=input_shape) diff --git a/tools/analysis_tools/report_accuracy.py b/tools/analysis_tools/report_accuracy.py index 3516c4b06e..2008f9fb46 100644 --- a/tools/analysis_tools/report_accuracy.py +++ b/tools/analysis_tools/report_accuracy.py @@ -15,17 +15,13 @@ def parse_args(): '--preds', nargs='+', help='list of predict result', - 
default=['demo/fuse/rgb.pkl', 'demo/fuse/flow.pkl']) + default=['demo/fuse/joint.pkl', 'demo/fuse/bone.pkl']) parser.add_argument( '--coefficients', nargs='+', type=float, help='coefficients of each score file', default=[1.0, 1.0]) - parser.add_argument( - '--datalist', - help='list of testing data', - default='demo/fuse/data_list.txt') parser.add_argument('--apply-softmax', action='store_true') args = parser.parse_args() return args diff --git a/tools/test.py b/tools/test.py index 17fac09b2f..341bf9f2c8 100644 --- a/tools/test.py +++ b/tools/test.py @@ -80,7 +80,8 @@ def merge_args(cfg, args): 'The dump file must be a pkl file.' dump_metric = dict(type='DumpResults', out_file_path=args.dump) if isinstance(cfg.test_evaluator, (list, tuple)): - cfg.test_evaluator = list(cfg.test_evaluator).append(dump_metric) + cfg.test_evaluator = list(cfg.test_evaluator) + cfg.test_evaluator.append(dump_metric) else: cfg.test_evaluator = [cfg.test_evaluator, dump_metric] From 53b2a43d8b498169375577e53b9bde58be531ce2 Mon Sep 17 00:00:00 2001 From: Kai Hu Date: Fri, 2 Dec 2022 02:40:20 -0500 Subject: [PATCH 28/57] [Enhance] support spatial-temporal detection demo (#2019) --- ...ned-r50_8xb6-8x8x1-cosine-10e_ava22-rgb.py | 2 +- ...pretrained-r50_8xb8-8x8x1-20e_ava21-rgb.py | 4 +- demo/demo_spatiotemporal_det.mp4 | Bin 0 -> 331376 bytes demo/demo_spatiotemporal_det.py | 402 ++++++++++++++++++ docs/en/user_guides/3_inference.md | 59 +++ mmaction/apis/inference.py | 3 +- 6 files changed, 466 insertions(+), 4 deletions(-) create mode 100644 demo/demo_spatiotemporal_det.mp4 create mode 100644 demo/demo_spatiotemporal_det.py diff --git a/configs/detection/ava/slowfast_kinetics400-pretrained-r50_8xb6-8x8x1-cosine-10e_ava22-rgb.py b/configs/detection/ava/slowfast_kinetics400-pretrained-r50_8xb6-8x8x1-cosine-10e_ava22-rgb.py index 3cbf483e57..a7f4c09ed1 100644 --- a/configs/detection/ava/slowfast_kinetics400-pretrained-r50_8xb6-8x8x1-cosine-10e_ava22-rgb.py +++ b/configs/detection/ava/slowfast_kinetics400-pretrained-r50_8xb6-8x8x1-cosine-10e_ava22-rgb.py @@ -1,4 +1,4 @@ -_base_ = ['slowfast_kinetics400-pretrained-r50_8xb16-8x8x1-20e_ava21-rgb.py'] +_base_ = ['slowfast_kinetics400-pretrained-r50_8xb8-8x8x1-20e_ava21-rgb.py'] model = dict( backbone=dict( diff --git a/configs/detection/ava/slowfast_kinetics400-pretrained-r50_8xb8-8x8x1-20e_ava21-rgb.py b/configs/detection/ava/slowfast_kinetics400-pretrained-r50_8xb8-8x8x1-20e_ava21-rgb.py index c1f8b4d6e0..97e0197a6e 100644 --- a/configs/detection/ava/slowfast_kinetics400-pretrained-r50_8xb8-8x8x1-20e_ava21-rgb.py +++ b/configs/detection/ava/slowfast_kinetics400-pretrained-r50_8xb8-8x8x1-20e_ava21-rgb.py @@ -1,11 +1,11 @@ -_base_ = ['slowonly_kinetics400-pretrained-r50_8xb16-4x16x1-20e_ava21-rgb.py'] +_base_ = ['slowfast_kinetics400-pretrained-r50_8xb16-4x16x1-20e_ava21-rgb.py'] model = dict( backbone=dict( resample_rate=4, speed_ratio=4, slow_pathway=dict(fusion_kernel=7), - prtrained=( + pretrained=( 'https://download.openmmlab.com/mmaction/recognition/slowfast/' 'slowfast_r50_8x8x1_256e_kinetics400_rgb/' 'slowfast_r50_8x8x1_256e_kinetics400_rgb_20200716-73547d2b.pth'))) diff --git a/demo/demo_spatiotemporal_det.mp4 b/demo/demo_spatiotemporal_det.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..007e1d7f25ebf6fd31f376e84c1e23ce1fc85949 GIT binary patch literal 331376 zcmX_mQ*@?H&~0qndE<#~JDHdh+qONiZQHhO+qNfmPQL#?=UhD9yQ_9p7y9B^y&4Dz z2w>vmZfEXjYYhYh0`%Yf^D*hW7&2Phu`mJw0YRBK7#jnDLq%B|={x;!svy9>zsoj7 
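On the tools/analysis_tools/report_accuracy.py change above: the new defaults fuse joint- and bone-stream score files, and the per-file --coefficients plus --apply-softmax options describe a plain weighted late fusion of per-sample class scores. Below is a minimal, self-contained sketch of that idea only; the file names, the fuse_scores helper, and the assumption that each pickle holds an array of shape (num_samples, num_classes) with labels supplied separately are illustrative and are not taken from the script itself.

import pickle

import numpy as np


def fuse_scores(pkl_paths, coefficients, labels, apply_softmax=False):
    """Weighted late fusion of per-sample class scores (illustrative sketch)."""
    fused = None
    for path, coeff in zip(pkl_paths, coefficients):
        with open(path, 'rb') as f:
            scores = np.asarray(pickle.load(f), dtype=np.float64)
        if apply_softmax:
            # subtract the row max for numerical stability before softmax
            scores = scores - scores.max(axis=1, keepdims=True)
            scores = np.exp(scores) / np.exp(scores).sum(axis=1, keepdims=True)
        fused = coeff * scores if fused is None else fused + coeff * scores
    top1 = float((fused.argmax(axis=1) == np.asarray(labels)).mean())
    return fused, top1


# e.g. fuse_scores(['demo/fuse/joint.pkl', 'demo/fuse/bone.pkl'], [1.0, 1.0], labels)

In the updated script the ground-truth labels appear to travel inside the dumped result files themselves, which is presumably why the separate --datalist argument could be dropped.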
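The tools/test.py hunk above fixes a classic Python pitfall that is worth spelling out: list.append mutates the list in place and returns None, so chaining it onto list(...) assigned None to cfg.test_evaluator. A tiny standalone reproduction, with plain strings standing in for metric configs:

evaluators = ('AccMetric',)                     # imagine cfg.test_evaluator as a tuple of metrics
broken = list(evaluators).append('DumpResults')
assert broken is None                           # the freshly built list is discarded

fixed = list(evaluators)                        # patched version: materialise the list first,
fixed.append('DumpResults')                     # then append in a separate statement
assert fixed == ['AccMetric', 'DumpResults']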
zdbHrVXeEoZ2E>CAw|=e8f}5#FT(J>fu0ro6K+U_|QBTcUQ){GYCsTW0%(pecY)l2D z#KGfoeqaTR-=}uK9?yZ8Ay&L1Qk>hafB42K76}k3LsBBcHwqB2=j+%F70)9e5s6U{eNo>L@{^0zxwBz|^?ms`MSRelc+-SijYBuVHFRef~TVI^kX!F+j^l8V(?)@JHz~7x*cA=W2;hbcP{Z+FSW8m%2=T2he z8RQ`#5&?vviCD#vZ9K3BA$F z3xn5J5&_=ck5@^5-w($kTzX#rZ-jj=mvK%<)G9nTAG3krxO)N2KN5|hB|qM-OaJ*Q zv|w83d@!u6TYuy%h{!vc`^(hx3c`_-CKv%S(~cu85NGlJKXz(!bdTvR+wX-{6^BMB z8U*#Nexk5;d*I)Wq7W(4k^ZZ>0@#umQCW?Lcng+2vm`z;gsgNvg(OK?W1F-$WI-*o zg0leyFe|?EgN+B)Po<^9l+zbqzQ|Zm7`p+xj`CrBQRFSSxJFcz+JcP>!Ir5(JoI(9` zmlx8!)*?$AYw~L8f17%*IFT#!JQM8((? zOMB+{ToD?-3eeGy^8*b1Y6}Ksht<>}2!Xm^N&n@@8*G~Awx2v$`0cx zJ~Zrt-@NILNC%*JGSKlmcb?`QS8XZ^j18AaiC3uZb8lfk{TQ4AFl>_fNm0UOtHU)O zx6d5ET!P1X=M!p~g~Ge3w~yRnEtu=pyQ$R|x*R|k3fPUn=v~#?ABh78ikjMIVtGJ{ zaVBYz`}~b6ZY4C;wd@C%_90nPiVCMGxSvL5O@*+fJE6PX7+Y+MY7au7nh9SvAmcP} zJ$XB{;k(ynQ39#0=nj9;{TuGeV^_j%e9s%@BA+MNE4cQstebt5qFd7Hr8!|=*{5+; z;&m|R{u6UHsp^M<7B~)j20o+pKZp?XrUlGA3tjxxTRQ>1fSYG4nrPpZ3St2%}h!JyW%+`nF4Bt5_ojFM4-k zuIfHrU~(Am9dmubQ7%=*(06}P z0yb`g=w8)W^FFsT(BM8~9Xn;g>#7{^fN@2c+!qxkkNCQvxNk>l^z*;ogVVHYgeCV% zUj_?tE{yj(5%Yrw559wOVQmPVs~8r#^i}r4!NaKC_^l0Q!e`?Zxz4982CZLR+A}~v z-Fm|(XE-UQZA8=;ZfFh`VpbjK#FbMHo-&nx>Jc4pFw_2fvEr@8S1kh8-@wp=uDfB1Raw+=VhU;&c=eB>LF2S^%iaWtijy0p$aev z=L~rEz1D>nFTdf%<9Qnlq^rmZro)TWj2XDm1cn|!wKxfbI7>JUpZuvI&Po_!MED3u zqA%*|SUi?*VT<09;iWVoS)d;2mKhe{yXsHh+g^0No||NT6Gib}5{7KntCQJpvo&qt(-H4dWP z$F`BC*KDkWROAi&#BCZcrOg=FA5P%S=@pJ%s}4DXpA<>oFNKP4glDuh%z#VWS42c< z3^ixT!-o7`J^3*+9;i?3=}jA0@HB0rksr8ye7}r$0BaOh$yG92rXfTIIloI*lYvw7 z37>mZyqw8l@6ls~q_IzMXvep-hHY8>WQNOYmuEEYs+j`s9)XS1$y)8XC*$u)#3{N| z$sDIhb!>Osz9LVmflZp7dkAEOR0O$qS|7@OzEki{zE>damS#nKG7ZuNxmz+E)Sa-$ z>}v&8GSkVFMaM|FkX>178Gn4$T-Ekra3XY@#wqtV0xll})Mpz~@o3BvP~N#;mPHEv z`Mwk!JiZAfX2^8e^rJm#D;B3`@-N>O9C8g(N7WF%4?18C<4Fne*d4<3?A*=|bUxv5 z**Q7dV(T&*6J^o{r1^5Z094HPN-Tj1xc#XO88wf;$V?DeP;E}HwT@!i6ysSAb0-=x zS#fmeUe!`{W(Z40)=FJp##r%!xVx4bo!*#3(>q`cCLVYsh*fN9Zou6DAb@^w=B<`s zG0V>2YmFDTLoVRbv@AlfU&L3u;-G5~!kX&MaLtDs>W=s+T(_g6vrhVSU7FC_69z?D zuv-D}*j6QPqnI(KY5cvKD~Ibg;rc}7{hJejSV{~ita4iWl6RZ@lSMr{5zlqo)#PHw z^c1he;+MFF>x&jCqNVocB{CDlw)eC+Ek$=h4wHL>uq*3%le@n7cnUK(h7!d~?>dz` zV2!93D|tAi)@iNYJsWEeOjID#{=vy_El7EeP{fUGu{~}HsrF{jRV+4d0{oqhe&(fw zq^{Xlz1lU4$}+#hE2R_wc}eVmywcRnhZ~he(;satR66tcXl@`m($8!6ro6y5?f=x3 z1dDScGissV^^I-BLpu@h(B5pg?R!FR+Aini^O0r+y};3Bn~IqKv7Qj{?stu48_MpS z)Km*Di+-~&OSSWkU9&EGlxW;w4qzDmx7QKh9n`bY zKPok}mW>|~Cskcqt!CF+$`=27nV%LOJC~k|@)1|EEn(L3!K;40#n;+)+%}R-WW_9% zTIH$#-X@!+GJnRW96*RoWuYtTPX+J|te%JdJ$ zdPbo!*0@-|-eO>UTM(If>tUlCRxb3&wb|H7@cR=|!h{%EI4M%jaXUW^AwMQhoU2{8 zjvVQ1?|S2RgX+0GEum5he27K`Rg3!%7?C*a6}o7QRjv!C-FN<*@|tPXyd4V!g_7#| z4*;i6I$^M3mH>x9!g{on#gulvZW_hAq1CtkyM(pd`B)(0PhD;Xjr}x!OH0BTLUTdR ziCx^%&!5dKnz|}2xsnp=4bIttmeb^rg=f;n(f^?9PN_*=|BHzp>(P+@oAR}lT))J( z0}Yrbtg~fY4tqz5i*0ydyH81AzyUDK@h8X%LrADMt9>synTN!rBJeF)b+uunmvNzDYVQe! zw-N&Bb<`Oc2Vri-*qH!JkNk?S)%w5Gq5I1qE`j`nu<(1VkssB!aF%CO{Yf7B$x50g z>+TC@N_H*{gZI7R1pc-UO>{94+_PzyI#xgp^vtwd04Uxm*PYBG|Htn|c&l+Lr6#g> z6HY;Qe*(?~Y6t5IjfEzLL?!@4DK_QhCVwa@B)Qj*)KY7oSNigTxAaQ z)=(Xzb&|)eiHny7Qy+(T+~WC;3K$X>p)!jJ?7)yK=$z}yibD65j}j(4rqp64ABul&3ktOIg-<`)VFo1BmZXeE(eYjgAE)Ni*2*)l=j$L2k?^ezN` zg%w`C3)oI<9_y=w@*HbzDAu{ADt|q1CJ_?*!QRkIaD&c(<22_U*x>@GYb?&%hKenzIe1mb9Nuv`-UgD`9Ymy4gA+(p9XQx8zLN*%Fx)6@+MU$^7|a4W zM8qx0!x+>J8zfA49M=+rDl`pPb(wT*mN$F*RStjIULw1Cr3Z#@81dC(Yq+g`95j<| zKh#!~=)X7AeNUrLX*%V6v0RNC@ckM31_Nn5Xw3rVXVXA|&faE-MXVk$XyIi}-;&)d zLhAXKV8UF@4K+cD{4T751L>BmRd1+&ASv{l-)ge-OR(d?>TTUEP#%J3#i`BM!)MU! 
zf42a1@ba4KubLTf-u*^yQmr}$Mndp}jlb`}Jx(IF@`&8iVyC7DswIub!0(6!pa_xa zPg7+%&yM4Ibt0iv$;ncC+-5o@CtzK)LVT)S5K#7MKO*H;n!FP1LU4igs~=0^0Yt&g zK^PNpz*H~ld*6`H;5u$x+m7r5-ai3RnyY_0v&DUT4xMFVcI0y&JnG++&!_~e=YMdU z*Qg2(nO(|kX?#XQySsJwhgwlDJf|rB#$0$*a1+jnZkwQ{z2U-qqBtFSPJrB4ymIJk zd6ku}50SUlCmU1G;)Dm%EO!Kz9Fxx~@76YxmNfokLg!VXCR>uPx+O#-+Bhs@cB}vF z>e>*jR5=d5bopl-)kvf~4EfGt*s`e)a{j4E5;hB3|@5>W|njjggH2gdTErbpx z@Es(3jmR-w0gyQyRPqtj8APcgzSmN*j)g}G_LSD8n#4{SAyrQ!BfA~cD#!BXHv#rL z&LS{!rvmkiQ*FVR$0Z5I0y~hL@n@l0l`_bS1gmMo;leve#`Mx@sOuUwSchUfO=hHB;&>dTtvO}N@Tm!I z7ygQ31UJ&{NuAQzFMcca+plMKFRoH^I#M||GV*HHnEAhI(A1LA(THUHSA;(e)A5tK z>g2-}@KAM%aQ?hC=0c_deT+hsBDW=WM8k9_kI}KMLe1P8z6=X3J7{eM2 z4a>`Yy}gp2KR2P|?!nu`{uN+(FH+ePJR1RLuJ;0c|yBZX@VV7(!3{V9c2;p-7pp+J+Mc`b)l=@@&X z$s&+>18TX?2$r+vpTrc$*S3@(0@MwWmrG(?c$lkksekZI!kUlwcJN*>dK)z)$Firu zc|`k|ou3MUxY?|5GF^_JEXSIf5=qtCOqzbRLD}h6{0|ic;(A!Jd{4?7Z~3X)PvY7C z{I+!R37G0nBr@@27}t_#z`t#+`9wSurnE1Y$LqIfjtd68lNQ3t2b*UuG53BOTn(>; z6F(BW{gM~8>fNwp4{2>Ij~8i=>?f)4x;0bSf5pgdv7#3EJ#(7hw!?m2V{-wxQ1bH4 zR+G%r{+dF44&>PqCox2iA;hp|6Z6*^T1oh zHYrvGMTdL`ohAU#lKaV6t3%}@?>v-iEswRZ->@XGfJk^mmNGx)b?y{8#6_%tjH z9a01kEJ%mQ96Ni)YjU5P#Agz#Oq=c7$LZNcr~~q*kEX;FT&fMFKnQgKFHW}R!F-?* zq9H0xQn@)YlDdZoSX-ENts|um{*1?*koh>iHj{c&BBZmLaG6p;U!$oWCu|red}(we zpSfNSzKK;n#ihiBR9_QNlos?WUWJrdG8VA*jpb7HDW${hnre}4@p|hK zsK|&oqz_kxCpKmzx2x0^V=?nz z>}yasjs5U>6xux%iMlh>?Sv2Rb%!qvNYX>duH|S>InjF zW8e{$*6RSTSGVW0P#-Lz6|Q%TMly_AjJ^1#1b5jprsck zEwvJCAKUOP;(q2&VJadT4${A`X1?s<%>BOFQncA)iio+s^kZwF*{T-^-pH z7M#nn9J4$h2B-U>F4BP>{;+)GfE5%vJKPt~Z5Kk0F~6xgiiGtf<+e(IJ4Bbq=y6io z#F~$7TCeG0O>7RpF#SLI3$^nsVFIV3uHza$#AMY!ePCydM|h4HZD{>%%UqhZsmjDE z%vr_0butENder#{ofu^xZj&c=n<8NrMxLEhduT0yltm)QncB`8>Yp5|API<4`r_&D zTFDZM6CR}KY>-P_b=@7$x*?MfK#2-SQi#}5dBe%0qZBWkU*xISVubr!)R%4mHJDzv zs>2lkKw;m|=4h|!Fgb%tD~Kd~P~8uFZpnE8xqMS!HU;t{N=_%)x@&(s?S>GJ)qhhSJa zLYXzMZ-dvM4vMf_8eVvCGnIOIMV}xO?uMvK!_wCkEXQn}eeiE|>eHIVlCl^{vylOk z)5=cvqxaMzp4rLha}qfJe2rejt;d#d>F2v{cgc(g>VV5-+XXqQTr^u2t0o^WfuN+2_rNeOt>Tw z`w@Y>j5crOU-OVk-bH?a#C-88FEWGTE7;o!C)FpC$*rkm!r%M=M?kp0Jke6foY?PAMQQS0kIM!^DSlp5fRVjV z72oxd2DeO@;l7#+MVp19=HH6WrxLoq$i)kzBMzW3igirLZTn{(GH%7n9yY}1o!mIAJLF_lgy2cP~EA$Gl2%6pDBlzU@GFczIP3Empn84{|wjyog~jhz!?>SPUd` z5~7+K^uSt`kw3&$qKdgH4M*Ik3R)lw ztnpu2uRfM_I9U}t`g_Z)Rr<>PX|7;1qyHYPagzzMR$E>%m2M_J;wBwa9x!7OnciL% zUUbRTj#kpV*-5N))o3ujgz*JCVdKTg|~=EaR7W>+EEFS z&s0$o17>$y^;jf7*``2q^^o_9Rk%zGP|auw`V|s3kG$tw!wxk@Yx|;FNxWgr06jiw zalrUDB|>Y|4x$Vbajg`)rUVo^G@*ckl%+G_I)?L74uG7bB_#tR0SDu*d%>+W(EC-S!}%y|p>u1M=zMQTZhZO>`&ovCH-{rZzU0-G9~s%iG*r~8(C^4To{1HKDpAqua;IQ+}02S zx#*SPJ`ezOc;g(SfWiBs)L~9B=p#F@a4b2EEY^kG{F!b?1c>$Ph3>$&v!$;PQbaZY zYH38m*$~AM9eLD(*!XWyySom9ceJj`?179>O~K2He^YCu%32o7B0d5Hm!k3GHu%RRyC^fMs@J|6|6fvIYpHHaxrlGl zB>|qb5EBA0eFO|20uMQawGkx+Kz`1(KDC5k-=fc#_bZ}kE1=a2uxM21{{${cE~gGH zPStItkd`W~$#gZHi@tz-;hn89adaNl_0y_{vUm4?_YIL@V6*_0X-hTeR zpGT%(DR4wkV0sv_1xZK{G| zUdrQwK>VHJy~gww#w@GwX=2BOu4xz1#O!&C5X&TA}|0{W5ABlx%j&HvleEuc{ECESYxkUVx{a@ z+N!!|C_TbKL&VrG%t8cV{gKrVzU8Q2%TlscQ)oHm5t zz(ZP@szKMJHCC{9ZuBd5_9OsZxWL`Ae#A}TT}$WHElI2)@D&!BAuC=!7BH4k3FbvC zlLQvjJaxTGQf*7?^3|S@H`6_DrY~Zh-?j)YIkIEZ)#YBG8 zW>S-*hl@Ja(UlbI8nET;-*WEV%0s;xr*OMn@G3b6o=px7ne_4%FgF*}s=9M5tlw;- z3v_j)07lWc)Io!XOgzEw6>=Kye4X!*^){++h1B%xDUr0D8up32XfEaNWbp?W=Ru;1 z+Pk!*O@#T}F+z;r31xH0cz~p?nkwo-5aqt>pfS;Ql2GI5YCsoHOnp%^6sE zJlpa+ef$7b(@}UcLVsyCTonepS0xE9z!u=|M@kYI=_`%sLrw3r4`%bD{k9}^f|^D3 zKD}ZEyl;d4o(WpYZe-Z%D;W=~fMr=KXhVMb2`&A`&z$pA+;e zvZ{!Y+U&4qEODs;S&Pyo_%Rwjk{ZE6@fkVzZ86Cs4C{U+W5i0a_t7nnW@%l+q_zF3 z=c}vZA*qQlahjXY4Ry70U35#Wa$WIWNvLQK5+vlUKslM&|Xb#`?7rd}! 
z9e56Z;b@?*gm=&zdU*fxVt{Anc1Fn;FGtF$B8)!!w5}T#;c3k1EBoH%B2mdc8*)@c zGI@_O=xh;`CO6TPk3ePsddpLCM0^eIi)yh~p7aVo@k{H2mW)lN=5|5Rp19wCK07n~ z3rg7Mv(=u>fSq8nMBs^*L!qvRUa9F5T_75hWs>8>JdQi1zTG-x#l5r4G ziCw}{sm5qGfTC60A)e}V8b^iH?P}6Mf-3+~Uf$_B|Evk9({&qF3v9{kfuDk(4qHiI z(--4mbi!SlJ?{-X#`qt4m$sR=rbVen6V*UQM)zD(K6u^;eZL8ZvqTRUA#PMmt0t>V zoSTAzb4Jv!e6@7x;&2hruCIGi=@Kvqf5@_!v!E0NOsxU)b3GjIzgyw^dbb*+UXNGe z_kco_dS7MDIoyrL+>Ua_4#PCcUlNcZ|KgbIDvVtihw>1}Ftkl3@x^O zkVqDj;zQ2a&W51Ft>> zJWM-MLuA|)isAYowMYJMUlXNnyd?_xw$4~OoHtgq(|qxS;430}D`P2|cp~KXzrklD z$gBzLu*a{VsrV5WUCOG}pqR)Sv6xB8ry!S^`DMzI*wAcMKxdYUUNBX!k*W>9c4C>Uqn{C7AT<8t;<+ z$?xWb%b$Ke!X2P&I(w{!L}&id3fw4E*``k{WZjxOMPHI@Z4J{BkOQWY?&oUkuD5Zn z9yzSMgvrz6;9GoUxQvV6gIWUS7->XjFLf8N95aK>}25{+kyXgNx(A#SR6fA~KHEI@7A;z}ePRQe!b*I%UMdCe3SUl$4{HxYS*}?;e@#)DHB7Q^qiG1iTvqET1nEv}UUuLO_9Mg@C$AvSaW2h2><@*9F-JzS6hOgvz0raa=gww_`N_00ijz^cDoWHY05(V}9|b%SH+TDg}1QSBJ@;7++jqsipm(#dmBX)VH2p@Qrp zG~@oOBU7l>rny-O1YrRqzy}F662PRtY_GRQUePx_uqDW?J$z5uW0T99a4P9=iM{+v zJY8B3Hv=q*YG<5r=u&`qt^o_Y;Eq5zWHSg0|K)AI!1*e;A3)MMHL@NCV3RXY{=ffc zoDnKsq8XLsKjl)n;-5OjwG4tVr6^Cs9GC;MYY$XAFBlO;H7qw5Y?sp9@sr+R&L(K6 zzc2dy>Q(Buz#<5x1M3NxWv8dassMP}wHjk$md7V7SHFSP16_K+P^t(OcRoy zpIi!ZLS>;U84ZYZg|MDPMd9cUsOOaI&8U2V(+m89hKg9lOls46crX7DL?koZ@vZ;J zRs#VQC60uiJS{Y_MMv#w$piD%W__X_kk)Tigw}QHfFrOv8%1ocpt{ScuPb6E!2-xWS-KZO+cq!RW9r<52>a(>V zFKic!<}$qg3ZW?32bT_bKp{*QsWw=cL$wan*X>(jmvsWz0r76uQJ)~FW`zzlxXiU9 z#9ur~I0@{|6itKgB`+uVs5LPEA_-_>ALu=a-`tMX*%~qebY{V6dz*yA_A;wO#q8IB zco5pU&Xjm;c9K`Ee?@Qk%oef+iy3v|`siyi=NfMUQ10f7vd{7X7~!LUsvP^PP^2u6 z3WV!en*p~waEFO?h&uDn$V_p>UR7wI`h;&dlp#TO+tR3e-)q2_x3?&bAb-$Q61}QS zZjNFaD~}kEUhM2491lQt9F_AXDN<44AQAL5EoNJUC*3G-()-*2 zGV4vDt9!jR*w&*aq;Pbrc@(>{^CLyZ)OxKAj-pJ0SGzz*6pK~=$259e_0lA4#!;^f zR3Rb~%CtNeumxxxA$-Py79J$>Sv!^?(bJe{z2r!HGv&e-g#&=Hn6F5|9%Tq8)}(x3 zoFtZQke9;We}5T@Qf*!W?dF-I08(S(&*LniYz18imAf)2^BsxO_#1Io;$*B@oZIJQ z>y5wcJjCMIj0)}7cu`?J_pNAN8NfGZm5zsO(|RT6V(axnlNg-QKS*XxOe)mFiEc_f z5`iK|End;S=3tr@=78*8% zLeJz;y}a;Q=!)rPi+NpmDgW&%4{RYm^OO?)6nwH?iSch^POtmln@uU1YG|z+BgrCd zIl))z*m3*eI`hQqJG;Kg7(dPtC3H-S%YUbLf8mVnP!0x$K0+{z zvYx#&w*mav{H~qZdsJBg%Cv5upz$g7mJ$00NT-sL?P<&lZc1vS)I3h-%dWV|AzTo! zorz577ILnI*0Cvjx3N^4uTU0ceE1FqOoP1LUv%w0aQK_Z;7QbCLK)c;JrSEegHtn< z;pYM^%C_YSmWijMu}K-dnJ0nhMcI{JdgQQ-yznk3M((DybQXa31>@!#kROyA(_h0n z!Ig*Fb&SYtfj7#SYwew-7b0FxK1)y9oNj=XPXpsUgA7eE3i9Zx4nsz!b-Ak1Hqypt zb>a+T7D&%6i?gnWZ?GK)L)mA1-h1m%uT(nCLkQ=duMBra%(hk6w2ucTCzR(K5L zT|Wvq84e>3{tJWL*jB4nWV3aDnM=Od>jzTO;%%O89{)`)WlvB9im54s#32i;d70I) zLD7eMW@vub^M^lsJ4B`^8E{RKn*NWb!9wf9n1(Irp2&_427Tr_LEd4S<9c~GzSggW zLh`A*;p~Wv`Dh6~`cux+uVsR(W%q=({OS1PfNB0m%1@)r{u7C;^+%t!fcV{zH(M~glt}^X5$DVi46^rH+za6T6UWk<-pSFEe<*+((u%I6EwZul z2Z7WnCzPkt*7@B-vL2P~Usa>Lr;XMB)14;!j8+&p=5atG9Cro`FDm#qFK&bV`UrGB z_39mv*{RY8Xu5QyI-gj9S->hy_y4wy*j=fGL;FC>PfgnAWDU`JJkLs&Ne#e>vT-YH zP=mIpo*pmML|w3+jI+_f-9B5Nx|r{l+V$BlW1{2+r@r4eoq^eI2cS#(R{QY;e=BZH zq=Qp^Eu<}nEZn&H?@D_}gVDhV+~p+wm0~qaXMl5eZ`jqvE>dCEIoRO7ug!V&aPfD- z2jH>zqyT~dN}B*cz{H6tshUBxviV<>j3n=VY+K&~6yDT;Dl>${iEZ1ax%ZT>eP>fx zkq;M*;qmiEAsS0(7i%*RR-li=YRjitv9LHBe{*wE3tSY4BWt6UmU{gW{$~hp;niey? 
z%oBM{8ZNvXf5WzN&oh+U?ANp!)q>yhm-2!>zA)nAKk%b}66f>bZH-_9IAaS%mJ*(D zS6T}p;DxLy8AB6WF|LWirel|7mLc-3l5t*sKQeAW4X5rDbj-$Vll;&b6>UVgU?Gswg757eDjTMzB1nbuTjpoAHeREVu!ldcU*%=ZG%l+NuNio|u)7%Zl=86eb(=y!G6m}?j#Z}6|jf~6;UBi(&7Ql%< zxRRkIz|EZa;839|39%`+Z~Y>XZ*HUpq_@>=`sLos{_?MgpH*hNgsP|ESaTm(O0#t?ltmL$O7>8V#s*5vh5 zRmd(6CyX6=iBOD#tP9{K9vQ$jdseCO4$}sGo_qQHZS8Si@Le3-sB|nE$pv%{#!0C| zSK|0QxpQ%Q$r%AOp%F{+S0V1O2KuK_HZ+u5R3fgi^^BgCsfSQ!xvvgkQFHh|YIJv( z$nRohWtSfKJwRo>EamfotH-N+<8_iH58c~N;(xw}_~C&T}I{y5>RSp2>zbvhtHJ$+Uq$zCRB z2F^Dqn?UCtafIQOZHfQ?qpQv9iq{>fiHde=PB1?kS{ft^F51Apyaje^;5oN>kel## zHizV_<8GET!J~2bo_TeaK8x+=LIqcEfD?Gk2U1Ka%IBrkM%m>dr+8iHy56j?h(kQi zcHt2e49DsBLVW|Y>cT^QwLzf;h{;sasX)6$-PiRsybQGY2PKlltF7(xl>z7;Hb%xq zn!ZCHNNIZzbl$FQZJqqY=Zikc!ru7}bb0Xmt6ayv7EAP|s0zk%OyTnX*ByOhy=gLD z&m(oe<|APe2`GHkD^x7@gm1;Mgh&16E&tg42T)d^t}b^mK*Sm@A=oL1^7GsBSE7bs zgph|`z1m6cg{V~CDAqJDb|^HAV~yZra&a#|j3Q4Bmn!uIM{ybcWbua_ROiS9If9N) z*Hy$ftk0&RzbG#!w>oZgwA!*_M|8D3DD0V;KuxNUo)FjAS3csPrH+%2Vq(3G1#Cb; zWjMzNd(lkbe!~vC`qE?RTygXHJO3GCYk%Rd)Gt#EbB}vyd(9)Z9@6ac?V89+in8xa znt`1CVx6Dn?RPro^x3|?BuHJZ zjQgq5z?IMg#@1CIOr74r6Paw=?{30f>)<1-3Xf0B4)DGD>!zsg^Bqxk>E;Yg$Hq0e z(_n7-D?eFm`V!|8QvaqAVZYyt@aOk+V#uRdFPVy9UL{!rI#ZEj%ivuyDAgGOl9B`+ z?aV05F7KOC%LaJ!*k8_43(8Fx;QyY2m>SIgAv@u@E{*S3AqA@y?vM$%5K}`5OG0l& z!7{`%_104y#yKD!=w)JfDYZC{oI=FW`ERhTaKs5KryaqR+t+w3}WXSbrjoiwHQdYNtcZ#x?F!`XgcGO?QI zSm;!+rPyL^CDahGU!=i8U?Ur!D!T4z#p-kVa>?VX%m7Q6QN|!E(#^(x5k%{+GL7X8 zS$JD3vYE=k)T_fJNAz1~3_Ofgwr;Pj)TL;ywII<;SD{SSaH3XQ`l5#v1^yYSEbyQc zFyi{oD{K7;q@*E{g~%}jP%kHsTMjD=$OPmyD*zg=e|R}SRNo#q5t0jPbnr5Ip>JwLql8{b0n0B;Y!WN zKki$I08k3WVM_9_VHXIhaC5;V5Z|9g!vwO;G@R9`C{F6ebTj|f@m}3v*wq3(@|-E5 z9Tzc~-)8)Uw#pg%r+DAVulc6A&|YR7=wq~_1(c3tX+DuxaAT!?`v;E9dOFn2z3%jI#yDAR?5Cq!+ zZsUAgU<>ivSccSJH<12{BlcU8rN+;14yA&JDckby_EP(K{)fKJ33!g)xO(Vxs+*sgCOcZYSyUZ1y!$vONcEZ{# zOx_Pmi4}8&&XlrjR5ohEJ&cb;&dAj?BMvw(GiN#Wb7p)N>*o%TYXD_Ov>?9utXGER zq?sXL)d#kIwh|8kTlD}){KF}AP*~>uM!0_-hpZf4+Y!$RLw5vR+_>3_p7sG(TFzXR`=rso* z!EkB6_lsUKdWYO}rW}94HtPjpH{aMQE`W+FL}c-~AfSFYXkRe`K+T`KImYj~10D;D zwkMC-1u0eHC{*c-yfmV>&mDdFh}IGQlA8IwX*oXJRo}?=`bO&225y8Q2|YW20yxSL z$oi$(nWR%!n&~0r2|DZ*XSjPr4wKiL@kuN$Qv^p zI5{hPoEmL;5`+0JdCM_ksUJx3fwmhx7^tIoda$cOW@VSvd<1(Z?csk4!>mkuW9OS4 z>09&oNLWv@tAmBc-F)Lpkv1(V2HwTLUMorNl+HIohs$L} z`!*b0S&hYmezAOP0E_q8*t%C~-mvO%VwO6L^3|bd|%kPT7<(mjkb4vKl;=0ZD zi4XgFb(#q}H*T78y2794e&O!+eoY72-Vd!GtksX4+f2X`4* zR=TB%KT%p$8N!`S|9oc^JQ_i#;YF8_z02g^E^1^PyP;ofqiU6bHc*$r0v;5pp7Y|d zWJOo|gD9S3651_`aLH*ubRI`FS^|lyy(a`iB^uys>%ArLySL~{{NEbZHlbkXh*(0_ z7g1=&V!*`3S zBcp(KN$~!Ggxt9e&xg}Sq5;E!oVPyoT9Mr)iJ8k69E49ErvOsItxWylJvt@yZ)v+_ z>U!=(^uepqvdyhe9jI+M-p`f_6VyA;o$_3hCW14H5Qt%-_Nf^k+@3{O$Mo8RG&iJel#f)S?t71`;xF5dPTqv*%34daDU?6o%>fxbp3>?U$2q(~24>i?ek^fZ0v+Rd@ct?WpO%cVr zMKbs28CK<#qF(CJh2nk3ZJytW{`6;$Q@5IFmA9qroCK6p7U|mmYMY`JGsz^K!?vzd zKAx`k(DlS>Bd`<@6eErfsTH@^&xSQOz6mh#@fHoBiOlF;uaLe+yx3K)7ip5)L%Fwj z4-g6Dh(p$P9vUw9R()~aCGKjrCO-EK3*t-rozIU^zr)61p??|{-f#LP<2-P0KA{V% ztmK`!RUZNFB0x{q5y{QZo3_XZ!FeD`al)(bqNlu_3n52KtRCH7Iib>18#f{>-`v2U!UKsP)93X;=ADL!wRY+r|xpvPYoV@S)1!oCU z8JE3jAXe5tJpB5ooiU+)(&wk7&cev5B}M8|qzX6=Ti-PiuD@iPA6D)(733inO{Mdpi)+rFU)&3n6vaW|D=X&nrMD5c;%EJ z#0Rqs!E=)@nxR-?MwMLkiEX5lX0E(O<7)s};CSwFk2U9aP>1(ZV#J=fOOJ$AJdE`@ zFR`Q+W0muGwKWzt_rfyfJ%JHt)Ar;UbukhEsW$YH*#bAjchiP{I)W8oZ0;_-B78W8YMQn;B!a)1+bk>&{G$ zICq1|k7s^lyna6}^rsyt83rGQ55~H4{#2Oq3-~x{r=HEcosqYkfd5_yC4L@f2u?5h zw!v7VrrYOk9!(i1AGfzQ*Z#8`F8})tWkYks>D)GffbC0Nbg7*vWUCn|;f&KJl;Ii5 zS^Ck~SS}A?a5pn$Kz!q3_?3S%8JX@gU@g%dMnc{zyi0Z_!#+*vrNmHv?_GDz$>s{7 z$yeUZ2|}ee*(ZtnDxO~bR(P*ZI4K=!Ga_W=lr8lpq!+H2^bnWtZ0~m>*twc#m!L{s z&$%%?_?$Xx 
z+Er?o3SRp+ly6iIc(}n|y%m1aEmRlkQA-MTer=Je@|&*&zxDAuVco4d1Wc(ID*XvA zKZwGqP*T0Y&QL6@ohW#Hc3!I(p@+7r&x72oGkG{}S)`;XjR*@hpI}TL(%S@Cy5ORO zbtHWFhsH)Pr+q5Up>I_tmssw?iro)5^ax)gq-aJ3-uLwzr z4by*GLx5p&QBfnD0>I|Gumz)5aMNqb@V_}Z5@kN$fWYd9W~Gbs$`Bslj$VPOqLB7N zC?ED|2Pc=oIGxy>U-3_rl&=7rqiMf>En-Dz?g-5TL+ri&0&Xy{MkjM-i7HNP4470z zNB+k#huWE9K_=Yseb zN!=Rkl!<;Xxo^??%e*xn0IT!UJerkPpyEpPXjv4Rxa@$-&*{i6Q6LIgouLS45qe@i z0<}C|=5~`fbxP@y$ORoWK;-QUWQ_DoT*|kv1`LLXL&v!ZC?6#V3y`yid=utrsB5fH z8{l(#hSt83d_L90l;c#gd_(99=0}7b1S4p4oD~sPD6KbyV!G45(G0%P$3Q3Veg&)I zAV8d3r-cDfbD>d0?Kt$qnc(=GQAP~G7%sZGR!hx_-zJN%Wr_+rzt*AhITeK#QhcZw z3x+t$!;AY4VJHn$FDhSIliigpzf1Pr7ElynP3npe7YWD*ckA%T=lMUGBdT6ZC1G)V z?CAeiYpCBY8+-s6Pl)h_r%s}g-!HKrhozltUMH};(*zb0(-STU3@5$6&Y?wK2}E(g zXMJ0wCMp9Tixo=MU6Pdkvp#DmJFp#l2$l>{U=5qfWi^XWCAcZX*CtODMGL^>e8J*~T@v(cZUK69?k^A>wh9lLrZ-W1ZpP-yEX~yDcy?8Peq|js zjk(BhdPaC)_JBOi6&u4e+mi%50|dnXZM0r&&<9?P1YB zR(GanzM>|zNIm?3)AM$e^+lJ zE4j^)eyA(XTvcQyAl30)CI#92Ito0oYlc)TVtz+`@US{95Cs3#hj0mfEvwJ z=0}2BQ!4+Pr{YOFRBccC&Tt;P!(46xgGWE%qiU5T4TqIHVeR-nVYh#klFt)!RLUZ% zt9lDEQ5&&{Nryr0-#16shT#!QOOL*}OrHT0oTBfZ2}E&SF2AwYnBc=5BQzC+RXkk| z)B2n;h*aqK{|w!WzqbPNl5x^Phj{5=^ju|ayPXZ@g>lnT+<|&~ytX79gQb%U_QpsV z3)YcK*HL($bo8%7in$cKW|!kSsX?QCY)^EpwjZ1Z zE*IqTPS2N+Yx+ZjU9%=iaa~J`-G!kL%=lrxM&lrm?d1Ng)LgT%Z`K4^1#AHvEaszyJR-M2S5WjQVAq0RYLv5oVP}!4++; zyn`Cm7HrpU0yqDN{4_Kdl#*fn=#@lK9Pd9B0WhH2t@wspdYLwaG_t;$wE~Z z@QG;J?ZQJ_&_pF}S`uA7Cy51>Fm8|%++0S zGV89TnMtf|Y%lrJYGDq-m4{z-97kuJ`6g!KV|Q!V9~wlXTP(vDBwflTGa`*gLB9tT zP@_Z2(reYNh3BQoX`VK@@m${)8J%GXTXY~!K6Yh7+=_RC{UDZOPoH3wVkh5*D`)o1 z0(}1;EuB*MiT*s`m@BpRV#86{#u=<7aojl(n2S`eiV8!qw6BZP)xDQpR8;!<#(agc zZ!UEGG?wRg{tdqwtR|D>*TeO4NS4%E#=?yLGy$cet$&WDgp1mI>Cx^!RW$z>EVa$9 zvh=HB)$ZXlE`f%UW`{%!nPk3U(7fqMYXQQjNmOEr!D%(y#`L6W>Da6oaGd=pl{tx&#u%#W_WMaJKb0PR`>^9ct zF}G@Rb0QGWhZ?CPGNVL~Y(ZX&9*U5*&-6|vs2Y5>ze8v+H~RXu@o*Y zsUiG#FrB6oDPcgOaPk5ZuQ$QHc&Dt*!6w8+$Jm7?_tW`aMuRB@4yWI>OeO+R96pf1 zVY%Bf!rHfO=eJk%_2Vt4h!YN=dqIC!jD5x9RC`6#W_s+(x(B!6`FAfGvH+ z;(Xg3rR6!#bT2CDQ^e)5`ggm{Js|h0UguiPyk-}nAeGhFZJ~ZU~p2D^IUve8;uR()-Z=1ddEIo4gd=ecxQW;})wL zVE_OaT>+k@bXWYiGxiM3x<_WlEg^l~86_V9%!89eXD=X0_kO;l)YV6*-?~ku5fZ2| za>T!aHo8{an+gt}RBi0iTkz&!z0(`ySq$c2;Il$Hi?*p1a<+gMA62L4ohr^_UOO@k zB6U!ws@jI0X6J~QcOP=cTJDzBp8&uD0zt!f0&^$5tfr6Q9D>sgX~!82I>P*K<&Y*c zWVA7OJRF%URutkOTf)^7BdLD@uE&p+qC!BPLawkf`#mCyWC#vF=2w0Qc|<9#gPU1Jl zi_1N41xi8hm*+CC1STyRYEG?A7#-EbKXN0S=pGORw5D3Tx9|GtdXdF26nj{}n0>91 z>zP*jNF?*{oxr!+sp?|PI15i>b^=OH4Y1!6CQ>!D1!PL&HNA#)`K9mi7LNat445jC z@tl(W6^O*k|7G>7YL#Z8me?mT%xZ@CLsHpQovGmv@ea;iS#=dXBtB=lEgxB8RgB?E zTFhzUtV_r0tq6&qEgs3D7CkA~!BE{v?)xl~Jb`s&__E8|X&`JCX6c2o2U0F7hw=9@ zcE`0zFffA~1j-Ri8|P>-f=*-G;tg=zaOd|o{is7HKI8~F$Q19`if}jG2$GSr0dJFl z{wp5r>ZL~*!bkV5U<3z}lIGi3UwX#&(#;Q-ma>kp7=@ZBNqYpJclO#(kAD(d?XSZa zBl&(=XRa-Jdk+GBlSINnH38=pBiVRllaUOQxii0!12rostbQ#&@`lezR6&tRZByMW zPXLZhjVI#X0fIt$M74!3Um7P!#KB{3$!0@9_Zw6Hxgxrfwj1uE_HEt6lU>hwAaA6p zjEH({R6b$boP++_+&xPlhPHAc^gQ+rqjStLHKLmVM@&WVPX%%Fx-goOS-zs3o~?|P7jhN5H&LEkmF?e0l&%w8bV>{%jNO}P#Ml^; zELv{hp56cqoNedO&Ik?jORvbzr`v86{e-6#VEiZ7gAhx_&H!{aIT<2s$cmz6#?^3< zqF4OzyYOI?x`7*1G~F4kL@5|v$3E*e82a&BGq$Y` z57ctbt9xTOXYc$5uV~=!W*vS2+(}S-&1=n=VA{_|BPK-o9TZq+Oym0#)cG*x#>!Q( z`5XsRXVF{F)t>&Zw~WZvVv4h9@#75z9m7+zfCSm%MQ6k(4&GPnI?veKmLm%kV^=-9 z+>JqWC}v8dnwks;HuQ{bQglVduTp-Msx})mdJA@JX(u>teE?gG5cC+lsZKGf8(uCw z*z1L*xA8pq&`A>7I!T?beMs;YM}o;RrV*gpVD2r;rv34l)En1NJYBMDf)KCKYbLn! 
zKaizm`e@mcJn3WyU_Om$`1|soR6!5wTqHj-1jyqPV)7N(kp@%GHgoq*6cOKGiOigs z2&b{Gk-N_@oX8kBRp8*fz!urLu%L~ghLX>>sOTBu&M^lh8B>p5b?gFPOmwKw!_8;) zDd*;PrGMXLsoa&b(+F{D7hl=HMGcsRF`hVoldFm*{~Hx$q(2n0Cts)yval z(AE61eIuCVenPR`7D5&}tvbLXMaQn^wf)WHju^XPo5optCH2k5L^HX=KM zBuo1NwKpVi9R%E{%rk+K`QS&nK{PQbOPyZ?XaW7%hh!^uz*|3*rL)&90G$6}K6qx? zwcHbZ1d7l7hq$c-*q%*YQ~SPSD0*`6$4FJMa`;GMR55&Bxvd5d$V`&@(O+SzIg}iUB{Yw5Gw!rTQX^UC*BE$W3VG z*Z8Q@ImKP)HVwv@iK)RnPrfZw&O_C1_K|zk!YG);STf7}>-OS2Nq!=K)tLpITbhO* z@RJBDrT*AQ+jpruMhxVe{=v_5+H(-sF0 z*tOQPT0P3jnEiRQzLNKnVMWSGR*iJL6x!HJF=HO?`GT&iXzkQZ5K1R9Nu#34#AC5a6L3c;JqJ)30~5l$k@gjUb@Ey0 z>?;y4GP#epP=`LxNVsvzqZ|Kz1SJIci2u6S6iedlO8jBx(ni(QcHW{#4EvXHySPom zn7Zl372r2HLWskbk&ztQQ~p>Uue`n}TglX;xZOshe>gKK7MTf+x|-MOf;+!j%nNqP z&|g)&3d5jMm*LK#94zTPm9+2ISt7DSv6Rv;J?W6qeN}g(a3rEk7XSQF*=%k#b66Dp zTFgQBjY**vzw`RK2P0?9YzlcZ+a;5MO+Eu9Bm~%J=_P(a(l4pf`jOI?Jlv~y1RiYQ(z85JSxgRSfOein22?OU4Bxl11-Kf z!tQMHW&LUU%ec*i+DXfXMsqLpB^+b%I*3vpNsR^Ag73A2XmZUM|UxVCrsGm z94U;IXfqjJq!HGaM(~%)$4JToBP1hZF1m%0y6ODBKbWees$>##!xn7tgd<@Eoz z#22`$L?%?RsG(kN&{Um!y$Kn_1tx1tQ1*CHoAy2&x!l`z8brN_awq3JuG;X&jNWPwJ4BAZhW$^H<< zN$^SZ+16levGLFW{+M_-gUXW{Owf+E8~`EnUU5}?{@q$GgoEK_>6l~gI&XT$k~9sf4h!gz`nePMZu5@F}v z^Wrpqpp`2MJXp560KP4$78pn$y$1*~&$R{s{4NvzPHk&qa#ZC3@cO@0e3L(VJfd6n znU@b3kv)r@22OT2pb6Tfa3Eul_-ea8Qw1iv&YLm#MO$(WCO3E;8 zc}t8knuWxrKrq8f^7$gMSVK6eHjy;bz}1<9GD|x$D`-RX6k*y;UTKD2Mfl2z@ck12 zJV3+0A}7IOysiuS1bpKdmdB6cIi!H(-pm2>KBruGVoby%IC2Q&_OQr1XzehCuJ*;Q zm$j3!)N=Q}m}_xS^POwhVY6X$A*kh@FgbSsv+}jneFYbnZ65xq4a$M$9yDh6_6x{@ zhk@%5B>hJX`-ggh84r-%>O%CS1T2S}RiJF;Y7;*=--7-}nOi91al`k38o+T%MJBu?#a4#J`s%+=r z+0a(JS;G$&@l3N&x|~^YjLfyJpiicxBRFLDqT&*lW|*SipWd8M-I=jnZ$eAY@Jz1H zd56TA02a!0d{MCKtVw!x_eeysREx$^NxDP_FU|6m`70+3fHuR==3h6*S%M6)ib0yK z&6Yn!K~2`v}lcOFeuR~-X*u? zIXyM)+=%A6gk|>fw^Y;D;W5;28+49Qy1h7S%q4|ryj!u^G$kS!h;gwUdO3Zr{UTn( z-pZ_(UFFk3PES||f+Au_xkMI6afo3qV|i*o%O2`iqJ zvx2BE0bh;3$d}*40h&5aj={mku79d(G$=ZbD*~ z1@>Yj>w#k)I@7!g$B^W@z+~Bn5u6J9fw2uAWi!hz*;uCSUkbAqG{6WrLsK_lT)B_N zP9nsX5w%F)H;$Bo)#n}RKp?~xWbRuio$M-!cBxMd89N~QfQ6w`h)kG=x0|N!s8aUk zg$=ILil{d*8q}NXWSs~5iWK3-ETS-P3DPn^)6Tug8+n8BV5x)Vu|%4~pi0=l??~CR zfvEJr6&QO2It@cFcxhXG?S$TYs&-IyY5cu^N5zwC{&o9n6gy51>Qo)IsLTcvOFUe{ zaXdH>?1CS}@{nTmSVKa9#%uyXpC=T5=+;~L!+D<@vASB~dioNUNSexCk>e$1vUUsP zPl!BaS-Qp>W4zU@XZ0JZbJ6~ZogN_DksLc-=NB9F=45CcMxRNfFWd*gp`xSD`)CY_ z2%x*7ZyeUwHC|^zle zwUvE}S=l^^zEeq}$XV@FJ5@51aCHiYPw+{EpIbB~<6u(H8HTXv5&Ccuvn=iQ@#k4)P>rQG)7`SIKmbHDi>^0bY8UNI1gDkEny%xfvlO$y_WJMZlEx1jM#; zT6s&r^bJ#1Q1`#n9!sQ>sWe)HbP%80Q;$Ea+G;+qS8+&@Ix_;`Jz?{VHS&L?#lLbZ zM4ZYj$d(c{w|6G4(fu)U{vNch3p)jv+LKMm>6Py@A?_R(a7P8*?o%L9mZL{h(oRMn zIxs4!_ts24M6Kn;uI4^ST+|3;k^DVGP!HuCcubv&>(2*LVpM$68&}B5_{|cYW57nP zTlm&T|4~F)*f82w0ui2Jk%NKkKLf;mWl-=N_zf#-+ig@50YhT8G-)={?+d{5hW#8L7^YW z0tZ~rr76kT7v6mtkbiXeEH+&2y}@MDj0F<+%>^KdM@@qgQV#-Uy{;B;kb6U4PaIth zg=axP!#qO~?lCNnw`;S^p(=pB7fO)v3H_W0Aef1)>99igs_1pt0J*Xjl(*C8ZXq_z zi=`cL`T?k51Xj?n&0uV*J)+QwtGc@X1YBw^fFyD)JCz=z0X$8edHQak2DQ4k)4OfYl@o!K z*)SS&#jSzFnvF06P@;o)9R|lLIY;RW$ztmnzafJ!dn;$1^bDB>3!}R5Zk*bK}f1 zg2{@&%qPsfiWieMn4{H4gE64P*Yn{9Nm#A8Wi@<2oi)(Frt$52PQQIEPaRHDV)TA` zbNQ^Xw~zE)gq>Fb3K#F|hgNRFeB3eC_MEm5vE6u%E}6nh)YB~0(7Y>7UD;UKcSxDr z01m|~Se!Qk<~{qdWDgMfJkq5Kd$dj3a@R5Ms23!Ij~b6!IGdFxX%411%(1*UB&C&9 zr>Tne%6Z#9*bEUvgq+fM@n?k69AkhloF_g}QetJPQZ9aAN&qh3Qvyrwl1Ub7BR9`lW)`++pY zaeTn&hNhnjkd!+CPm@;W0aETjn%Qa;EX zfz=UT1QEGG@sHV(GC2?IqXSW^xjt1EpX1n!0^ z6#e3^0qgxMtjn)j^bnI_+WVvc6wj&FmT1RdnZ|Vw?Jk)I6M_3|)o9v0=CoT*D#%3e z5wG^*YAe%;X5A4VQLUxwf}-UJ#EE1fE7_VBLFgIF5Pt+*^OJBmS5?t{`3vP!P@xF+ zwZBV%_1(f&<(n=L5%BJm`3{lI`}2fy*pv!2biHFojU0~U{B>XY#8)KfPSJ2G{<=<6 
zA-VtTHcny+ol(&}VQrQ16r=t=iZ0z?O4!MKo(S4aY?l~jjC-xq5A!`&`;udJ3M)&u zOS}}j%c|V~01rI@o~LR@fBNtPyp-wx_45a3xMm5uv1a3>@QV!K%oGd!pdA}=+XgvV zp&{kCI%TLK*oR9jYLg88_psV-lkqFpGKDJ>M|YPiK@)<+^3lDkk$~`wG1{qb3vo0L z@iTXB5N8@VIJRUO+85yL0-5+vtf9s7M0cpvSD0l0i1v0Sjx#^RvS@(c z#IL7{R1=l%020{ScV{V2W9Y^d<_<`dj!1*^I|Y0CcQOdW_GnJ$)#}e5>%OD|74qN5$TZ0v3S~f%(H`>p z;~Cu*w|DRHAA#9WC9Y-PR8R_yNor%}ry%@G4AYf{vq)ny(mE(SPwoJtbF)lz-PCr7 zVdd1byKA|&?VpHU|3}Uh^DTc{#Y_;7uu{V!?7o}N{S2HFLhqnS5Aw}`KDekMGw9n= zwjWq>h{gk^wQ<^(u|=N)pG_1xTwNLE&3<*{?nY3wUI=C5;+J-iH(J0|g$J)PcZuAE zUXkzc)*RzgZ z>VN|K8=FtnCXR5_@ilCf1Djs{CueBMH@uTi%Zw&FzqOJ?z6_yVH|`h`kvhT6cjH{# zeQwCa${L)}$AM1(#<3CbF2)?+pDMP2RDz6% z)C=^0=H=U&Dk{5}qB7ze?QyHjTuKpf#Pt;Shvw)9Z|Js*nFm-2z7mjmh*kg-&b_jW z_1pDr_>T6BNiu2E=%73?G{c5*vgFaVbpXL)$IZa-hPGlwygUy(6Q2C~FkReF(~Mh5 z4#wlQfHq>|niz3Eb&)c|VB(Up@9`vRjh-+n%MU`affSw6G8!^c(C1(WtnnAVg^m40 z86a)fys_PBfm;0(>}KxN4ti%$n+5mFy$y{&!Ck$O>o@P(=|O!5w+(Z}m;ufG(>Ynf zP#&)Z47dP)G-+P=33_T1#IDf&6a>FEm0KoTyad`cdDP4d{WmN6K&j8lC2NQhrNI+; zvw_C*4}m8d3qs1LwRp-zE9h6Qi4k2_TNcqBaKrw~iJFx@TAxzoSc6^XxpO6$8>;PG z^;5Ucxu1ld1F_XG+xV~OL`b$Xt2DCQ47Augzt&dy8bLh)m;o@LoFOGB1u8y^UJP@H z4**xi@e4yN0fkc(S)GEIJZaKI+`qSoI$)WPJ41Oj)$@}+(2vOz{E&nmJUb?XTdeI* zzTZmc5IQOmY7{qdMm*_O!;ipDrkRA_$$Fe+W`4V7_a!}K1D0S60*w6lLqYw6!k24x z#Tpqh;sbD$GkvtY(rCxszt_wUNgnPA&Z9~(M_mLnWP7ch#b8WTs=!aE%EHyjt5g(- zYM-8=FqRA_dVFW)19~}#mkFnz1<&5G-F0EITXXnUPzh-1<75XH!DgrG2en#hjD8A} z`=)7;sJ>oP8LdNdo~7_9?a@*xTsE$e9R4QT6N1V5oTt7G9sU#z1NR-Ji@R9U9Hi@I z%IVI{c-%}?v*&D664>s;A!S}OF|-STWkF)$L5 zDVf2${f$-P293M!-WI2o{{l}rEcQvWkP8uF&gkht#mzK?HAlYt>&STOpH{U+DmAaD zt4K6P7tzQchT_W01rUA5U>M~+rv%MTuRKh@IJ15zq6Q=Pkze@nD}oa{y4~WiDHhBK zUTjSn$;bGb72N5a@!P_zH5K&XLA~nnXKhsRqMSQRj zS(IadxrZ5Ia_tO~TnydHWZJW+0DJc3bNWV+-3APin8uw0k%+WU91mIr;u2ErG05ep zHn}PMqb;D?$X0!ayvp^m=i`xZbbq}Z5ysBBXkcl5N}WRZ2Fb#Z{uqvEQmLKH`cdAr%=XpRKLDajSoqT;^rZI%X_P)e+DV!l z#ecwl##N)`%{2W?ql+gER$_w8BBSHccg7C{-+vvX+Vrmo_`5kgt6YNMK zZGTw-Hq!;wIxV=i4T zhC-u`Xn~RbpEg}ZMwFGj0D^i60IG3xKJlKO0s&V?jOV>Kc?sPC3(`H(_EqjKEvR!` z;=Ry-u78Jy$tETbUJKYD65)r|p>$;$2C?{P0Jz=BgZLGw%KuwXnXgsr+rK}uG@<6V zlGg${503>KYaxIZF<{C;el)niHLFIJ-`&w&Z~Zx@z@N#n0k6u9Iyg8RE8I0^^|!mT zubb}M#++S-$C!!>=DzE3|Eq_gt8GR5a133StY7edHsh%uy@rovJ%-P($C6g#;4=3M zdPW3hmIb4zE)$w1SEI4X^2N@W_!awdi{ojj3jk>owd?rwC*5r67)}2rD!NoGvDy1m z-VsJTz5Paue~iSueTdloQ(ng@pKH*VfAxW-CF}6zMWQc9?#32s-cS63FuMt~1Sc>_ zu|?*W8B8@OeF~IriAfiRislM}NX|KY`f7A+f=tCk&W!G4 z+D`ppie)r(Zh#pm*LMjhzyX8Go{nTx4{KF_cVrpuACmPBQXd=z>F?tH!l|N_rA~hR zKkDmpw9i9ywH_(sy=x2c7r@du)g<}yA~6{`dz5fJtaJ8i{@WEO+mH26@t~xoY^x&a zTIeiG{T;abg`aK$Np8D@X@p>g#|45i)(@-(3>UKJiOjJoewQ>{@TG$92zFNuss`bP z_Luu|B;@6|=!Tx!p=J;ywqVnG&)uqi@q@)0O_=eBj3M+BVDn6bXq-*=Ag>Fc;LACT zF|#*b-*`#5#y@)s53R3t?glZ>NipIr?}g<36o(n$Jkm=&Qz8O5sHl%_XbDExKSBd! 
zJolrU0oDEX(c-)yq%_GRY73%|_n_LLz1p#INeUcuDyvuPoPUs>l#YlwCg}yYsq))_ z>Q*`^{VQzRZfI1qaz6^#n8}^o!POe3MDPDSc(gEn@>V;ZaIifuIcT2(1*WaLhCDZH z+;e>a?vRz|H(WR75>s7)341}C$xM*p!DUXIX(ds(j?Xq*Wg}^&N%OvH!Q2*?#6c=c zhWk$Q+;L;9ARDZ?`aE3Wmf3UI7J#wl`FN^VWKpHQ9hdA&sAl?D0*B>;IZ>g$eGw|B z+W{raock#;RC$1eQ-Y&|7D9OO)U>@eid!dv6i^JIn!1pQN|(JF#xSH z)v@dkW`}DB&~={#c7HnKC0Snt<@`rto1m&-!O0L%hUweUc4(uhb&>&5KY?8p*NkDh zSJEzE4dn$xi*BH|EEP5-b}{%8vr2PfJ9;=o$42eX^zkx3XoxT2Uy=IwL`Y^5m2KLm z*B05u3Xds6lL*ObY+w7HE!*Qv>{pm1CE!u%%w43u`+cU{ib# zC4f^wW*`9%w*Co6@=^30z%}Nl|N3$aKRG#%O9o-@qgJro7sVI(rAYs$T|976RShdw z>kMlgmt(33Opp{@O*Bgs!p75f;5tsUV;Yzzk(}58G&S$8AJ}Io?91DGv4Wp}INBJX zY^AZH2xAldKzy|}#fWY4tfW+y2R&@7fy~GnJQWT1#B>9gbC?r<_)UgD5__`xiTsOB z*s``iG*{yKzt02=Jg1|ios+5if_^?pj1VCFt|MDGqB(9#NQH-1H8+WI*=Dz9N5;b4 ze%u|%U#&6^V77*abL{>}Upg9~bY$>Fm_&Q_ZaN@}&*lslNw|&z1+7Oj2_iG(tt4GQ z>^?3+$ACUbOnL#RDIu~AXZm?Pho)s@Mq6z#1B5D@P+ zxidxgHxg{4T+(oBEL3T`{BA>RzVkk0z0X`XoD|L9PiR+6 zxbgVSYLOGIC5wfyJ%1#WLIF(F|(rvJPGnv z8}o}0JA35{ys;rdO>BCS%_u3pi(}vve~3NFCH3;ND_@koF<3q2BX>{9n%=@vxj6JI z^(;kmBcSss<0_^z)t^kQOvmNSGm>GFQ>xjc525@Ftc2cCY>9-c#*q&&sU zF?88fl5~K?K?jy1^-F4(fOi%u_qy+;8v0*~QmL!gV5pX9m`qh7m zLH3snlZQ%c{Xe3dz7WWNJ50#g{W?Qn5&d3b(`1(q_I3t6)Keh>z`}-vn5~f}Po^fx z9theS9uTnggDp=s7a+A3yZGO9{k3x)->KZGhM%%!74E`VNN+qTCa#%nOmNO_VC}a! zy+A>oKQI6$#HK-ny*)Q1sCz4#v~w$)O6X&~R)cry$9|BA&pzXVg9s%aL-C!q>tB># z0k?SSoGCAl0s0^LZmuK#k8OdjZpmCFI)Vyoe!l#YO0>#>-7nX!5t;Ryg4}{p5gIai zhu>6ow?lz0U~28UR*@k|>+JcSy^#+)EA)c()M}0N*|U!D*vF0!eSnJ-PW8*ScnN!Z z{TEPb&S5h4+|1WI7jel4U;(B^Oc-Jib&s0UJ!{}na~=JD zj=(`pk^#$h705$DI_QAqB4owldKFzJCG;!vcDsZWKUwYz<=0Ycu(mI4pWLnf3_F%l z`=1~klnMTf1zS13WWW+jZ0n`lh=zxO3Ry7d@o3a*Dqp#R!0T~CUX_asC0NO(h7?Y* zhOghS!;|-}KuJNN63=G=SH#P#rMIpz9yQ*a9j*SDD5zu%SkS_q3Tbf_0}AjZ;XSAWByP^Jt2RwGjSc zKV~Lf?I#{@*k4Y_72kSa;(cs&U$c%!7hQJf2eV5xE)*E01o0%W8*_v;kEjD#K4!_KcrJhP#qsyV+WcCzCo%!) zZjX4VjCyFk8ieqEQ#g1MJva}3VS1|j2kG=7P8vOPI4 z?WHYKx%c2(#{UMCRs1maeyaI;b$@{+6{H%?CL9DOJ>OsV%YJ`IV&(dc_@ZUl*l9*zckEc3C4cl%e+Te0o#&k;yPMt9hH#!SByaYs!@V zC!g5HTiB0B7hJA3DpZ_tch>p2IW>sgtL6jzxZ&H zBvgHjUvEGukf9Q`^BxNU0p(|<7+ct>w4nAWz&-W$C_IGA2fCHu!90I|E=44vol&$c z>;$8?3AME9Kx);kWHI=1HPDo++Oz+78PgN&egEk7JX`M{HmULTmtnZ-l_JX zkD94XRz%$@)LM09U%SICrm?1%`Lst~$0skypY@nYHZ=9nSRKnS?B`mH16~{NN4&$4 zCF{OwKLKmNw%WC|DpvSQ4YT%CWKjM^cr++JhABw4drVrTj`B9u zEt`&tG^HO3z090!Ox@4t`X{0)Fkq5WKm;-g)F7M5p%Bmg>b{CQL$|G_=W)^IE@dtl(rH)tJOr)G_NHY;@_QP))zRUpMHscJD>x8szO1-qw5rd?!!e3 z$q?W6K&B-mdX0}IykYBwd58ckAzcWo4jZ8vs777glLCuRYBw~bDbzgJoK-gVVkuo& zp5zySm{_DD?8YWT+d5Yp%33lVubG+N8>0qQ#8-7Eoon`(Y-rk~Ny%evDuvg=ie-N{ zsPF5DvB|E9v8DRp%(TJM?|1OgkYq=m=nG|!4d#id%}$Ewv#Ze!^(4TJYr~^rcc((l z1a=Sq35Hx80eRQ8uiI##c!fV?+ypUBxj{6;qs02z>kxz-5Bb7FVJDN=n}1(e5J^K z4`-t5`_>y(WDF_u<0JP#5J7=r*DS4u-x*wdP!!~6mJ>oPdD}^iyNar1f&g*978h7F z*>_&N%ge>f7{84DLW@|xlB*WQ7y52TlRq}{2i0fp{i=_RsewR!NjD&MdQ!`-FOQJO z``!ux&J=kF98?h%d4R5vMcb{A1Emg|wg1zxNZF((n<5fiR%A>L%ubjV+h{-NICP<7 z!6v13wEOo~#8_{gYf1&*>t_Tke4 z4cJ^VFNGUXs?<&}d*WtCJ8Bx`%$b^&Y+Qk&*KrPKT|EXORThSWzBWgPJ6sf8Iq?lj z49RC3;y|vjIg30p9gq$Eofj#vh89N+%`E3uFL}MraJ}5rgH7!lCwm)p#+62DI&;cF zemvW)0T@hlQAefx&6)t&)phfh1d(=REcg}bY|KdIY|`?YiL`(zh8`sAg*ifBQ8|RV z@iz8t-Y?=gXP!`G5s%H^H+IfOcHAM?&NN+Qp4jacXUp6o66VaJvsC%|Kt)928xy=h zQP7ZFWGlhZ^xzK1ml|_6gcfGd<#+A@>_BlRb$Fjb7y?5=N&LVy1%+a*qgqwS4vC^- z4C>7#k47$ir{rYZn)@)!f{kPCt)8N!!*tw)_BN2@Q8Z8uOC5o?Hkf@5;5i9*NnOVz zn1)dSEKt{2>Wr!qJWDx?5!gpxV+Ptf?zr7N9VR`Vt0qeY*p@yiaq+@R{o*$DVR74` z3o+Jft)$pZNN7aiVss`eWZWxBnB2CmwfE9ft2g6$9$LauUwWMO{;sOOF51Wez!HXU zqq_W=CHfB+KCWH-uJxuH7c0PUi%su#*Tpgtsf?u9)la$l3O(|L4~u_e=BD}dA0ku& zIT@Ca!c`F5c)sQdea;WKjj~P#vM5%ppQ+JkgAklp*$)oKg^cd(VTI79wjq`r^MZm! 
zJKtY)xet6k#{zx1poQw|lRXkl>6~<9n{39~0SVpYLdn?v|ZuYFy%n7f$g zIGY*qee}&qf6TI%ozNbWL*Up`sVdUiA_hQX*qnA016biWPI=NI#8Z%zNqym*D9fSY zvpsj&<=p9zC{x?G-brsy%;~==5+eaaFteax95Dm2puM#QPW~VUNHpKIrtXm!u;{I# zXIJOt+anY(g(z{&E{fcseza1EcdoZX;)Vc*?3%85(v$Iog%518TgQ}Er#15?@?ceg z_yacdJeXbtxpED~D~H_#p4MmsDduvM>t+`vWeNr(V)!yDMIi!O8ov3t4GfQc@sTWR zRV6Ng5|vYs9QOULV50F`fHRB-yt4;Bf%KqDD&|j4nJ1(RFyHJXphVK{*J{;)ogF_Z zQtfDIM#jG1=N1E@cKjni3mp#rjJi92Xo zFCf8Sv;XwaS5F`&&lAWEJrLU4)v?92D^n2xL-sTGc&#Ty;EoapHw&y=U<(%koyBc` zl)LoWW>50w(j<98A~Zpyng&PR=6p7;xOHopswS=aG+RBp=qaG#h4KyWy2fACjJ)jX z;~J#RKT9Y}doI)=YD@pMA!o8Eh}f%v>yY)E0IialK&117$9prC4x z?%A>F3Z2;eiNNg@j!S!gnd6Ls@a85`88Cz&%WpWR19w-yKIzz_8%X)Ev~0W_C4Ys0 zT$m0aV{w4>JcQ(eb?|%2y~C?AMr0v38qB0paQt|m0&{BXbw$z$JYjV>emT3)DCLaN z4uCLAq+$F5OVHB3Mku|yh=D9v{D{-{2Mm(eAF3lP$E&(u(KRa8iD;*ZzY`@3f$m8+ zjA+Z*en}#|*zoLMg8Pm=*!F1nX*Py>Ftf1q=Vbqv{)-Cb7723CWu7@uF)^S4D5YSwp z6?$KbCt;tDZi{V`mYFXR2lxDkQsil=u^Yv%dj?G>itaBy;jfIsb%1BRYOVj+|<6;+`$)te&O|B-Ih>p#jaiw%HBK{grV=0o|10ChUZe&ZT%H(G8eoAT)eE$^`!Pc6w5 z$NTEAM#~fFR|?2 z3EM8eSi`QUZFt?EbuQrChpYs)vga;OG{PJ>lBY8UG>llR;DP+sevsf};c39(A!V)e zRuHEJpVwra|GH2Q>A0<^olbV5ENNnfl#S-Ipy-ck$x-J0Z*!fS+m^!JR**m#Rdby0 zhfxNsuh<{XZ?bg@!U37qa|TgAvvz0Kj`u4EJv!fr62v$N^t4%fB-zvig848WX;YK_ zJ<#4u%8J!)aJpgQCHX~JgQNEig?O!?n2C@|?OgfkjqiQ0(rdYrh_1owZBTTLYejj_ zv;N$>&+}Nd)frEqvdP5 zdZwSf$|PX^yzUn-)54@P_5VzyBL?y^>(vKC)eAS9lMyk%O?ut$o0KZcqLMlfZ_!EM zZ|=xiF`Y;akp+R)#zbF1Gd!AyKkn&{OWLQF4rWM#$0zP@GI7e4CYr_LqBgX4>+s8n zxb0Cc-s`2`j|^o${n5uheAHpfU1>n_ub|8ubNh=GZx{ONI!iSMK z@0We8u`q|+VHWNh@PWAwC)yVqLv7NQtuya-$ub7ybEu2`yfEdnB`v7u>vyDM zs*Z5zfL9n2{`7EpF09pHbj2)}E=cr1M#$wC;`d?nr--v2Np^HZV|B6`&JzFta`L+11?S`Xd zWS8&$V(N?$HybWJ@p!6q3}=SsvEs%2u;76Hhu2=l5Xon0npDNx!55I5v_ANy(N1Ny zsAy**Yt;AK6oY0q@}6sd7`ISJtodXN6MX9)VcW~gaHr6i3A2tniBVh$?d?)R(mM1;8USm zF?R#a9xJIt4v1TYP#?3>Oafo5B7@$w3A(63t_n4*O7hR}50-nC$ZZ{X5MfglpMcDz zY6Q%#B0~Is6L;dxn*KWAvH_J$S3dp3nsrRVnXQDnB(v4)a0i82oKj6M48hPg2MN7w zAckP|FQd;5N(T?gwRsxMp;`|vy5MFaGwwaVksNv(_f=A_vvu|K3NS=HKP2v|io}Mx zO=|3vxkiVV0e^07KNRr%O9&sKqOHtY)R-pW7I1=)Q_RA4^$&I| zuIqJk>b|y}P$zT9 zb2_Wh7;_!4yN_dxBhkCBw@>_(@g9!)`1^oDqJQG?s}Uz>@nwPw(b)R+rG6JlzG*%R z5^mJn+UZO2-ze91KuGcb~46+ z_oi|d1orJf`UtgHlbDwsn&k}u8E1`m1CMNnl5>hq4{WKZH*fVm*CuLer9qt8d^WC* z>#FP*)=n(4JC*+wWDS!Pkt>TQucD8`{%7_1z6)a_|F$CmH{e_WSwr#t==wfSo?cAn zkI-UgT)hw=(n}v88A(jL4ByOGIJKX4QA!06_TtAPCfi-ZEEe_Efe^kUvHjvTD2))fTR|Rp&dVlF6;ET8=Se#~ND#)&E$`l? 
z&I|G`pN_re(K?Blv!6PREZ+aP(qQW}oHhy3gT|%)UDDb$pT^3ysxCE7N727Yba$|W zKWxlxSiv0=Gsdm`@X?lQ(V&7uW_(%C6^n6S?0{sRRC2T}nnggM`{Z5qtKH}jClE-@ zoSNN0PG3%Bwzg+NZoG7G9=-g8pz7)y$>wYyHs6*BQ?S@XdPA=@ohwHoKqC0v^Z~k- zSGRvc)&+KGY0@u#cmIq%g3OlD!p3p!!N3;SA5D@Mw%=kXhq(c$7a1Z3^~CXARYE!y zET<*!(cJt!wBkEQ2=PRWRTNd;d?C#F5dM2*Y=~FU|8)BD5o~O+7Cr3y{crf(ag3Y3h+Wsp|QSYbk` zS?T$Y=J#%D=VuPGI;g)3d}A2XO4Y_6gNdg<{HkZ4Q?_>bET27)jVY005G%}ldt(^% zBpAl2CX8u?c2)q*WV&V|+fXO*qy#3L7)qdORna@~W&M69Q?60DhXbpDmJ2EiPzeVm zN9tA)FI?wA#=h%h2{>s8JKZUkRuKcRswvZzp?K202i09p&-eOXQXU*n zdl3BUEk9=->i?1gMt6n~;#-LTa3wk;D&Lo2OFbPF4k}nj%cO=Z-|d(mc57)nt6lf| z#T02-JRb+dqfvY|B?(;2^6N2JbDC`C}3*|k94wbnrw zM?#rUMX#Tk!#F3wTGjybtLM&0zunAb#Du6u$91HXl&^1vIQ0!N%7x6A=_qh^Ds9KH zW0>(h{aXY-$Bko)CPL0iC+D0PMY8Z5G}I8#micmgSfqeE>&0R4L+4o4Kq`OPXK>b{ zYy*VhlG^0G!OYo1KcZm zC&Y#2i4}R_nPiwr0!f23eAI?u+OzAtGLIyyssx_XE5nu|rrax+Z|{lIDy$+gCf6)>~S5~Eb>JgdS29NJ=jq@AkFRCYBp%rmCJ2ALM3Eu5ZU6>%dEEg zH(Ur@q945UI-{}ycd6DEtN!g?4Qs5R0BcQ4Ug)-g?Yq;*8BSdX#`5#N zIP;T9QP7Duko?9AfQ~86d#wTfv*>bB%+g0&{mTO92A}}Si3w>#=B&+qJKSzl&6M}x#*mw3T|?c<07X>bfg&-2c9vsb^T(@2nxE&t zyLMXb<}veiq1-ErFBH`63EE=Dh`$WiRAZ^y({5tx= z|BP(81-&w8X{BU1>adB({Yr=d=bbQ(c%{&mztxS;l7jXl@UbpY_=H~^+p&UrGQ0Cs zzHi{uGnEVawusKP(i^CfcbaG%1rJKnWp`*;<&0T{OJQ0*6pN%B(nKkrvkn~ryJRrX zGDai0HMt((;xjf^zEI6Ix_`o^eh%fE%UZ-gXl=#5c4En&;qy?}&+w2i7Pp|6JWsZJ z)6g5$^9Jw|ASek`55~BYvWh(LmW0*gpw)P^sMGY>V*MRMgGYIa81WRb(G6GA)z(7@;(+6TLK2 z{4gjybOu?S^-BqvZ32PTuxcsC9VC?ozoOUiY5TV&#(a7s3zewd(MfncRjAmq z!*S9QgfI&l`D;J(oU>sVM6_LEs)fvjiQLlcVZ}xBsa!_e?@}(v!(b;SQRm4C;w}vZ zp67SR@p)oz94dm;Qvofn@`edXnJmjrbO7t6(A3wb^J9uP>NNoqc4y9HW0QZmcXTs_ zHuG+7#1yFmp?@WMw^T9;?OZc)ewRf=&0zDri&83ZSyII@u-d~Q@TpGYtJIe2+slv; z#rq&_Q0&3nvT=t~vPStK#<&s%l;{R7+mP~BDpuJi{3qaT!85RlaB2ViUuwnGczldt z@OCSuto1F1>UHRMwFWJ(!@x{-f@OdYnUN;JtYN~I?+J)~RLiArnpWWQJ;Cz)`}Na$ z5OkfP&r4FhM0Ttw+v+f+U_be!&V+HV;{qEzSiJ+)&jw=5VHH9p409DHf%jr@d@fU+ zwy*qcQ07txdq zN{Sw?ESFXsU(SY(Q5v%JanoaiY4)u;P)S?enac9?d;A#>sNHMhHzhpW z6kmfJqCpH)EX3N}y6l12a+9ZO0Iurcxv?p5Im0akY4b2Ety?)^U&b4D9Arg4pNHu*FFxLx zF1m}~g~fyr;>8xsibO|)@x2GCZZ)z5e!|R+ipz~vY-cmT-hp)O^JR-2MpdPI0;<9Z z`2`3y?oc?|82geSe;Wf|m|SJ-<}Rsf+o!EnQ2e*0{fwO1-AVij%p@NUCEE(;0-ghQ&QfxLh^QPmmcz49-A6B% z6D;a2ytfzKaxzb^uPS(g(q`v2^!3bxKK(y&Gw=gX0O)$@dE}Dfq~#p7i+!W?bF(N$ zMzc^vT84uiqRx0m+(}0GI-0*Ex5_p(ZD79wn2~0WdGB6n=sE6Nk zFo}v3^#dC6pya@#uYdZ+SZdRb%sh8c_WC9+N-lhX&i5_uaoZlLt@1Bc5xXBgdi0+oZStnm z*fj(%=Nke4oj9wxbm5uKlI?ieO=A*y6hb2{gVhV@A8bU1Gs&b0hrOY?5}YfbhVlxauZ&3A+%KdmYk-P0^ngt>{666ymx11$pvgf@;K_T&?46>0Y48!C51Nr z!pp1Lfoo$gXiUIf7lDFat7XSzJQt7mzP3My>hFiHPV?N$8jZtYp)bfkRl&&qQ_uYTnq1 z@hwQk4oY*xh}``;r48D|5_vYT!I0=)&06MZc$DdDHz1Vt4H#4y=nxUy4gLjcH)rRY zG)0Ifx(PrUC<~q7+LMjpC!tm9Y@&XQTDF z!F~`55F0q2to!QNI?SBB!Il#xwr1H~*V8cN7ClW~ZC{YFo|dNUcWeoow0iZ*_T<-8 zVV=!h!||9NtoME%g&<+y$)FLpsCOzwPdfS0e*p#$sLDRR02{ z`fJ+VE6oik>+$BvET5a6forDl#QVpV1O?9zB>2%IHCeA}X#KP6m<#Bp&ymf z1Ghjg9ExZ4ye_jCRKtt$@$-9hJIzh9>EyiJHtliJW8omDH z*664kniR;y$1m*e7|^JulZ(S3v(bjshP@6TDOX4|Ln;;mzi&po-?Qs zxs8FWRf7o2MqGlBm(px=1%wvoG23>A_a6AlW>cFgOx6vB=l0{v0|*{IxFw&wwG}?V z(X%siBWJBesL5Kbg?6UQ8r;E;1|T{;F-1%AXah0nC=M$#ZMB2Fh-BgAN8HIVq?bBQ z3tHf2js24t6`^aa644LYdA$Pfd)i2e{oQu~T^mrX@H~iF@7bO<$VP0ie*Am+sa}Td z<3QYJ@bSD}s^P2j7?RA^MrL{JAZF644Smfe3TJ4^*09CPz_UIDO1V~%2Jp54xMYmR zEf#pJ4DyF}5iM_cdG^zx_x58cr^65R4=_!gJpJ4I`^a~>{iLc|iGn%psA5_L4b#xx z-KO7T#W|X@HLkK$C(Td5&O{mY?rjyM{Bhg4q#s-F7%)T=6A$PMA1p8>im06uDAZN- z0SFVnsZ+_5YeU`gL6~A&f5`4W(s51$IG>bT@ptF2Lz^gKP9Nyi{36zgi6mPem-pis zwKu(;XChc<(?0rObU~_RkC1=ROQkvZD2e$8M&Ja|8HtIKHa^Gcv)OC~mU6^yNQ{1p~<&d5;aZlr}qb z>*fSi@HYl*_iCY=x8baNc(zh=sA{Zd9Lj-2qI{@DlBsSU`VPP;2E!i4Sy@zuN9IVp 
zH6O%x6F_AAV(;Wo+?2!8sSBaZ*KZj5|CEzTJh8d}hpdvhKLRuHM!CP{`;!rK80USM z;C{9WBaXb&a&z}!ckL8slrvC8@kgMw$`q--<^3?B$csUL2*y;A@hOm4p1ZhmvdtJ5 zP;Mo>ut-B-ucc&EpX}D`Y7f~ho`tLNOsEGXJ4ad3i+czBmm~h&OG6h`)s0Dx(6b~Erq2Lj(yapG+GT^Eus3amr-50ea>L_Z(e`K<*mf3G z>tT|=7~*wInvyohEviXp8H<7hQiAvTQ?sqr{wY2~xz98UA32E~7m*avD>L3bzE&$o zbEH}RSP#eF1L?cw`;FXYOJ0!VF$_B02h&KcaE_8Iw){*4+X16O=!5Z93vi> z+Et4I2A7FW5DJNt^q>hx<5n`OUp&|8Mk&1V{$}x2%;NcdJH5wNmkv0cB_xTm5Ve7e zZJL!;P!Z?N|=*A2GS z+!4A^WjV0x zwLDQ@HijhKt=acvi4dq0A&y^AkoX zFzHWYB$HeoN{tG*d3lpN9PUxmSl_5rvjmhzwW>dCD;-Z&Rfjy^)B(RpM*-ud*ql}* z*?4Xg7yW+4Qp%E2K9oRGUtyb$Z~~0Q#oez^67VkvpLhULXL9dZ_6TtfK*B42(-$>c zl0cFT?oR6=jxE`RqBpa#!+g>n6PYDl7e5E%@_PLXm)JxNdLtrQ`6>`%%g=lYah0Gg zxy`|LB}a9n;aXCZDH&+S1cRg4N3En*zcio6yToYu3<5Sf+UfmUb{T@}mhJlez)Aro z*xi9^=NK3Hg<+iGU&o_B2bCrz7UZyPSx*w8Z&i*)<%7Sc#x*r<2+yQJjRBrIC}3&v zA8ejdplKY1j9Tw4HZQ;-TeQlE;})((W1hrUnJ2$RsZ$<28OtU)ad7_04uf8-}23$EF^VoAMK{U&?7EwT&8NCJmZMOdGhvj~UhFC(E)y zLoDH*g$)_x!%3x}aDpr;N5=g*{q0xeb3lfgn%?dNF*7Z!s=kM~ZT)8S`(@=GIrXp&9kzO38|R3S?E)wqQ5CMpn|CA)wyNXccRqP z+s9FxB-E^k<3qCiTpki4ZpdGy)gfRg%c^o3JXR;huk!2g)@}ulc zfgfF9KBp>1+v5_VTNXAN7 z^qNf#AL{Fb>FVw?c0)AHBO5NDQ-n8ei5h?V)<<}5H<-nv6bfZgnMyj`>T*k8L>C@5 zJalAb@MCbIkfR6ZWB4xGA#H5&s3EYeVCCk_`vu6dOP>djyRb2kTVKbP*~#fSTOniG zbSEg_+6kfgo_T$A^tXXCpvMQGB>R}7(|9bO2MPv=YcdkET1H+YHaFzeSK|E(#DE58>--DoX-%Qpfx_mTs$&6x6g9vW%J#6^MxMVo8)Hj1*Z>4G=^7KyA!(MbB12hZ zy=H1iXB>VhrCF&-K$l@Td5>H<05cmMjIv8S#9-?}H zf0n#%N~x7ljo{+0K#)=o1xm?@aW5BR5W_+DxN)yR1BXOkGWJA`J5AOf5yc_y`Qa(I zv%GgGzObEM^KiQJBH{DWNeg0sI?-kk?pS=u5Ju5f&B~TTQE{f%$N37{2F$`|XJteCboy*%+xr~2p0aaZ+ff5>>lvi;D&mw^R7V4)q zVM}}$jjB}$AV&FE_mkMO-gP;ev^y>ZjE;cqyx3=|=sVPofY$p}Jn4N;62^tqkgH}- z*Yo?7|77J2C|cwE9a#IrBUxnsCpxo{lW6No0TE1>KPOvm>d%T%S?EumN2gMsJv15S zra)*x&68=?xc5sgn-MSC+c%ps0hR%XQNn20I|ha~qLHk1^l51RB8`g8}R zg(H{BAJ($z#V=i-|~89~}~PM^c@O-sSC?C-!Q5vOB`aSO~xsOdpr3DE71J5`37y zt^qyczmHjdF_q#}ojSc5HELSyCb)qmkeP??8@)rd1Z7#qhftNUq8Oa$0QOZcL`d69 z6D>ZA#EWW51pW<2kgqDTi42_}E7Vr~Zdkg}_*m;xr91Z-aUu;ZNY_9z5EG5%1e?o-t$kfQwt1kvn)@f1V{z)2KkU&_J=>Sw+!aoQR~2>DO*9bCPo1mWdSP;=+GO zb*om=JarKlHa6!>vLW5t-7pU${A-mPD#4TIK0WkQ5ej?gxz76QWtftV<_w=#Ul>qQ zQ>#;j{DKa04=8cbJ1no?tanTo2@Hg!tRGQiD4N^QPY$~@y@`VSgU7UeFKrfTa8W{m zL8~23VI^rqysMa>^r}RKT9L0kI&7;pt$c{*VY)*dXdY;%sCw4TRNXM~R}P5{eM3VG zolAB};VkjCfI}4zB&ls`mv^OV9=DuZ>#tMj(k?2Z>jiSS zBp{AWEr$_@{p*wWp0~W%{QeKZknyZHfOiLg9DCiTOm5L&JgzD1AO=njr%$oG za1yXm@G+D}soUA9dw8;2yAr(FK08; zW+58kP1Q|_47EAY-paBIs0WvR1K+euC!!j9iGBsZ8jDCqa)A6`JouZz>_IHnv%+=*4VGo7R2ZQ-c^%i?TDQPn#Cf_xq5QY>NyI71#?NX-<^* zZu@1vlr1&W%#z`Grh4VWxX63S#Alw0ZI!yQ;$FnYiDtvkFVUjT7N`@J<`)Ci%d?^b zE8Az&a#rOL8gg?UZ?f%*<`0ZHYt2;$ubthWkFBZPU`lWX0IOQGqk=CeC<~+vQ|Zas z)XJC4Rui|Xt$g2dED8VTf1NJw#CD=hKpvo0yEq>jFX7B>?*N$)is7f)tQK=a5|N9U z+T~ZDD1W257f%prz`ta@SS&UtFK#Y|-{Sn#xR#U!E`xAjAW*9rbq*JTK6ivUPwOMWu1NY6ko04(@Pk3qJe0Y1JjZfvqz>#A7OgV z3wh`qrfAc#U?FE>XyJ-=XY2{Sz(4iJMh)JF>A(gIdkN5&tnU$@T7z4*cubz`MYJHe ztzp|z-E0CcFWN-W^LLk-d$S{>NBjy(Tf!l0oE~(*2ze$htkW#g6B@x=Jcet zaw`*>*^9X&+?o3m7wwS8y|F*wo8ZbXXjMiHVDm+$BSOImP8;`|%sUsWDq{x|c2=S5 z4&&=d9M=@!4(B7!>G%_pBKvC})c?#(l5LQ=XWQ(|W*4vHjW)W(%WL!1n_kmgO)dJt z16&PYKY|1>gMOqyc{_+%k}m|@k| z;LTJ)rrtH|jWgfVg%en(sdDVgD~T`GY$vfQmzSsy3=GMRBBzf86HYIvdF@ z`BtH1brAm>BMG1lMgHkB<{ePFY}3^}<;U4_+--RSQk%ee&IC_3lrZ(&l>=@OG(2y3 zmtogR9P7Kt6TDyd2@=?2pdD-2iFcmS2KAPBzDi!94%DrSe@!;_N@wtAHT{VgJMUzk z9G9_$GAfNZ{+P1e-5MH8VkX%(!;7aQ;1ObOw}-ZO74BxUd61I3rL^Yt#(w zVyITBTWkA*t-siE!q!w>kDs1CaNK1GM7m}*ZYfQTXwpp2f!*skp-J_` zs6PzBWJaUO(W`YElWnOsscKcl+=o_<6!TQnY+;R&okBPvGmOsTY~+g#GDlphQ6W=S(qKIm+?!zEDf}i#9UG8IQojq;< zUtDGhuDnId2FGBS6c$Bqf-h8pk~v=)Vnur}4{YjS49!29A&^Zm4^ElnJ7%r}pnUF@ 
zJ(##M0Lp`8Ql=ARd!RK%4d1}{3UJI*soDj{i?}{n!CX!4Mx_o?uRbmdk+M|sb#pRo zDGxN&DwC+;K_<Rty<6gvc{F=+>r?e5N^(4}(WA%H*F6BcSqRiTP=!uafXu3y*Y^O#gy8WIH>O3 z!4^dEfi@H6WzhaX@%B^<0RKi7T?tjL!JMH%xz**}*;RN<_@&zKDz}k0pJ>tlw^LZ9 z-D#&NJ;ZTTP`xGE3zB-$NHL@(!2$7Qnhd!8SKel!p0=eP{WLd?*vJH~wpCcyx4|?`G++xGg;O2}f)#-piqTi0B`qcya8d+>sTQUKwB%uA zlml%2RRm_w5bb0YAo!cOBfGxB;1f1JAjYjvNB{D{Tg4c&m!S$S&+BI<5J7%0{J=b& z+gbFx4;mYuP#hM$CcaeHiNTruvW}mCD=p9ap^mH!W!%&Y^wK`rYNiUMAK`sJNlRB4 zK&IAfYrFMqh~08m6bJVy2;}DEL_D1l2nT5;vy#KA=8OsD z5;Kg;Ciba4_v~krWYe`sCYvIEAu% zd>k)x>u3gS2v{(_J%VXIr1lh&ykneNz7YZz6DaZZGk5oqH{}?vqbdGyZnrk?oGnX-QKkr@(B94dOzNn0xwXSjg zBwvY`EG8jubJh8UC%wIx!6<>a*3bkj!aZ`txe6!I6H;+;(rW)dEBl_vHuvMVOmvBQ zfbVY3dC()@W-TC@8H6t}J7fZ%ZbBc2$+}OYjs<`xl9eTiwN??pkx4fCm8uvpR`FZO z!es^$w8>O6_*bho((ownmNB3k;*x zp~im*6LbrA@G3%%_vTlh z1D0r_%N(Kzq{_ioKQN&nBKE#YF^EP{JlpIn_%tdd`jb#?=&_&Trf6d6M(6kTt$;W%O++5N{jbhS92)Lqk|J(PgT3AK=^W5%m z82|vnw5S7dpL%kbOJb7kfccm=_KGn&%N#b895GL1pULi>pBm5gPRALg+{+-RB~}~R z>nG}yEOAo&ne*}%l2u6CpY%Q6d-r4Cp!n%%F_~>rKbS7Z)MVGvt}5h9(6^>1CY`n? z&BJPi&4tlGH%02Cu(1HZ75mC#Reso(sM7)%@KNTn+nURh1pbJP91|hUcFEb^P~{B}bKJ{kDQ)4w5U)R$nZA9|VCf36 ze@Ti6In*k=#b`Rd$+0NZbTdS9N;c@)HoW4H)rHyL%bZ5_coeg|jMq#cYzMeL!ebTP z|IMrgKf-0KOv$1NL$rV;bt>7255<=AI$e*ueRG&;k_Hl0H&Hy!j+2=8VlPy44X`Qm zl4Pjx(I2X1L0`ZvcgUi?G+S*lM$&Re)Aa|dj9?sP$*~t3g4>d$o_VWslGDxrR=Vfe zuY0#FM=H_LP@=3Ld!%|im2H)r@B@Wm4ADB~LqB$i|8CO+DPw%VK5PmOGNaow>$0u$ z>*RJ+Vwt!77O6eiFOx|a+HQ#k;qfLcFFHnf4?N6~>%9gw^}@rEJZ*P$ab-s9aK3YX zcDRVbjK1RG$7`_45IwBuW=W3Yjrgi`B{FR#NQI29o2|)6a>{`$5pbXe0iq- z#$Ad98Qgb+WDBDj%`-ncR;x#qf2nLg!WOa`LKp|}g?yjN9m&X+U&a)9SE;sW(mK)t z9wErQbKe)9S2S2}^+$NO!*|MdEDa#z&A&87v||}(L~&C*Rxj-l-NyV`LxrKpn@H=` z-Om@p^uIXsd~PKq6Gr!eLHH-AG^zmnRd$>!uWEj?f(kLzvp2$022Kb^I$Xp|MgR)m z_qig_)u|F+&>W9g{qkisB}mAFVOsW>79xZ=VJuMj0IjY3S~qZh?>zqQ0C%S)c^|X` z7Tws5%LhLPn6D+zcPBMr;KWFI0a7jiTF^h)s}bw@3ws?IY4-@7iX z#I$Xtj{__+X%32oW|zS*9pe^3K8D7YSY``lr(Ty?_ZUv*`}Yo5=eT&ywE3GI%?Z^) zSm4PLt07E*rwgXQu9ZPC?>boN-ZfADxjwo{x!jne>y`QUm7;9LN9<8-$qeX0KOiH{ z+e+)fJ&h>C@owy_LbaJ`9rgmrE%15j(4}f%77821P$}(BNMLchNZXHTUD;6AH18}p z;@2Q$Gu;uPx7XP#*Dk;Vapn_0OOC_a=n99B@M~Nlwv{@poMD9Ouno?Rvwl^``G1=um47v9c*VJ?6{K^`a!|9U4AlxVpXE_=L70`*Qa<%w3m1FTAPDgrzfo8R!Zt{8fANFwG+a#z=~pBbh^l5x<_B2N zmYVNByYmSrm5F1MyuVKzEFgiv4`a^4sZ47O^F}o0wXy;C|LYlbN27JmXqY;OrRa{> znMF~oEE{~Q%agBBs8 zD_R3yx;dG6Vz+kO1dMWxSK0BeIX65*j9M>d z&Ct}>N61EZ)y>p!ex0RHO7Z{|Cco}Z1HI1xbq<=$Y^`CY`d6raG!gp%S$E()1>ud(`o*&k|ed|8S3lOqfV&htuTl!D|6W#EdxI*Ojv zzqmsI*$r1)_KijRvg(r|I?sRW`qy)#k1iB4x#&QZ&|lq z;;ZD43ur@3ZJr(h?%t0_9|`xN>Nj=@9WYh15>wS=5bhyX6_j36*^kA!$Hxrw6CBQ@ z8Iuj z5OX;5*>Ee5;zzzOZm0Ict348LOHke@Es8(sxjE~3uOfkTD8A{0mjT`(%UD{Hcrgc< z)wE){rE0k;hSQ>s`dBqZCED}g zZMnX%9r9hHf5RD{|NoF|c^fzz+|Mujw*-l98b~BPEBUn`AWer0E-2u;PX({bA@F@R z%^omkdWSQl==X~{qXO~TLzR@bI;L_A|3b4!ce0Q5b{HQnQ%-Ga&+zMlKkZQLuCWw` zbgAe{YEEHO9H8S>?dH{#TS`{%^jyQ)VRUMeQM0#@?Vo2UD<4ge6tc{7wVPehmr@Ym zupk+IY8O0EWzxox-uxYwMD=9WF8+YA-D#!#olU9q!lxok6U6C;)v34#`0t=c?=#Bl-$rjwmGIgXRJEpB(82BWGfNNKuGZ^b*t9r zGn^pAD)&F0*bHuhs;Mvguwv$jkN=Um@+zZ0!e|hGC{Ef@Ehq)`N8vzL~3dL&cS!4;4JZu>mZCzy4jt%cE)dWuV*n^e)&gd)T#e+i!A z5Z|g=)_S)vSAXWIp8hpEIN5+7kaq~fY^B0s%Mc-XVHHABuC8ar1AHr^@7w{s+37YK zs87Ld#o@|>2r#~8;kQ)IquK|1-4`umH_E51&i9|7WJ>w|_e`Nx2w|sq4f)9gQHiSp zWf?tEnX-CnZ+0{+Tl^$|GAAA!6sO6-u{J2{_el8Y-c);90smffIfg&bSoXJ6zq+jt+{*Mh(WV<|Jba9sPZ;dQ*KveSS5{GndJK-uih51YhrLX?8{@LV) z??#&3KLQV2qm#wwPme{+K3cE5u=qb~IC&4ZY}lEe7lpoN31~1lv}D1MD4%Rk{WR4n zpcN0U>*)RuqcosT2{_;`2c?6m!$URe>SW6B0opfp43(Pu2F=w%= zfoA)kvCWNW@lmRVbaufoi2~#T_okV%SeS6L#xU2mVPWWn%W1pr*j=v`nKGIV=M_gB z7RvFh65#lR|4SQdjYHm~AvtUcD(r4O}ib-6uUHefx0;7;fm# 
zT_Cl?(CrO`j;k$Y-83^FIMS=tfml?II;L`5hH5vf_eRoH zZc}pfsg8jz3jmpOxMr7FcsiT#tyV%J(C(B0uk%cRgrQC{#%u2Gej@<8n|y zPfJ}(>Jqi^&8v~TcFB>jZgJ3kZwqEzk4;l?j*h?opfj6Ku4_;&hpD1icez6maIF~W zfrW1*Zt-BhZlBrwOwd`JIjQ9nWigB zm!+4r7IHiVu-vc;Ge*^{iB zPP*-}RhXOL!-#?bvv`#JY(rDxvPRPlh~qi@M3w~V*h|=evD1Ix?&At*`~Uz8XaSz8~SnPmm-ozfe6fd?x9jrtpwH0%x*W@pT$v$o~=31v^} z!ytR$CfJggR|Q#+QX~cK5FPMi!lW_2>VmaD@rIP^TOC3IZpBob`Z<+^VbE{_qK`R= z!M%hqBPQ?zCIM9;On(s!S;FIW9CTv>D9#o$6My9Im)|%XJGk9gzDiJvw2-$yhJqf9 zzgFOa&)Og}eh&@l@&aLH_zonZBNWQvWtH2(zPh}NAar0EatgvFB|FI;L)CH4ktkeH zQ&26qF$vi_VP!rtq)vgkw4VRpuBgha)CMz9~oE zVl!p>ViCslATandA=+U0WqNp?-fQp)-7Qh2>DImnt}#oxaSUC9h`VRD%PyEY-1fU! zA{4egKZR56myfnKt70*^fNSJ5p0z+nbp2j@Gepc51vg?vBk8$_YRP)9I{9y;~)g? z4^;Kx}^Ax^qg!Op%Dgwt0VD!u9gKpF6wN zDTBM=!S~lTYw5@qI@EV7dG`idYHCvo0woP$qPeexb46*WYNXXIjBsoZq7Govx3SzF z?PNyGo{>Zg6AZ9`go;T6xW70z+(eR z)6_G5|BKaPWoFp3r}l2u5Zykf<2a9{A6V=LYT0pWO!G35Vh0RhJXkcLjq+;7q|Ct- za1AzH{e$T98OO@+1>Ps9h(u|=0cX<06YZ=N3f*=~Sx3{oc$1@6U6=&tC>lXFQ{ZBT zl39Q4&hvZn`Ch*8>O#dV-C>pGcO8{4q67%oBm@gA_xt^@7*@9Ik*Mc{J-Ll2<>XXX z>8o=Pp}r>C6aL}xdFeZ-URnuBx|2a}MQQxDD-*U3mX>^+d#lX>pzr`wV32^`STcg6 zJZY((LfT6jAll5DGXy_IysSbu>yN5+W}f`Cc<*l3+6n^9CuEpKSOBq&wA?>{sISd| zPB@gS&^R9q$={43yE0zxJAQvGcqqonzei_MjkWq({tPMrliS-^^YZ9XHWnlRDXEJ= z2P}`82K=pLw1V)TfqjA_MOdiTK>NX6lQeA@4S`BTJ#hjNv+flFRS!gVTf*Xugmq6? zJn+4ir7Y6vp`me5wS#1P^JaVeB~-H5Cdt@QJ{Jt?5IhLO=Cb$*;X}-iHBXU?A|H*#Iw!Bo_SiN6JI==LAvfH%D;eOkY9=27PxaEh}pQ5 zvBF4e0yFk#@m*FeXZq9Wh!cAWyTE|nXb9xTed#Qm8LXXJ1D`&V-zc4j$s_|>RL-sr z^FPpgk|Vb}TfBg%P+pi>@tO*7RY4Y>kq)hk0G;$qPJ?iB+s7+%kTsY5<*{Fkf%8zF zpv}E@E+eYCNgiTyr8IL}F!7puI@rcQ0R+WCBMHxyF4k69;^9d6(sf}4uZ-qVabH1e z3(O>z5thmozjIcuq$SzJyT4qgTOGcsh>%!G_s|C%LE~QDw~g-{Izc)-M$4A9x$pln zRdE_+6&Tfm3?S751d{79OgsV?C5kYG0VIlv;x70F_3>PdB zL(DBj^*}Pj2|R=0-8y!pyy5{7ZS4wpb+{U(hkTkhBC?Xuu;F4{-B1El9|#C4WC5f`MtYU@-Q+1If6ZO_D>xCyZ>Ke{wA{vJu1Duk+iM?Pqt4 zWNjwi&iooWmO*}a+5M^_5bfto_6VKA<+-OJ`UoHWfjz5o-c0a!Hen|jGm!5Aw~$#X z-;xY;7iw@^abnfdul`%fM%nYmy`*L7N{S9Gfb}K)_IgeZ&;=Lj#_+(pX)WCn%tE5C za6#p8#*^Z|M5Hcd|NDo`FqGas)d}OrOTnK*YXqT@n_|kF0T_IV@uQPp5A2XZw%~_b zJ}LA_?evXK3_1h|k{jqDDw)CcA+6A6r7CLH!LnQm_nNw&EpCm2)zGc!a`|#XfQ8$^C12^3nRg(Zl?5HYRnB!GtT^gzX}ojQv_6^eHIAI&_dI?D8ij=R>$`veZiWDYahWdJHi2Ilj(m)?R9;NGS# z;Q##mq>4K!uMQ8c5+6zZV+Oq$UL-0GjHDIWw4{U7l1vNPS{^APJR9xd<>EYX`7`5m zkb{)Xr9a%EX(6d^<0{S{#XPB6hQW#YRc_*9gCH2kO_~*tm+&_UEenZJNgtC3;ivii zO@elSUf1x9Pp}IK8LS=-QreX2((L?KWg-agQBsDJc~s|lpg6ZF4>!8WZC~;f2cOT_ z-mZ7tTTAQ2u$WdDJYc!o1XpEkL*c!YMuD4Czu7LxK&aV@Tg#B|Ed6Byt9(xH@c^8A z75E?&pPnIYwW#>|-Exzp4eX!_`p8uBqvEPc&70%{eE$rgCP`UPiW7>kMKJ>P6duKdECXsC04{uhCdJOBYZ`oYu17BhLTtd2Ubuxhdo35qb18- ze*!UeGZ|Nsxy>FsXUp&QS9M}ulSq!2GI_iD7AJ^7kOy3r93}oR+R=0`?XP~ln2Y^5 zh&Q*|89{o$#fcw`exbw17K%=9_R@X3K2Lwn*>^1Xp>|1RhwY_;?9K6Wcrm2~3a}5y z@&9xV3wrt;jLqiF2kwClq+%d|c{4>Wp1ab|n%q~$KC$0}n?BumNr3A>Q4pz!Yfo>W zJGZ^`$RUCEC`=C4JWzfQlzS(5wFvK(pu8>)*XBDkqoKvVcK1vO3->(-39cOBkSSs8Bw#OO$XRhX{`B}vltk(8i?0O^8qM*DHK6-ots7gT=nY&cza4j#nh2> zMaG-}DEnTt#&tsxZ0_-17#ah9=7K% z;DrZSB%Cp$>Zsh}b!2QrCs(Yy{%~d!7^7@)e3y5Vps<5Q%5%SXyPsArV zS*}s(_UlgMn~yfZRTZuXr+DVU!q@WUES7yyWy1905rNUoq~m<2K2)d3l9_d_60`WJN*nxvwM0#wjFPE; z=Hf>yd6s#YB;(}U_dt5H!ZtUfh4aK4O!6LB~|{ zH02>?!hw`H?jaliE-fn*SP%*2G{)962|X?G<+5#p4B=K&@PXnj#ffl(q>##P z7ci21&4xVrnKoQdsZhoGBOJDceW_|U`O&;duIrU|h~c`3R-7?@`n-255sRnJc_`sZ zF4$ZF?*Ijd?8!vM41D+F@`GR|*AJUXy(Nxeq?9+~hX0_nOd|ik!K7WymFGlBz89PWD1 zPa`2yP*vcrJndaDk-l@r7?%Z|g12fjS^RA|+=op2e&UW98D~ZPCNUK`Xnopakk@Tc z`pT-?$#C*=!MF)>V}j~O>yV`xjg`}ouD#v77wLeVA1IDUE$SMh3CUe4x(?U16m$5H2H0OP$(i1Eb-fGcA$o~#gD6>oek_fY5szH&KK#+y1BjdWR9mc&K7vJ@hyA_ 
zU#Rm2#mF#&a8t_kD#(k|bU+U!h7Z=TwazHF6h~)%B;#2sx>}6znbg#X9qrKGPW#baF<2}VSuURnJi47lQj00k^U0ao*4sVH*lv+2Vxya?eZT*j0U zI=A}g>+?3?J`!$&nl7A%G-6T_@kFq-`*gqkMfND*;*KzQukejvyKmh;(&yEH?r(9@ zg_MBH#qGC52FDA0QPe}?%_%JOj~7Ldh5(QycEZ*$9;6@2D)bzk{nr=oV1l>c)-qho zv;!%daNw_9wq?qLB>g`G0fT`GoWRZ#b6)+9k3~w2tb|a9V{_%U0rj^=Z-ygMqd-h~ zztu(KlGA*M!|2=|7O)eSBt3-J@WO?9F^(<`M``E((m11?P!Q4!{yx&DJNV>zJF|di z9X}y`7&kU?vBDdvWxYZxq@Ivmc#0rQ9FyLgT3!Y91N%q#1nbgzbbH@ z=tYFNJ*eoNceKjj&VW1L_(D1saT~`+9y2j1%UgIsIZ4e)ZXX42Rz;?^+eMAul--4@ z)c8&3(7S15M`*!6ED6_4A#l~db-$!hqfUGPsOZOLlGpE)tKv^u~Tn8Ruaq`QW ztTc=jzHFQQs1+|M&K%0PBR8-qMO|_g`XMh^xvCe?Y3nA&V&*Z|oinJXaxYjCibDzj zLqNR0Hdq95>K5L)*aYN2>h?6NR_fWBcooxI;y-88)Qfj33wCyCZ6~jHzp40lm++!& z0lae+gZa)9JZ6M-Azlv8ePj{>Gmdp z{q!W?^ocu-OdU}#sKb)d)y^gGUH>E<+lB)(q~H~Un~bOU|(0^iVRPQbP<%y5P~Q= z#qy`@#wo*GJS{D%TBW)j-u{@IxW#3;b9CndA@XL&V6j2~YvlWcQb5xzmPCEi{1mip z%Kh^9lFid#!B@>elDSi1ZC3n?#fp20PG>omhpL~^Wv+~vX*D$i^#n(OvsW#Ihs7M~02_P-C2?We3=&HKm z4t6AyCSPW8C(bZ%%3=l+Ey??i^Zi|lRF>E6HbPU$$8FDt1LXvm=eDZzC&r9|4d6%= zS>lL8Y0Tn3vf29+*$___X|Zgc#0E51Q2dojN~eu1iK5Y6?@vx-4$AlN<`aiVK;yuSL$$AkfvL-6VT}uvCN* z2aX?;U&37es!|OVc_S~Y8~{0BX;ey8NmZwW@T-k;!`G9IkBd8q^4xxH8i?>&xm0)h zOxLI>@krm+M=g(mznqYNdFq)x3Fg^za!wQXDDY;Z8E$_+MD&lE5WOrvrZBXG$Qm<* z6Ws6UW2mmm2)YY_<95@B(HS=?ss(DEhP@ywlW=PvLkV4@^-*Y-QP=weFwSZ&Rod?V z7_y>xg@k;zq$qdNgHG$fuys|U-8;%>W%~gB3{O2&^JaxJUr|MaYo~o&cMb3Dr7|dg z{I=?V3xU1F1x^`#3+AZv&2J-|U7925yrUE=#kXiDY>x9-{G%4A6LfUD{@O=o6to6%r{#x z$VEIR(Uu#`7ypi;i1u9%vCg&9+VTN`(BOm;WsU*)!(!TcU(grfXzQ@g{uJquz9Ld_ z<6_?GP7~{&BjsLtU7*!j`LCodlvo5p5jkp=6#cT}fshEr$!8c;a9A}7Z}_Cf?@rM9zn2@QB!t|DHB%b2e9Hf$-mmA8n>V|{tQZd(;@hE#BN3y zc|Z9Opr#II*EiUhs_6WD%9MTMw2>KBKvC*P+$g=qLm7jzptr-Qmhm--s z^a)NI2`e4OVS`Urdblr+=1$Uk2J3b&NhA0w{{q%}RB_IfPK*8hc>++m7H|nRHrN@G zIQJOkXDokV)P8u^&945f*Mf6G`!Y{Lx$ifHJP+|JdJ0eY&#_1vH~eOZr#1D%l0mUXARWTeJyfdK>j6#xn6f z`sOh1k9$m7m%g{##f+YS3gi*9**No5_1mER$`pF zfBLy(k9aX&1O*m474$perJ|fK_(Ko^g>;UNWvMbQdxIqUTy;@qp-3u{XR^vL%p|-a zBt;P>*cFr^>sFN$74i&KH%l|%M5%e19{Lf&IotqZCB!e=xw{wma4HEb(_a*f7vuKp z$Bfy0v`K%q@h9)?Sl$A(zqyJWr0nV;y;`;bBckh5Ma0Y09PROE+#dSg=J2)|xF^K% zCkonCAmgdQ5>+7QPIZIfAuRKm{aCkcJDg7Y$q?Sy+3phv-JxU6~y0$CEU4vD6{Z_mB%tA;F)1S<2rc71plo|N^HPUm6l~c z%44fXx^ih#B{x)6lZ}z-0w2sZHfQ9>CEO=rjBRQF0A$+>$P*@S&?mqA*X~P~Pyg%{ zRnMrIz=y>SZMqW6Io7iS`90$rYWqz=;sf+vU=qAkdYngft+5)?-bZ@3f9tMj_%#imK4x8CI}=Yoogs>DcB|jr-IPXZ@2iZLcPT6i5Z60wz*V4YSMRyS1^uPdl9l$3R3qeE5K7-*@me-~?fHfRXbC9)sZ0EyHfs{Q`1C zpv&O@9gi;#k~8`=I_*L)UdR+`eVv}+$##e6N>R{%70MrUnP+1X-`Z$Jb1gNy=hKq( zmgnlwgg8yMglp6YkHxXlS9Z}j%H*TWhlSr=g5`|{HLbh|=T=zRqj0r&0Mk<&JPs^- zxai%YwDxoHAIT-GU5bR>_sdwVdqs}%&p5M-?k!r3@h)GVSdR|~Vj$We!a=!?`C7*Y zvUYI5$!g}WFlx(1C)i{=`*d+;9u(JE9Fd0!S;PJAl*T`sVtKzT9d^ zEKzDC*GQhutKL{}+!pWKh+;6f&z)836HUqSmD%QzysH)!YL|%!0ai(%Eu@#ealO7E zbhoy~SXRLTpfri`%9bbu0+L3=)WnPpp7TWTMHS19$8nHFw10H;rM2Lha5Io7qy83> zUMl_06sBr|D{e!J;^O9xq*m~W13+XS1it$eI^^Ym=s>{5W%d^5%>`c==|jeY&w}|Y z^c|C55pp-P&p3X*KXSHlY=bo?82XA=p=Sf|v{oo}x*J4LM2oxy`{}M@n@(E&P=TXNIev4jv#v=IO z)3E%0b=G{U*14E(ZVWjoQDA|p1kOI{ttj@i)vgTugMAXCLHuRSQNVVg87ONoN&Q0y>!Zl|LE70v4k^jJw(%NhS-fM>1~|dzMGh4 zUZpPdAZ?t?V{um2ii%pTW=XI2?p-eq7i)2^TiZ-6jbF@ChppkhRKez^;J39GIXv5q zFH>Qyv(%PVi>QmR$cZ|G`dx+#e;T*$E<9p?qM~E5SR=6V)V=Vx~7r0G(sV(VI;7} z!SAOIRDwUa=*Yeuf7>CBUhC--;;z3$L{IH=wQysEGF<@Jt#4xiwyL-$FMSp>zNbFQ z_Nod~(EhIQRO*Naz?Y6jrNjW7#2yot)EiDC#`9Z|`%uRxhDAz8)n!b^abyO(;Yzp6 zBX?!xkVn6}d^6tNC|Xb?_)Vr_vKf4prJBY=&FvTtte-P%Sh^srnyH?mz)gnOIxUFW z#aXniwXLFQw^>^KGSu;a-vpGqT_g=!>TU} z>rE7E`5KWSyr2Czp>wfm6t9KV5G4~$s2Jg(Rj#~qzWav?$Ewy@>4_8Eo{R8h?vjWe z+=@8!Bal$KHd!hGW2YL6-U=U8mva@`+fE04$6w+*II6S9LSYVlt2&ge1;? 
zGXL_pMDfJE6Du4Z?z}(rJLrQMpu9Guj8L735=L2-EC;PW)dGX4SpJlY{6!^#n#`GE z-Lj*hdD>cNs^8j?cUX^GJLC(&YRs)EpYjQ=mE^xPSgk=eFXk#p52N~@ynew+;rhOd zH3=5*L86{Fle9wS8FvE+K=%~GA}2NQhg`|Yys{)cFu({3{Y@QjNpxh`jk4#|@mMJ= z;toSRWb568pgl#%wRO2m%w>G*DMggqby7wGOB4~&S7UYI1Syul=AbP^DJh^m6D8|!jSbA4OC zQ*FvirD>;0Hu|0rCNKKz#GT!4{lPD)Nt&pBZf<*G&pGjm?HvcEvCLXpu=;)BiH_>t zR{K2q-C*r8Yrb85m*=K_HGHTWOO)5hi$VgFToRI)d0xh9d2KZT?hXdFI|rP9Fi*mC zj`oTx@8Q;xbl;6K-C!jRAVa+w9&y?EwJDjGdDpTr@HDr$Tc|y1Oe%ru4z=)xJnc)* zKj~18;O(6evos@QJ%1?^11c`2xrx6n^3#EnMFLGZ!^bMb=J8Riq1Fe)sv$BY`P+lS z+~y)~Uvi9|#eUA);ef%-1%1B`=H$~ITSu>eYV0~l0%=JW>E-8xdFC{_2er2u8d3u- zzK58w?r9lY$lL9o|74KW8@n#}icXH3=4Tq|n!#6E6rYU!)UFOa@&H6_Ix%CVN*3G7 zntHBbQA0`|;!Vq-=yNN`rM0wBl{|QtP~Ee5=|G_qi^spR{=e?Qt!08QaRCl!tj)+8 z>OCt%YmPhLNm>v}_eG7U2M;JAu*3sunX=C2NIC;+u4w1@qKhRU2OV}NKxFnJ^)+E- zBu)1|o~l3E0SX^Go0%H&fe_d4!AM0BvOj6x>463xJt^@+>BM*|VH$;k67LX-EY*02 zfIG@6(}T_MvXD-y4sUbzK(PdwF4te!0hq+SC(NSI;e5BxDAv2P@3$}I%Z z#r&VTIqg<$lWjWmuo(ZlJmMdzM6kqMu55q}GESnK+_TL+`SN$L%?4lVnUDxzCk+B| ztaoC6l$WQ?g*@~lKak_fS<@}6sHxr{OKmjvs8S+d?WKrxZOd@3UUhj?w7G0!C-x&o z)wgwg**u_0GQ~N83cn)=*r!t)227A_W&59pU{!*$;3-#m2k(j9@B}T%127egU zbXnB!%}beRcnuMLhNF1p66+DlO%@=$d6NZboL5ah_a(q8Jl&9W($|Kt1H&=jyzpOpV?4#!Gj38HrY+p*w-bP5(FAeKslc<;xuct_4iupwzL;u$jBK57QKc{ z75pHJ5Ob>j8$m433>7^DA{Y?^HB@!NGU`bHJ9Dp&b#fPEuAvZf;6->|#d!IL$15`-EW0y>ks0CAg9K*%>->6~M*lj}`D$z{aHptllxUl4%V8gt$!z7I=yklMu?{4J5%YtQ%>X2$jDwz%{Tp-L zvRGs6?0X!B8%Nu>hk`>H#loTa6&?GKqWk%L1gMMYefoS;A0rr!SH0(0h5w@~Y;Xqt z+5!FnMpEg`jn8>_Lr$e1R2X~p`MgT`pyI0-6WY{~MgoydesdW8@V&5bddQ8D(mF5i zjf+%LbhaV{zYo1$?H!5o$JgD;^30qJUpUSMV+#N!huwq&2=yQf$d$PS`c_bUBIy;3 zF-ZK~NA&=YQieHpf@&H?RmdTLYpc9~h=$!s75cg+ZI+w3qe5tj4_am3M+ji#zZqy%80uG>Hcf?no5R)5Zq-u^?!X)A zqQ~{+kg{zluh2@l39Mm%;Xv)PP82)RIG@E@+_^mYI61p6kTxQ-On^-ItO121OkOm; zQvay{C*Jhh-lElx?7cCK_%eaBDyQkx@&Wl~q51>f`sIWHFc&_Ox$Pe17X|%laRaTd z1t*+gD@dNZ6P4?OLR!S;9HaT_$>A_2d$lk<-7iV2& zs=;dZbDPwM$xOt|xhY|-N-j(Gv3kjL5W2c=qyPjbVibq%w6nu=f-vqRIb#~TK$B&G(RzcsngWJTp5yT z00=xt#?Vk()`sgH%GG%;xs~4Kk@a`g0z%}FmU+>XMpenv;*nmW*=we(U#b`&`1%It z29W;Ydm_Wr4E1|9rB^9QR>b#)k}ak62Z}8hU8^;?eCv0oND4S|tXDb?zY)1^!4`%iFAafh;!mYJ0%P@DIbl(nn)A^LCA;_ z&5bcdNfAj^0`&TjgnCQTZwqF?6obKy_Z9o!3Qp}g421dU!!Xc!SVXzt*HDnAlF0ct!Vq1+CjgwoeDJxKYxIpv<1C*0a0M|z z`(HN&2VZtF`O^r3Z{v^`nM8&FnA%(bjOCX6Ixx-H4M8S*3IxmtdSclZ2o;q9F;BKT z+}YQg3;dfLe8_S&Vagl{J_~dzF}Oz(E*g5<^Vy5H`*kuCAYl+Kfn_`w-g z6##HS6@fYc01E~Io;`F)|NS!8fB6jTMC~Z4W2oPNZ{0`{S*OMAp*emzR>Ny$C>vE9 z^I&UsX|Wjbkh64z{rTWOrPIfjQ@M9n)Qdb>Z4*GYhM#wL6BJSIHc1rz?9b1?XBHth zQX3t@hBOck7Ntg%5vk3XFRxIdL%gwEJ5Xwy>giXt8K#RasoFtC8< z!E}BXh*9Z?0o(Od?dz*RjUZ7YcEtwu|#VKBgpkzf02*_~+ux74gADTc!8j2pin(7Y$pqkV=2OR zxy%c-PlkA(d>1vJ?q-ZSty!l(!RYXGt#rTZ@hhP6m!KHpT81k0oUB=kl+Mr#JATLg zyzXR`5qsZdC51R#M}Gt~8_fh^Jxmy}OjTX*}5!sqDJeVNp;G4}_ zvr-MVZaA~Ar;JsnXSdh4pM=qL6rUiL2#5B?yC35(d8luf0r$uIYldK9ZB9`teSN)? 
zhgPoj?FAmnl7!vMT1;r4r)Er>Bzh#IBlQQzjvCs8r;4GRvA&)}L_}%`@Sig!0WUE% z$<-J0L(?{q^&%QUHDn9$?+`gqKmWN`vKLR>tkSc;P^Nu-tJM$haPT!;7MJn+y${|n zDbc<%g0O3(4%{yAip1!t30o_}69B_D&OHi;PrfuM8Mo!kW)pz|ml{4_G2+V?H?6Uu zj=3Vnc1HqCG`jl4i@>{pxD79mU6LyKV$Nav_(=NiGof-(DKnpu@A%!gjAe)1lv0W3 zuRiL=^g2g-pw-x*6ylwW73hL8xo55o7rhpf{uUQ-@eeAyMp#ixQj|G$7n)}JO-Nc2 z@A`%AbB(Efbm+T=^g^_ES50C+3lE_rwkNdooUKEjS8Fu+e)xR1mhs@j=j2*9+&NAK zmcl>kY;4BM`LHwiA7Eq`thNojmR6wo``+dRO)}tk^tHmI09Z`G=B(&)bC`Y`04YT=1WvB=7AZx`UimJ ze62Gqm4Ipdl612I{}K7(V8UF|M+*mabYj{+`h~P(nwIic{vf;_hHW!Sc#q2$z&YD) z$yh{4mLDtdCX1u__EsfE4|uPHt#?CsnQ=D0o5J7qw9dj z;lGob!WR^66TtfcS4R*AWZHUhl0tWWl|Z4*+aMQ^DiGL@33a%Bz;FhVM92;h*tFcQ z5PxTKd$#WEI5lJQM~MVXiNd7WZ~j+rFBJJ-6QA?7 zgZjq-3u<33c=Y&Kk_4VOLqLd0%}ZSOns{E#>N`xST!?9JDJI>l_tY?re6l>G%HM)3 z7?m*=a?ziai~x zilBB=@GPHiJle|Vc43;wMs@#2ds!!Eljq-E`I$GefJy}1-E32vc)F;vu{uCPG|yKa z*)h*-hFBBbM)WYDjJb_Y!1OMEqJoU0hPb2_i+yI=OuSv_=#f-^MoiHZpM794)qhb6 zj|ZO54F+z5Il5fF_lH*~G6q4&lcZv#R&S1w^_4PWn%Zfz!+8XR0<{bh^FX00-Z~_) z#JK1lY~POXEU(*%vHFIWJRoD$9_JD7YA*@C%WpuUqN|v1BKjb>ceVpj5nd@1A$#OP z65D3eW+>-o$D>r-D&Nri48N@ZJkMb%vsyPYW&yueR3a`PR!pLKm$*C6wT<8qvy5fT z;!lvEJcKihEjLKa#?Z*DCfOh@E+Ju7WExLfweBxhVjho%R=SRoYOaCWa&oyDrI(V4 zc6D$6X)?j?ECYOb$ra-OAnKICtUcgYbPifp{XNC}1}e%`Y>1=!iDRg8hH10V_jKg1 zZ74a4MimYM@QJ|98yy35f6s#io~xGvbw0oe4xeQ!G|K*1DX%;xr9%nWfEzwUG{m3wphL54ILs+h&c}GfwBa&c?Nu^p(D!M>!HtqC6 zfiMr4L^#-}Y>Wnk%GA@aT$Z%>_=3e@{>J5`CL*ONl}sApV`$VxSaH9C_1j}j`Yh6b zW71qrPUlOoQGGYdEz>^$N7|zpqU!R5KX-e2jrbBsl=vOiJy znFi&d8=T-}ByaCS+n@l<7D@BID!NXGy)#=d`X01weny99z zdKwxSu&o=hYcMWwfU=9;s%rb>k4g6oFHh^<&>0~-W_$ua{-}U7w!<$+}MnO3Afh@<^xzy#CBzDC1ao+#{2=M`)KWa(;{IdI-1$-?jj7;zy zhr0a5#Gr=!{fAGaEvADI&rHeCSQKtm+KB03$QS3FQcOrYZu?txgcVM#jT?logWE*h_BviX*eNnlByuE$sDShpd!|L zibG z9eOw)U_`IXbW{;SShM(OH)D{K75Q;M%lU&NGO>`l)0&AF^f0B0$bdw{IEp}ObZ4GC z5EnCiy5pAlLeb(S`Bl^)T%dm(&TV|m1M7U&Fn%IOrH8y9OHU< zd<5n!?fKu>*fSDUUsa8wF!4ifyVH!}`qvzTXsJkU*@=)stGeDa<;$ocaC(5jYK3p26M!P#| za)6OSErES@hX5nPXTwoM-L2I?&9TloMV}(6_G$I3Yzr7OI{pCx?BoL3F&rltID7ax z;Q@s@3e9BwkbjRN{JuVSU7L=rDUXg=!Ju3OCM;Tr9nZ@FBOJ*p+ZGR}fT$V%F8a&t zS6A4Ues2PyNO*NXdyf~~vj*R`nMg>I>CCl5i-G+%Mt9|2HJ(K%bBeWYP{A_@{mM?lxVM#`ZWX*Y6gYka20^VrNSdHTB=4w zsr;HDwybRoP(uWcaJJlefnP0;q|apxOXGNli0_fe;@5qxJb5(AQP~-0P%HVHPvOi8Mi~>r|apezSx**)}f-AI}3L^e)e*%ZN}uT&^c};!@bp2+|DI5-|PMsr0zul3>0Y2^OE;dKZTSE?o6X%5pWR8buZBEx+Gw*c;uppbv| zFI2xK7UNiQ0hyO?&_geTl2jnsq0wVI%wRy5l}ok$MzgqPb>4<9BEdcpt_1qolV>0I z2HFI;ClnmCfjoc+bGkf6YgZaeJDv*h@vM31pG=b$D=8&4KaA~(Xg@`Zv4f;{YF$o2 zG-gY*P99-YATAqIQ>E|j!mWYQ2lF%If*nxA1&K;Jy{GM2I5wGBlR)=&KR5U9RqrC;j~;I;k%HMAzU4%j!MV_3)$#a($y(j- zl;QX|df6|R1+!%eY{$ti1|Y#wB1{iwe&hTip75Z&5p4qoK?%}_T#N_%Z;G)CR}D#T zH{SFgEwz{aSYJjWYD^16jb<)~XY3tRXGObDatwaW7g3yq@I-L2S_5VkHcY0K@D3A$3n`*7|^nz>kZ;v!dTOwbyVKesFF zq@PwO)_^6s1d$N&g~S!bD)C=k!inA69dRJX!!ymQ+Uw@p)DH8C!j1Id7XF{*;Xi3XCGxZMZ4tw+ zQ-hfq41Pwr<^W>7*(D8Qt{v32T!F+(^^B^Sdd}#E5EvU$R`xvh4$hsv-FilMClRF} z8otyu`_~!Q@xD+wn7oBy;MRy>T6Z-YsRS&^1I=TdL1cRLEfSub zQYAT8BW?_nvXWbDZHJDiF~l}mY!mZydQhi6ij62$m;eI%z$@qnx?sYMmhnsQaViNq zNn0WHxwbg#7g^lBS8^dMe`6LfX3Otmz-gCBka@Y$xcs4Soru{OZa7wNC`@5`c99VN2l@m;x=tPQC|{ld6rWu$So&# zOaQqG6sOt}$&(Zn=EG2*DOkYk)pp`aqL5*kqw@TOolL^4tI2!jSj;Qk_5kgdy!USE zO(7(vpaQ)ip@!I3s~gJ$qhvF?Xf&Uq7@YIKe|y4aH=W;q9y0#pj2_rMu6NE{oY?BC zyQY|3RX>IUATMHMRzDaHVCC6aYu9;XUwpTRTgX7bLj8<26Ax0gvN?7QxQRLSc$j8y zB^n@8Ryx4}3x@)Wdda2`_@=vQooWA^GiH%{UyVZ9^RzzQ41pmf%lnG^BkQ>`FiKgg z>!GDdH|8t47e<}w8kG_tCs>d|_+4zt<`gT5jVIP?bB!=?BHv6HIL`L`Qw@2`MY(cDIWq7Y}1T_+D*rcl6;H0}@WRv#e%04wi7nj}f6L2Q{!2+#f> zt{&Rpm7?{3oH^#)?^1+}zL^94C0-a|l!678G;PREJP`5E@{s7447mkQnq9RHJ!avI 
zlQ*Yc7{PnO#ub^a89R!*SIp{X#w*vRX1ssLI>V~v;_8%O&VK`>V-TV`1tkt>^?ir85Zc(rMGvVyigcSAkA*s$m^NaSbYsLmkz!geGYrg&q&18I4y5ddE=wt-VrDVbOP z*Vn_KXy#DVY!}(2cQHd~R6`U_U>Y1SDk@j86iDPEhVl>+Ih5W$cHdAkAWU0M>MslE zD|e?b0I--vZU~zP>*!p;wa`hf*HSgqepn#_n?SOPKIL6?`IzPE>dkHJ#rb9clotm+ zwc$2e8lW_CRw)c*C3-zqe*!VZ1JZ2Zd7_J-xrTQ1A@y9DE|4I+0z|MvW&DLnR1V@e z>)~I0FLqDr?3$1n_Yfd?9b^D!AjlAml`SkafLlRI8kcGjuZ|hvweA6jud>2R%oaBP z(s5M88DCLfxShZvc*Y|t_^lAsHuE(-xil`oL`|^}1V*S4Nfg`TWH&+9P{?*&u89WF z(!4RTk(nzIy5ta8vQ`2@% zomkm(Ha`eI%dSC}hrIDqo(8aBGcsq-UV%sIbK2p_X<{Ok_QF#m78nTS ztC;8BJU7?7-99J!>KWJmvH>IDAvU4oydUel2xS1_;bZZ8d@)jE5A?mNalx(B=Ovi` zYO;vzS@LC*Jk{Xm^>~#Ak*%Jb`J)yMw5-sHs}6+GFh*l8& z_O^l%1!O$>)+vY5@mBnqC!3Rj7LbWN0L^3FSO2x|f(>}7=!quo?~|YG)i*|RB%5%> zxq2~1Fu8Q(Vu5qtKR2}1u4#%lkvB1ecGU@2}G?Vw>W;>@D8L)P*sNs15 z4{-tIB9dC1?g%lc?a4958!8{%AG7>GN=mO(>eGOa%0T4Zt`EzlpFsqt=v_~Z;-(Vn z2XV*2PT(DEet&aUcz^n&$!m*}PAmXLwH6Z5vHb#~zFBxt+SM=;3%g#a4Vti6XxYZg zrN)(zyP#O@6+_I+@HjZOd->M0nVS6q*RNB+sc4t%wf~f2?sf8P3$S}4V+%l?wp@J~ z*i?@XJhi?et63TBxvvD&&%PxrljH!GV6S7RjdE2Y;zPv8FFbdwnDL{!)1vEGEMVgG zhJ}+%EYXwe{-CFMu}t=}U9XuVCnNO1_D0v53wWIBCL~f~x6m6<6GLIW5Y-;DU&d(&B5P|CIV6PHKv$gyUwI6jCCFT8mp4T6NWrc?JjR&me)OwOsW z+AA*KM0N+n*Z+rtj~=jI+F+FKGOf<&dVVkZW}J(I)Gc_3&|5ye9!jw*>YF6GYi6rL z^p-oI9`~@69}9mxaBT>_1_0!2 zXv39{?f@#%SUX1iF`tQ_s?{sdDar;qss5df3#M#lZ^aScQseOHI<$xe7AbmF=m`wM zMLQU7Yx|nrpicb$5oVm=@83+Kcej}3T3K(a-s>H19T+16N>Pkgo>6S1OG+}t>P;)M z4Jn+00mv`=afq1FlLwsG<5!A)lB}xWv%GhH$T#1>^ng}o`O|A8!7sev14GLL@L#bM z_Md@O-cug|^c|OWyI(>@nWKOBhHW?Af)NCarJuzweD8G*voEP6eSf&sem7`n)s&$c zUZ|Fe!N2?E%7Psbw$hzEw$(0elGmgfMpcT;f1@O+nlZF?S zYM_r_oyn#vvX8i%XrS9rb?+~DRsYtQ92f*0{{`yFo@c44Htd*Oxx^*@$(dr5_tjH-Bt5q|DXVGp33hQ` zJ8l7>Y@dXON%N(Xu%XG=Gz!UF{c^M0CyVcp4JYnChyai*Q9+lBm=3c`kB(lleyhsz z{qzU5Uw#gQ(^ff~k#~(KPLaj9+uQ7YNj?CrAL{>+52-$3qC&UjuMJ*8Y?qjn=!>~X z27e7cHpFFm-fYdb&56ShyL3=h%>V~R0d5MNa=14I`@fE@<2$PSd@oqfU{h)rwp<61 zTl$x@d`gfSGKEBF=EWbV0X_{P(0yBbHN_TxWx*J{Y(?$9t=IMAzJX+C#bsc5nrNyP z=$b|u)^t7!?yOugjT#`vatAXe*!yt`x1>UZA8$2PbUB2^!QbGO_}t2yKDuB^$qU=N zB)l#D9Ax@zMssj+JLyHinAK)%!_-gtdvY>1p&b9-_iX~0yRMb*8>rpOkP@lH=ygJ~ zf*|U?ZN)7Gaf2Sf>lwCuFh?!R@hi9hpV#H$lPZYDG<#hTD@pJXIIm#{uvGR}Kn@@G zv>ic27#(2KivQJ3`iu@St_sz*2_ecBH_d0k7ZLwxQ$!qf? zk)~da=2;8n&^|8_@z}N?TtSbnWx;Nn6#i~c5n!;0_J@-Jx9u7YRUp!L54N96nx_%T zs>^1Cgk3TsPo*TDT$QPGBhx~x{AGU1lRckMNi4zYg@7A(5uAEWiZD72;F zi{JjJeUV*g*n9J}Dm;U{!xOdX;P(23^izcF&aH`p+D}@Lu2)tEx#SKtK}91HLo*=6 zI?Ony+gGea3#N#+<}(uEAV|rR6~;tMT~xlm^81(BvV;qFu3M4yCkg?Q?|Xbq#0y-? z+P*_&m_U#;z z$=+h#C+-h4Du;)cx705A?TZ&<4A(>{jfJEaHe3x*b}hGA#K(^^AB1$BH;3KN`*`@+ zaBscnb_w*gEzM$2tO@BZDxSO^aQriY-X-N4?D-c3sB4KI~3Z@)N~@V z4y68o#|y#X+76)mseQ)?p?XPwg`|{W5>)YO2*(`#mz<)^WC=O9V&uVOw=Qt}B z)J2r2E~}L2Jimk8YM4O%dN|O(8$8-43N@b7&AgsPNmg%>ON!4tv48T>ZY85)=f<>Q z-9lh?XIOFjm+=! 
zPnABUN@rZH94XgxQnT>3#H~orn{bv{({K%BD2*WNMp-iv8As%QN844|nwaZv1N|FG z_S?2y(+72Q36H9wv5PW=PFC?LVWN$rRsK_qi7hNgA@<^0 z9|D<$M1-Vp$UdahClDJQ%%!lv1_i~e^>uYQOG9)dyJH4s>u>jx!p`eZDZfC-{^+N~ zR#4LW2y@B!rX}$mp4rX^SM7nL){(*Q|AbzBq<*ywFWESz$Y0*k10-X2X<+QdQQ#tf zHQg|h4Q`yRxi4A2E)&=KB{z=Fm$zoug4`Ma?H5rVXc3*$Wb$R2{ z4eyY>?$5GwEesHdkP-g{h^{O08seOQD*v)AD;#?|ZKt#~ZM6}eQv*#FyX(;!x%imu z6U!3HA1R)A%J*5r@2;n8Wup87ZY1iD>KCYFQKi;~p$gGiQu{w?%0<0>C+lY;FFb2r z644goeOCYlIh}v#4Iz=IFTr0ha;o1s3!L@nrKVf_tIMQF1 zo2^uSbOan*>dRQOoqZrzZ_u<^0bw$r)5fM-@Ba_G6dluIrx(^~`TIMU*1Oewp+GmG z@U+Z()nAGj5qr{KE0OXs<=++u__&9z+QB`xUFk-TrKNw%*1v_PDwEQGhGXDYO&p^LLPx97>?BZT92uKcMGqT>fZouP6 zut-KuZY!(TBs$#@Mc3+v<*lBArpUpY+T?lt2fp`XMNt0TUePz#z2>#(&V2Ql{a+q} zn!V5Frg<+)Qsn<&TpN0Jumss6KFj{4`tP$V`usNn7M zsXG{pcoC<^$<*DZROS>0NP#}#AA)X9e)_O-pRa#RPxde|D6F5B=k67kUL<>gD!3o; z4&ak@IG6LCLhvuUpn8h7@du~r*(ued+iaEa9^c>K=tUuaKJOPMb}?7L{fPY+C1#=> z)U>ic{_d<3Cmy2=kYboKMLaO+qBJmIH!w_udGHxnT6N{g%9dbb;{^)3*%=8G)sw0r#NY^V0y5e#}T0Wo#R={!iZZ2p*1E zsK|E%UOy4VJ&z`e?O6)TnXfBCP*Y`ZD|1ql-c>J;rWGd5Zf&AdH{2WQb*CK40rD8> zj8=~#(a9!W?s)`atsQ$Idu{r7tx12u2#;4`H97Vm5nj??E^t|@J5f#?q8IZAe}AZX z`SB&ojPz62>g6v)Rs;-D0~YH3Kz}e_1Y;eDZ>$*wSs#|2u62G_T28zju^|9uptRlc0 z=S8FO`z_I@vM*EUxz`s1*&38o^ugPcdWW}=bmL^$E#to4=UDVC-)phzKsF*$9p)CR*xvvIXNCv zOdk|+HHVc~By)`7>Zrm`nJM2kURvTO)NQg(Yei>jkGl^FC1bg=rT*}FyIzaX;(Q4u zH1G<%FSdO$?C>4e-`?6iGTwjU5wfBBv!oGu3C?Uy9GK5GFucMsyhRX^OM0hze|k2b z1-8w~(?%E?hPou_q&j~56_-KhKW&|; z7LSYqL+Kf}anQLd+6AV0VLFv`2B15W^ zRfm@4wrD#=@b47O2cjd6_gtS1IcB zsg$QM6KphSWL%984fG^AvObi>`f-c!Y7vR`;SWb7!F3b~V(d8MoVEU$#-66N?$Py;HQ&8L93ff=< zYEfyU;gBA__3A`s6Q-^u{41AeSpx${0QfrSn#s8S;j#dIX*s|bGc&Xfu`q+Z^1Xw2 zlg2stU3LNEmf_iL)d$sQ(o)9h{YQcL%HT|!JiTHm{>^v@0$*pU~cTDJu3HQH2oK_G2(-1biZN-M3FYE z5j(?LXhiin(p9q>^g0%b;J9M=)=o248v3aA+i#9=wkYB@yZYI3k(33$@iFE+S z&VSvCG@;0Lb*3B$249gC)yTn=j4$FU%Kc*N$BRy7w) zPK2a;#XOO2$$c*Z6o*mE<=7LTiyo#ED0C=E1tu-9(xPzBuPA7@6V89LM6JsB-IFPX zdcki7MGXc@+&E>-4JR+&?g1|_kGOigJe<$|AsC1zvA|LRXydPE5L}V9W&EpOHdjn{ zv`&r11Mduke?%1{`!6G!{>yT!$6VgibRSIE0&1J?UkaR@F6I^oA)4nRs>bIRpx;$K z;prq&nyZ%{YJ|Ag4GqNrP2Q(2a3^r(54)E5a$;Dw;PsSyv-68O6v^RIknZF~(T7uE05{)Vl6iA+ z=Lj%51EX|ah2H->XDCsaPmOOT^_fP39vZF_{>Y<7!Hz)w^N5P#RS^wa+pM_A_z6i0 z68`az0wUy+p%tP0K6?KP`#)ZS_X`PNcIc4j6zY4A_Xt+eg*a68cL0D}I2EETe=B;~ zZZSsb-#HLjh_`N8DlcGY`9AlqV2^W3QGHP5S@_I$#Xhp1+aO8q5Kb!j-li4I+RMs{ zDWh#|%NqsVN_zvSPGC_J%6UqT(m{OEa$K<%=qD7y^Yn%?7}1e!$u|O7? zvzF$#aE1rzHp!Upa8unm&DuvUWP+S;I?4>=`MmJGHk)E`(==Hk~)Fo%df%TW_1f>DLQ6tNTOxDorT zW@zMfbNF5S(U()upgNH0EIB}blcp^Q6-m37dIhT`92>{?MqAnwSYAiII2r)RN^vgh z4h^9QWGO=M;CziC87Qa^v|(P?y;gQgi`=-xpR7OlcOjux%!BEcLya~bvDJ&V&7xp~ zgw4;#Jrv{*kJBrda&9fzn^=Qmh602s*PrQGYB5P9M%Vt2bS8L5EM$|1B zMfGxBW)1%<{qvQ}g5L*ja1&MgpFSw5W}NH>^sVd=i7Uz*U3#bXs4+3#lZo3WxaaB zS@DHJZwutZDC{gWKP_gh!&5Dspy}gkK}sOTnzJg`_|7{6a)pM#hekj^Am2awtWea< z%zq9YZRfHA;>KNM0XKyWIpru8q)53TVVl#F&b#z5&pi!ew26via|(V-gOhR z0aZ)4-%Z@>#k)eXbIa3Jvouk@zldQmJuocQFv4Sbbqb6Z z$z(RA46jWe3=%Yhj$Ifb1%?`db zFc`XRDZFK2cjWgz{H`-?Yo`w~epfo*3>-Qc@D2O8si3;{nDJk2Cij6ahEzl!fZk>1E_GR3?={tBRW4NB+6(Y7*ZX=7T3u)L)7CeqC~OTF^mP|5#FLyf>t< z#-EqxVfEHu#(?u*F3YCo8X|u%{^io0ua=}}@f8R2S_O2k9Fi9qo*lo5l^fM*m%2{P z%;2oz!d{etB>cO6^#D~31L$XWchjAW0q+ao!Y`5ElCH;V%-$6;D5ifXMu503Q&g`@MnlGRQ7T+dIW)W{ z6?xT53!5bw=}>HzVll|070b0bh`Cn8O4?N%t8;n9r!;2Y(p`7*ZK+i?s>Pu% zqMq9fPWeETgbPEwbf3GKCjYcFP3xZ<;-9rxh~uV8rE8qgG{_0m7GAnlNS!VOyl$F% zEg4D)*cMV=g;V#3P+7z#4y%0RhsNP~Eo8TGE0ifV!^I_oF#>Vy?Ys>`LMKc{9tFSQ=p^aXS4?Vk&{D;? 
z`g`j8iC!{#c~CWfc=gQlvr}p>;1AQX{X=Spi1|N8RxTHjr#X7@D#^fo0~ibT|3Lm2 zE-alVgtn~KLD;-J+YS53fkZu?S78-CNIUy{dIZixMuI*M6zT@hgR0fz2dYkr^GU{B z@;Mc{VWA(EZL%HzfTwx3sC%#ng-#^>$=Ov}YeIJ^ILBy36RQO@T8f3E7QWuQI+Pk8 zelp3)p|tcO2NlAGB6!icJA^W4GT z5gW!U?oN_PKca2bMWA3vs9#L{DXR=dttR#-?ctL}C>;f~&aA0tmk^2n7#uVg3B0Z@ zyU)=_#I4jJOH7qaUbAMCc7`a`=x|wCRt;ekf!pST4|#DKoH~gf_tso^@{X1whLr!g zPW+jsHa*l45V+)`7shVqXKo8zB*pm@F=8ZwmxNMY4|*4R)S*w)+nAm>O{0`mKjlv$g1c`S#@ll2Zu7uP!2}C1eZ*2ayiC2S&;dC{!qfb0( zOg(hoIui{uFC7!&nzUk+se;n!uFlMAkUAG2mp(YB(|>Qwlf5*mvE*S_ROV=5t$MG% zQ8eh6f@}I~KSigS)j?!>sDcn4sx_xo+)DT80kBQNTu+8&qiP&CnJnxRxhn#6|6mm_ zg7lsQ>29-cwP-eP-waQwIgfaMBu_ zJAQWxEZ!Ed(W+3O$E;`Lt69qcJrD&=Kkhf<$4dm}eS665`+;L`*SJQ27$O3#vSUCV42LcJBV7gJmks=Q-PSWZs0NYgMvu&CQ(=q;$#K0 zRuY(vFop@%B@x$MvlvPk73f(2nD;Fz0I>q~>cYG{7~#$#^KBh^C6v1o0AO^Rlo$=8 z*-sKDrWNJ;n@oE$q#Z?mV(Pb_H&2yQ?dp3#X%yfqh&zOgPcqsM47;bJM#fy4lc?sc zp2kxLf<1T}!ZaYH_AdeffPO-Frp5YA9*jjD{CLW-Ftq02rh zuyk(j+PNMC&xUkbsm6z5j}9 zpH1tH3B`pvVCJzm=9t|%oObRbc3=8_W?M5sB97xXat~I9B7^pV={VrF7{{Q;7Yp)5 zl7L~_ze!hy;1e7ptyGQUk{hxuVUaTM`@g*7zHlKWS>&KWL6$us1;W^O##Phee}?e2 zlch!_SdB6_snBVuvFeT{S0>9qhnS#^7P^O7&b<>AOX7oKmtM21>Lx|LNfEhD^6#(; zyfzm)u!tN%(WzlQ)C^4O-*JF_I@A#GrYm!hD~kS%2*|oLI)jr{DaCu*8m|%HJh@RF z5ojXqSAY3O&!bl;S~$MDpd-Ng+E`M6-zj-_RGb1-x<8hoSu?Lhglj52=}^D&2DWY}4FzP2mWMfz zt;ixEF0E=>??j4ocZ%^9<&bh4wVDyJ!@Z{szO5f|y$xnwjqyJjTSrG{mKf`BQH9Gg z1zXX7D?ZnT`oC5;yAWXyzoK3hHC5nvY62>T02VWkaWrPEp6htfZFn})=PINJJ)0C!oN(ym&dHvw?d~L>bPDC3BHd75qi_?WUNUN3 z)o^!Fe*+)&>7+xKxJjmfA;YD(l+}cp@`75BWL#wiKeBOD9|>41KW=96Zqe{7sAI-NsaJsn?CcdE^EW>o)!wR~a;^)2YE5GWe!DS;{^==HVTdP6 zYD5;F@%h7`*7Rf87QKPj^MY-iZdom_302axIl)Qk`Q;f)njYO#wXdz382Zs2)&cM1 zWvYF*PY-c`zNcE(GrH$wAT|!AIB$>C&gg@56-ND>@Wi!2YPv6fX#N`C5J4zaaQUEL z3d?FKp%0^EFbUSk==?$ZLoyRBQ$IOvK#v4ijobnE1gzbCsn6Ssx^oZ_#ctk)Ql%}h zW?Ez%rfiY=c|aJghJp`w1D-lxd-#xkRsg2=C%D?D7^=Y!QihWQSt}MIb$AKlmouLt zY!tDq3o>7~N*Sc&9lB84JEXXrZ-2_b9;wmCYHp-%HULA{InN|VgY$3?qHLxkoy+bQ zf&4LOx6jFuE1=}lBB@g`+4WAc^+}meeGuo0#cRY^dvcpUYEI9lX7sJ&>itM7uesws zteq4~uEWB;WA+2#v^xfntL+^R-c7fL0-55qluV$aHZ-0$t%6Yl2xqNJSr`XYJ3(fq zzGA>`m)0AbcS{cG2FWeNw=L$c=p*H3krzM45yYPT`gLBLiw*IIai!`Y@r85N{SLR& zeIFq=cPqLkulBH_uHoYJ#E~<#ND2?IS>xnP``L~;y5&rRq1}c+Oqwcz28cCW(-3Y_ zXIG|I)lF_o_erRwMUE^a9vQm>CpIh|pe#Z&)(g|Fz zw`B&ZCYd4#1jaDAO=B0j%!p5>2j_VKZW?wh&WESS0$=>Pb9FBnv7nFh+F^FtPL&CC z9t54CwZmsA$A(ja4E)`ZXQwH_R^mFX zb_YZ>C`yDe>ryl}rQendUrA!eO!B*@yO@I#(>*KmHqkM$L_Ga)I%znpwm?K8W!L?_FvcvAEw2 zPk;abACN(wLPZrUSNguxAP!w5LShw9@^=Hz&hQ@^!*_OdLM%G>|B1hVt0|o_E}(SV zpaB$@6=mraj<5tOTp-1B4w=3h3q5x$1N3_fM0e@KTtLOKH4%{f`wqKyn1xLIs3790 zM)`}hwa?|A$+2^~?-(RJsz@@JF{Bkz5QYc^`Kqy>2be2#8rHEqQp>u@UixgC82XMe zp$qz+KVB=+6WR$SESIo^=V5>Mx%%$-B3?bZAzKrOqm=&DPZs;x?#X-%m&7I3B+}sG zCPhbSw{e4#<3%Mf9S?=^O!Ri@UhznUyy-`B&fE4vM)sD_j?I+mDk?Scf(Lb)do3I( zE#+{NTRsnzv3RgrH5J#=Qg)wfHM(hWEXpXIsaBc@v@C}+3ZB6y>L2H?J6tct9q|#@ zLa*n{yKdReZGCL;fMguKxnKoSu5E}Gp4#);K#B88NOD%_Mm9*naGkLz(B7$N(UPzp zx<){bd-424pVvYcfteB==-jywhi|`f$38|so!-c7Xi}d(kpK%QVDe!(I;xx_i|#bM z__(nbAwt$KwJ-z5ET*zv9So-+2#<}~m5e;I6n5o@+wr#y$ey96SqalTFx!A$lP>R4 z;)WbP<`D1P0whW#u-foD4woT93vk|S;8d=L5Z`c7E_|-8W|k}}xiQVFxmn35{PiIm z-S)=37wBFkXEfXR*rv=rLjlqZ*)moIVaw&OTHzXV{rug0*giuBMOxyhG{?Q=^jN%V zQ>h8B{frH|nb;VilAH{E)iJL}*{{-H{R>-@c(wmjIHZr6>ALTT+`f$*xL&;htWg?< zg!2X>iTMBcvU8{${gGDW03%5cywls_xU(IQ+hdoBFxmpl>ksR;`~>RfiI0cORiU_{Mq9>oE_Dxjkz@*HS8L`!aG>%{@}uKl_| z6`pOZ0i@@MgK~ESXTij)*q1ht-0*l7>wW$AMr^*ng1xQ6&Gv5?(wxT3pl(YcggAW) zzUMkb2OpQygU=)4yTirMm=S6Q@)8VYqP@V8Tq#D}A*+Pk$0bK z=pIfB>dUP~rC-n#$KjZ;r9P;vv}SvBTGMbv`&;|k7?=mothR?XbY>qAhEu){swvG8 z+TlCy0~n2r2OV 
zvJlkty17>>?!hg*bxh`l%supV7dlz?BV^Oz5t6tkXHp#!!euhvQ=B-h9>j z$)Pt#NnTN1nY?dRPl^4+H~)~*LiIcRWKd9|(6`UQDU%2(gmQY4>ynJUt>iitkAApI z_n0$L#$xo;EZ%m=9c&AusabB;-!xyY^Ki{fpjWO(WIf!4CZsJ!(iMFik-}+CMX!6E zAO?9-;!4Tqsj30B-vTClbk~IDmhQf$d=WGGNEfEU%b8N3Q+p7VWG3i~1A4wCU2$kT zT4F_=`uN~-99s3i<)NjXd#BnMYwMmj)~4{!k@ynd>5(zvXAxi1$m1vxUygWv=!g< zK^PD?4tyL~t2WBKY$`JIPiRH%?|4j)^J|ZX-&VSVR}aX?ca{p;HyAI$txu@(Z2s0K z>Fwr%oxtlx-&4+`Wv7Ca2XejFrLLKDBsV52&kIFfPB~4h;&I(LsUNDroqIZGbAl`O z7YH|fa^-1Wl7X`ft;KTbJ{jn;i0gHaioe`3sUnS=HgX=M*pNbvxtnDFl&e2OW|{mH zS5zp!&%zVG{bvhboqm^I(f&RL#fNGFOC zig6N{x2XW6IFD+-H^3@dP)k5lE>*?gJ$<@>9@6v1wNqiYRc^>28zbZ?`&43$LMC=6 z4IB?^Pnagevu)(nlo8B%SRmn_{6pBk9VXM0cf0=6HK9m#bkQmM144KEK>4FLPsh2z zL#v5tbEvIwkSy9{J*-zY2WUX(4!55d`F^b{%VS?T%EyS)-PO74Xt^Z}tK$2fi=f5qxf9W}dXWCmif~ayT@Mqe87A7=ZvhK*PU< z31w4iX?BBy9^03%fQ6niDjLz-J{UBS9pl-)0V>c!(;L87faq|#51X8(~hTGq9O=~Y(>p?rCR&thQUiq zw8&*R>+FEBp&!ef4k->YIY!5}gH(vNe*17P8TEn;%uePzJMNj7&Ol|dK#mc0A|M<1 zb6Z8BmeNjaL_7d4iNZ(!=EjQ(3P+cQtr6mrklPfHq-D_|@)G>}SXJe(E{m>wD~cn5 zNil;{;+D5{G)=mbi3aI6TLt$zIn$OLYy@X+#O545QP0=6NpoKJfe(w;mDO>@T~9wj zAMIwopYp61RW9WQ=Se3#sx(gkY}Wx#c|t8 z>nNMO+sT6g`r3CX26FDLPvU){R3ni{dBoU!nN>QC=;V9bT`Ks^wdF6d{XBgp&JJFn;sWpqNmC?XcW$_9Uz{w)!8K1Klc*mH4ouYzCFdtLJSrSIyT@gTdiPUF~6R3)=;%s z!0eIt=+l3oAzUI)E>8ea^AOBSY zY92hN--@-{pDNp8A8a9j>=SRSLt-xCZ%6^rA=SQAC}T3Y(Hs9~wyXu!D)tV?C6Jcn zE>1y#ui$B|PVn(1PRqrTF?)d8zD#u?;BA$K^*2ay6L%V8pHo{hImy7rV$iP&vuZMu ziRw`ZT@9n4@ZQ)niwxo1d2`Snq-5)=V{0{T<+T2aIzQlv5z2@Ur`PbcaF8k=Y=V$_ z%TUl&O$GJ(fq~(Fr<>L3(@m5kEb#48k@~~tOK@SNJvE4sENLl;eoK)(4*A@pq|071 zRDV8iTNgu#TKVX|kAFH(z?-fUM<@^=6=P4x-w`IOWDW#FZ^2HX7ik70@E2h7<8D$_ zJo+<2Pdxv)i6qj9&`uVF3~dZ66SyeF6364QI?=aQ)I+rAo z&0p!XLwh82X*^wno4&K^b@s`4YEi2Gw_y#VZNyf)hrYT&WoLZ^upPv*D-Al_v{JTr zJ)ZpU4&aRUSrKkxVXofH(M+xHgF+nC|gC;m1xZA-WALhu1l65T6+BOLy(SICC88z>E_Bjxna*D+?^2 zk@E5dN}MQfXs$9Z#qyn=qL|BGC!dJ|1Aci(zP4$`nls1nV6Yd8ukB{7f~MA6Kkqx~ z0Tvl?ylE^&)60M)5BmOB4hKzkxr@&=<(w9dptBy;1R{ZESpO5@dH>!@X3CE^MN`78 z={Debd<`l|09nH#=Sue;d}np)@98Q!x(Y=cVWcUo*PGOKOUo+KEJ#*EbIJl3AtZNv zj^?#HNQnU6)p`gER}Xz+q#73$QsZK&UF-%~GcBxkVti2<2VvrkyWcXxu;HQ3MUkx@ zo>+9V;_Emocv}6K$Yf6L$U@vVo*Z&pSbSw~LaB18D0!_%+zj>dunAaZ1Xp>fjJu=nSpRdw=4`<{cZ}|l7P@P$n!_J7H zF&-PHS|z;$1Fpp%#O%0KN~M7I+$@jyWN1q+AIj2sWI`HxHDI?LDoy?0^6h2N{3pIu z+(W1|q$&r8*~ssv@j|w!q8{l~lT+(1yZ-1|-7g6V&j4z`CVa5$3Vn@=-ldzNEbpPf z694k$L5r zFSz;aI%`=45HhNi4ue$Totty|&qhJJyZhyp1D#m>GEBAFt5$N5-ExdP&P3c1b5+^I z65)b2_RQ5B(C1m;tQ(tl!q_@j-4(ofy`0j=GPDwE%5;J#&L~Z|~hza))MDfM`V*yOSsbR}z7V z@nyV?V>e=Dt~Eq<+Q*reTqZt2sqrP`NjUf3kbN&*gRvr~feFx3y2v3lvWK%gc6(16b*)$*X zolJF#fkmcvR@cS`!6y0_-`aby6=ocV`_!#}%v=$P%Wnmt4~>NzTBTA-XRwdVV`dYc z-&n5&$e_OIvWAsA_4Xy?6B32`UzqZHAxzBMS8) zboxlI)yMxBT!E7Q=soeBM)}ELoC7o0EVgrh=tU#QoyYoM$>L%Cd>o}zXXPTv(cyEg zwIQd+cd3qGmh-P^xcNTgPs(t^Qa1^F9!S8yKyP|^voPd+)5n*s4_TRb7RN_{lN!)C z9Ua_+@>YOdtiW^dDh&Kg!tUbDoJJK+oGEnZ8m;7c2%Z}gp?~~T^)eiRPiz!QDjU%$ z^!_>Tw=aJQHI-2BFdBaxFh$ra@sbTJe_3eYzPSNh(f~f22mwP|@CaaWW;`jDsOrVL z<*MFgW!;j5a}Y0M{@e>`)_9b04(^5ddol~>zt+scXlab-Bhec7^hCZy7Q{Xk|L*#K zH&otJsy9-J;Acx~ZNwX-h@%8kNF0J^e_0wur&hd<;OpTO@>pJ)x z8%iVjd;b#g_fdmz=#7D%Z>=U~*dAO(Rfu5TM!lhpU#pH@M<_EPhYw7Shn<3G@|D&A91pL2X<=IH5{>CvzSl;J{kCr091U08(r}aiBqkkh%q^-e@;F zb*=hIU!*`6v*N^S_VYb1a&^Qx{zKFswi(i)Oq8>?z-wqu$Sjs1bU%}uSI^t`YIEFAA z+4=nI7=gh*A7?r<@!fKMZGta2F!$X{W9GSpl>dzd!RCAF>tAe{vbXgatVn1d-FHrS z1)RDJvOPWg65|L&r$vRP$l~x1PCKJ>b{kn=71gN7AGgtmtU5%d_llkrflM2jLybpyJtL zDu&ftuSka`y!bs5nC`_W0yt2}TgJ+SU=;<9JRXufs)?g1MzFt%BbzgV->VU>Rac*I z`JRA11M80!?u?58c&rGsV$_NHmCH(mN)r&rNtOBp(YA~!nwxnSkB5b%-QE2K; 
zNP8PF7HtHHl*ni#lCCZZo@-rp1kkAPRUp$BZ)eoh>S>cD~83>`!?9~%i5s96tVnaV619hag_&sOA#{z$ooUoym)7N}e zn;SMlkjKmcFQ1Y$r3(0SQoN|1d*PKAL$tEiueg~|hNmVMdX)FUI{r}R;n6dl%YV&G ztGuYIhg#3X0A?!Nmvh&(Zg>DK+)cFaPvh0wLq>wq@w}G-=v}k}Q=wK6v$1O=7?q(82Y4b#|q)LKhmt}7o{yEx>@*y zMwbG21biC`O!9bso9qjlNX!8bx5Umt%MAcj87eEpe8wWaFy|=?-8>JV82tQxdyJty#dc7vWkKIy zLw66GOEMl~xk-K-t*wvbgbB<~6<{Sk5K92nD`dAII}HLb!Zqb%2oL);6&XzZ4tu7x zA}fe&5+i;9g^j3El+xDI5(|dtw0}UD0J7cyKr$zS0b@=Cg#zIxC$uZn8Q9 zdk})b5|Q)wg$;tm%l+6C5w+JqE!c#WFWPHPFb1cShK(JgbCXEoL3mrNj*|yS?z0u@ z*UUbpx~^dyprlN&48RmW^`%QocG_P%?tF%%>>OQDd)0mwF6mn>tN*AfIS`;n;!Sfj zD1B00HS;7-pB6em3ABhM14HlDftRI$I5F6C$oqmS7qskpat32}uAXYx)WC#^*UQ5+ z4(tyc!l{KBIM+Gq#Md%PJv#=(>~;%KduqK{T*^@>5frJBzL_gAGWityqmu;C>gLn5bhX)em$*_Squqm6jocF>PXvSpP1k^<$llgRw7gp-3sO z_(1a%DvUHy4L1w`aw=IxppsC8ao2iF5i{HXWvTuZpptW;O)4DGp!sw)9^5`_BTNuug5)rA z_+HupZ9nP+?OyhUHGBq zGnL~1gM?qtY!)YYgkpu=J|)lvP_9t3zbduMu%d`f8U$4zJ#kx;NksFkgfauY!m}`b zDIwsB>1ZNNhCs&wbnKWAV6Iuv*S)BW5qEHO*a@1S!_sNuQif zM_~XGf{uPy86l8&Y|);Eu`gpTN^?2-XCzB(mfz00SxqmDAaC$`H6hs7U~Uv#`B1AA zzgmhbzcymw&5UhYrqTgy4FZ9JG|^A3@ivd1n+h_iHc>V&)~%DkurB_Aj3hZ9XpADBS%2;F_Mqc;JPn$MEdy<+8)0R(SU9Pa8W(r_ zmI&(Gdk%XDOJ2@>5rZvO&4@F>Gd8DAe>SlTW zlhS6FBE|F-3ROMx>(sh6f?S}=-La^jOsqm&Ze-`Z>e%RaT}|2*0nv*MR`Vu@#2%8= zn@zch>^w!Urs|0f@iW@(+0GXbPv8;Bvas;mlh|8|<*@feG&6e!^q}||=xazkN#dNU zyR=dL=9|4^eW`In*SWdJN|~{O1Ua+h!#ONlcW`p^!sF-_pHVLVrwQ5Iy#ZwIg0arR zoW&?}gAzQbvyZ)Mhp*L0@*0X1SMC%0*DCp8gd`D;_yey%I#{y~*18ZmYb}!r|H5}% zr)s*!7pfdHff843vz)Q!9^hHI{D|$zg*Z=&+&ET8W#>2pfZjLHKo7Ui8KsTLuBgJcRPtL}ctSe>ehEC*g@7@}yxo3-hFhNO{3!J9gI5765602Pi8Q$7 zWunEyKO|~n(hG!gNLmg2?~1u-k^m=trMKe_0VYXEDoG@PCldiwRTU@W zn%ZZ>b&Jw`^!C$pUf%1IC|s%1jk@SjqKEAw}TfB zcC(!B*h4a6CY99HF@tRf$#AsHiyFO?S!?*x?svL4&GlZ`DXAYp!DhkZNOVq$5ftu>1@ae>;N2bA28Z*BB%^i zY%><9O5jY9WPgF~Ft08&;hpB)U>+l1zw%Q$MR1h>01Q9@o?&!GfBK-kj0p+Lgz5`F z@p1}m{~P;Dg1XfSF$nO+BO&Ib#qBsN zm?Yy+s_E?MWW!!;eW9XUQJ&Cv7{?MNC!);`5VQ;6{NXan6D(!HT$Yh*ibR@A;~q`Y z&gAT+*(3+Dneqk~d28Dp1?rabZg3LYk0ZdvWJS~6ti7M3oRVzh5kl4g$Zzwu*5$eb z&yltc5u>-;pXBzdScdHJxsf!cGHW$eEVE)vu7JLRrIcOD~zU!?J*i~foIw5 zm@dk+PzHT9_!VM#)Nh6)pZ7%STUxWG zaqDQj*WX>b2f_rU3D!EQ!B(P}jL6l>z zCM*?VcJm|m3;X;Rx~$BY&{e0b8Fc|3@@(#vfoFdo7`|ijrsgTo&w>B{2{q~_o+Hri z`AR!#R4=2FW`rODyR4lpFfNkMs&%4@ng}XK!1$XE`2jW=is6&w6iEi33!QC4*IlKB z*~josa?a?9)^Gl_@3hP*XMyIhlRYaZ=dsOP*c5cDqHg&sI2%cB7nq9%HWJbk!(y|>@roxr>nqzST`%o2u*;3rI z9O0i_`kH(t#mT%FCALJWWhjEYJDx)R>s6cpNhmm^=b1+#3`VKcO@Q8)?VxM=)f~py zttwM>Pi~KO;}^Y2GCkCYJC%kSlG?`7T8HH|&|tsH+1X&1Ua2-S1=7r)keNpv0%N{~ zCrd6ta$lqV^H{GmmrL+x44jWaoI{62iWg0|z#sN=QJrQBTv+?NIkvG&*v|&Y44YnX zbT>7sdZYQ3h#MgbQ=@7TVrKKpNXZ{K>O*U_IJqe-VC~#F`0sa=KFa>!XMs9>iVi@n zCWi_x-`&Xl@AlV0*nZt4oi)GAB(?jW*Tyo^)UQPY^FlS#Up&i)>y`85(Fn5Mjx(TG zeEYZsaFx_CNDmgE(FXt;o^TWAjZTwx!(1$Tg|3S5mXX|iMSuIu>TYi{&jENozwEVd z3Lc*&NhMQ4`ir$8j(}tI)He3((9}7QJUOMd+qEag$j|$UJ7c*JH@U0xoW9TUJnBX3 z-0o>Wd*Vl?Y^|CVx z<&7B866Mpm2x}YbdN=Vye7SGnb~ncrz~6G;Rp+hDFZK=H4{>N5d1!^!?pbFyHXh=2 z-R}+b@{wba?j9URR?hz5eJAEhe}me?QKom6{zxVrz}w1byXkluhRqy`ev?tdLK{}T zJgDDoOH-NF1Cb6)@E$_(ughsEjW3>j(hOS&EH(5%O$cWKes}kVH6;vyHbumA^{m(* z!@$MA;3{_a6J=#bP#d4U)BJYqqoEd?oGVFjGv>NIb*I^9}=& zKCoH%lPNxG?zWJT?w2Qo>4A$$+eIBR(YrqMsxWdEAxf$f7g8TzTBJl zNYfs=)--OoNBw@c@0S#d@G_XWo3$Icb>j&mJBlK1%PViui!|+Th4F;LOK!gT>?o0- zp|1cl6jmnnj_^N;Lzg&RnDQeHb2QQAsiqnJVFxh)rqi~=qlWT%91=a5C`+@R@5C6XDb^^Z>)z6G96d2&`T;W7@cj@jI zXYp7FXe%_t)vl9E$ccYqCvOZU0r|h?H6Cp4>6yw*@EHiDOh$SDj_e&#K@93 z6M*KDnofqzjnNVGzzSqk)A0*9Tm`|?1Lf54hx5QL5cIf=#v}+C+E-CE(MIr{M)x`R z5BGv?5B4`RyRyde|4BwVDHkpsURUsoy5czc(x${{{upvf z!zl#KEzyEyVVAwF{3uSoLG9KeGkHmOLyxohZ_Mw`P7BO3P3uY3v%R>4M)}|(+^>yK 
ze;ujk2YSKSdHkVr=SviWw7$A9PMR1$k16E8?b_vp_o|(v9&G&@>!cryHOBv{Lc&@) zQI$^aV>fL{GO-HGjw0~NhI96PCp8v)fGNj4=3>bt3-PNXc^*e6v)~vxWFDgYb><;Q z1dk@6tQyx!L}2tMFFIhhaMxYR0D_u==3Leczl~;6WJ}Mgf+}_(iRpdrPj|rD=ffyb zsVn8Pwb%|Pv#1^2TEtEob{3*26StJIB$r+C*j&8fHJCXifLiaP?lyn^`*@(1&xtks zIYt%%DQ56jX((#NOSwrnjpiWC;h%CaC1=nsSZ#v^{0vbg4fu$GWLe$Gf@lPyOd}{~ zo+!2wI0fJjf&q5Wv@F#j8YH6W`zb1bu$a3~-2bzo(Q-kJ2HbVu99Cpu0)LPbvl8p_ zZFr(XoU(|^nZu=CE0X&%)a9xM>J{mxq}12Ba-WlB@m$JBI0+UezkeS<1{P_lX8EHW zCMF!Jcu;BD|GOprCWGG$)#G51Ppc{Zd5f=Es2?;yaBG)6@@(%zseum44L<1PeK)cz zYrpbT)ZWS;eq!EV6&mkSp!c^^c;^spv=@oE=i$4__~(-2b``C91k570Jp&3$!pN7H zGPZ+?09tFX{rk4m>qDN&)X&ru^Uo=SYGtpH(p;D;!WxWS z9xjeO#&6o1c)l4=wIlk~m$T4Mz=^MCb__f@k1Y9Mt@dX*c;2-sX56cDt$#hb^sBKI zJz!yhi=bi)j2+|4VxjRvxk|RTzpp#m;yKESZVLp-6xuinwL^|R8vwhVNF)LMLKYn@ zI0_3&RvKTRnHxM$(8>V32O=&zh(VcGuj5lR94qg&bfWIG0eL0EEeICSao7J45Xtf% zP79RsBiBfY4|JkHoG)$&+uq9h-D8ea`sct`mx|H6_pX9|M)Fb4qrVTmh}!sh9=H!n z3s!|oiG^cwXh2r3YSv=|;Oc}gp(y;^neBi_#aqt0wYC^PO_UbT#lue-M0YZiCFSO^ z6#k9+%+4}Oz<^iWr+7%x2?siOhB#XK&@mCH9q-9rS`|^n#xDPn;Qo>I{oX+P*{P^c zZ&Q@W2*G3rm@Gx@X16CN^_8+_4b!VYSSiYF-_{|MnoHj=3FWFCyD~kRF#%ksonxzQ zIj5@DJ9$%)MH8YXTX)s1tl<~9@biD*lI=`Q`T)ZX-}Li~(hBPwBDHYJ|kk)QiBc*rB$i_9RNydeF+gOhu{Tqi!3K40a9x z01)~Co?~i8fAp_;t6ES1{vm5ugtK{{zqAnS=5NfO6Nw3~rv8yM?qp$Oig|122~Az1 zU+MGMIvLOE@ZJZ$tw%_VbhHc^SOXXeCwN@loYt`8*eJ#t2u%ZVrGxs`Wrq&?=NUB` zuhqT3Ue_ech&ALT6Y2gMOQOc=C_@55&MvT3J#7EQ3I}$VjuZtU)_9(+cxJzWQ`|y4d36=sd!^MO(tM7uCt^{kC_$bquJ1RL6^$O zG@-ZRr{GCwj6-g4^K2VKeo>{O;bKA5F7T~|jst9+QgFe)Vf6oiRY~gZ{!5pMJUl2M zGw%ZMH05EVYJazeDv+k_2X$dlO0>BhR4e*d;M-Xh9aVYSUs5r8`~^M%l89 z)n4|4IYgSG@Ca($cEIgA#QMwTJi}UejRnZAZ4LXZcNi{L_WU@QJ!SFr}rQJU`(S6lBw9-J_Bi1`cQD+;BLs2E`lO zkK1e?L*~Tnv-M5`ZYs(e2|5r*_Ty!Y4oDo|WA1n7;%aIjftn58|fw?4o+>hW?Q*Uwh80M>? z5Of=^5fa8Tx5Hh<-}?>FSLd?)lw@Yw<@hUPm?L0Shv-U;)6WZqOaW2nWm;8DJ!cDm z#@ikM_Oc|J(!%C_M4;XrR}(fo`eehZ*Quj;?0f6`g8SJCsXJd+?vbet*nJZqN@^Vq zzq>xjkYh63ysR9PzN6B>46YwuH0)~L|JYsU|2EcNq_N_ln-$mdX0B+xFD?)4aG=pD_IWOq#*|Uqib8gIMYG zm!&l&zG(`Ewo~HTD?BLGl~nPqJrxTNfBt5~Nn)v5;cQ1fU@IGO6_M`5SpY;!4QWZ* z0`BeND%uq8ACVZ_d(846PU1=eVJm+Sl0djkP2Ve42c-o|b@F^)G)IQp{P;%rus!CA z^Z+9@*8NW+ycdq^!a=?0(6cRtlEyIGgEF=Kjg8yM+8%^Q;ZCOMPb*W5^lAoR#$=+` zd*bQBjXgH-OonP#_k8wO8FQgw8T#J2(LR4L;9Xz2$z)Tz(f=*3)l`AOo4?#0dsSVC5tE4oB7eAIIls19p@>_6pm4eUnpFmp zJH9y}pZb|j6J2Q^qcKw?FW9_4Lp}ukLQ4r=GVgZl6Md?UA~iK1suOG$*t!Y0HFk8& z5Waf&V;w$UDuh=tx1QmXHs(EblH%$!_*>iSzy9BGEsSCQD_R{X z!CMMh>IY;w+>pAfSg*iF&4D?McEU0TsW$yU98$Z4E8nTEyg}^MtQ>Y(t&eAh`k|6t z0Xl;qbW{^uxp;nSuU5si9{rebk6APz(e9<*w#(JO0a=||MHyy8G}m0n+>qdTqOSf$ z=qnx|KXlDNLK5>mZ6R(c&#DIBy7-lWq_h1>)1>WV_f(&cSv;%rW=Y>!4@u00m00Zp zdY#SMa)AZpE_v|X9Ui-E?}UNhn!J=IKZH%%lzeH-`Wh`{$j?`q@@K(q;LR%lY{&Jf zpj^N@DqE`5tj?Ri4s0Q%`5M*8Gd1jSflKQKy>!ribQ?U<_WFMXGGdZHqdj8ju0to|0LdGz0`9 z1Ypq2`B*}UmKH*&PZ7ll2Rk270NWop;d`)}&d?5#7%M$N9Gxu<_7{x+75N2E{`qP7 zVQ(@KP8KFV8g`zpT9Wbr6IwPSSmjBr6GKnPQ-IjY*Ql~W{YR}<+E4sywcvWCCQk&v zOWx$*8Z$nLoc`~Dd}E^HFohdN;Gtrp!~M}*vaCBft2u1`WD&%g1;!D1$!m z-0FJ(CA}oRrc*2Z2`|Q@`0xr*71nST4Le$JPffEcXXd^b&$lZpa54H{&07zK%zKD*g=8M5t=+ zJ)hRySW9?3$VNA1pu~kl?#};w6gvktn$ElDhz7;dh6T+6We~JwX@II2gLZ}zB&)&oae1( z3#t~~D^xan)&-kh8z$|mG>3RG3QjeblcawIInRtfCiIv#2Z_L>a;65Mh*aM+=<}O{ z##QN|#I*GCwtvIq(J3xUQISeH_8pA?9Ym7|*3>vmLH_D!F+{;+c;>e8bkEnqf<=0J zVz^TSYPG07Lg?)wtP*T)P`U3a^|9k+)9dOLm~Dry(#rn_zymy1`Cm@;U3Emu!q$v} zcO@4riug?i@{gOk&2m2i&scVriWn zr7^0|uZ||~Iygah*+M@%zFH0w8tWWVQq`z6tKSZ6Gd|3AL$226Vj2^FB%D$j&iZJy z7{U(!dlG%zx-o-gOjc--U#4)Cuu9@#Xn{jL*>O7li6S_Flw}5882^F3nA+SFf&EhJ zC9<47>~giGNyG1u!QVwMWj0jc5e<^``w21fNYqi33#yA|&4@qYvclj53tn)gwA1$I 
z=@@PK+mFLo4b$tix^g?ToXYF-H2fL)zsJyY;1LshzLQIbB8_88J;7jIEr(*{W19s9p`#O&}-k-~G1 zN*a(VH|f*~U_Meqckr9~>p8KUF6zeNSh61)?6Xh2`K6O#-@wv2M9d}`j%tks5eS@o zu-O)Vn)huKan2V&mn)`djrYK0Dt?H!+xW8R{ZS-T2AOPS4h$(Cn5 z1rg|T7>Jzzn&ZFtp%*m0HE_G&y&i+yOT@0I^c2MY5*weMmaYBxYg19*sfoh)1T}s3 zui(V)CAO`(8+^G74rpG6IO^{3S9bu=0JXG#)*1@8{&YBg`4Qt~fvsVMs9s(rc@gBo#dmhLO$8y>HuM`Np)pFCFF5_$j<24{TZ&Swy1Ni! zv%)Z~IYn$4xHciK|G03f-FG_*W1xHZ$oa5~UOr*HYZc9=5LMn|u8y1dN<_wH_^004 zvGuX9NYg(qT~YL5mn==pNLJ<#O7ySZXn@Ysgf{0Q>c309rd2QuY>!2MqynH11RPHG zoIGWu$M!WNw(*r8WC-{U4o3QQp3NPZ=l0rn%4+HzxYT{p>TfSNa5-eUkN>|E zAO@~S=oBx@oetQ$pAi=|7YNva`un`B0a*bDrF-H90*@mAW&9QFol9P@oE; z2NG5~V*Sy@J^xiAQT@7OFr){TYNkyzF{;Kw3iiT3Sg z5a#l1;39?@mT|yXNi_833U)%IxY{X{Qc~g+M6{QYO(=h;RW|_GtI7~Ba!;Fnba0*u zn@!Qt5$2BaFe`h~Iu1XZgc6l}i98l{LIkJ$Z8~GA0pENWQ2}9_e}qyJ-+{Fpov{jVJZ#> z{qqFI*o9zLH!xr&CCPO4zt$h%hct^vgb*;@9^VU)b|_pE7aR?5w|3y?RPBm?_VO=I zcpd@$gAB8?#@0h_W7;-z1bE8iIQLw=NyJTc+`_F8rSaJ)fW33tX5fK$NWf;~Cn+9! zqJ^#Ng+}CsH^HXq@}sADL@9l@O>MfDKL5=PeR>lOnE(JEdO@0JNvJ_=nM?@J;E{wL zehPwzw^9H5ML;be`-|#DoGANMt^K=dBkpRzR^_A+QUOMK+o-wZjvkp^qbIO=M^@uE z?^e^t^5F566=(yq@byJs2SyEmprnVSg8m}+zLq$bY9z}VR46W=IdcJbD0yh1%SRmmh{@8xS>1z%*~d+1*(ULx_SqS-RXS*uCsCm zk^fJ{QrJ8iXEVNkxfrkS&JYnNH*AtQ?jFE9`8weJD02^Z zhm~iH*6F@n@1M4lwU!EcsYt(Z6~MLToItP6`oAlHcSpiA6Oe5+m@hOg79xEE)D#Pp z`3MmG9(OCwB~Tpa5Xlxke6I?Z>p(imNU)FuBskXS>dJBdJQHiWDK6CO0RQ1oO>%`T zE6!Cz_KC+ODA zg0t`UW-txCt5kCrL*&Mk3ZtO>AE@a-XDtMu4a*4s^cFSCtiruB$Bx>iR~JZ7OkL7X zPU^^<<0(dg7h{nFSaBLz*~K^L#{s!XDhHETuJrsR#ib{Snjv@NVH^MHJ3wV$8xk4@ zy^Q2IXZC#K9@AfsZSq#FHj(a&By=X5Qe}BC<(8$MfO9`>!a<>K{Ag;)UndrlB_^c3$63%|C>2kl~?XB~#3gnBte8nf}9Tnv6_PIclL_ zJ!f^!maPCS{>B<2)kP1S&RHeP=IP9;0gWjUGUW&~P|r`b#p#Pcli-@3KkmF|U7Os0 z_P|4-10^s5z`%EDe$RSSH)aW`Vp@qeve7>FCa3_x2)=IJ*yZ9Ie>U{)_aw0sxLctN zu)!%vZG%FI2v<}91NBUb0D>$~HM5I36XPZQS+VCTg{D6v;){+L2f#64=fJ!gOu*HY zJS6tBeQ0@vy@`6}$s?EgrK!YgE~JMdTgU5C22Nt_e~~_F;sq zrsN4k=Iwmz$168jFGOSQnqz^pcy+8ieLJ#kp|EYxAPiMn1htEDS*t`tvaWWB_B;bZ z6s$WmQ_sDkEtR!-a8CC?!fHVV5r1tI0JORs2Ep(}H{2rzGxYVjO{qlZ(Qec&=uGRp zA}7(NCa^wW{x;@$)bX(64zJNMQZM?p9nE?}K&`cC$KP^c>J`l2u_Bz{6c@ajq@bU_ z!P1eiBD%@9lQ)2rqk;RYJC^b; zD2s&QQ*JW+p<)LMcc;LO8jg76#HxvwH0%H*vG9~{)zYeCz8Bf}_An@cylbfpQI9J? 
zO`@^(Sb3lebs-S}2drm4c1xtRQpp4;L#uh7{&mclsbYD;e@ERHa8Mh&?HC|>h$18j zyl3udi#0zfQ&bzD1iFi13N=aLLx;|oG>0$6_20>Sf74>LMr24(0G%CBFkDi9JKOh3 z>fg362r^g<5iSE&iL$5kopmWkVjk=5Orj>vOGQ8S z18rWD{@pWX9~s_b+|W3@dE4>c{>r%gmnVZcPc#sXk0d(E2UO%pZqd%9Hx+Zq_Iq6h zW-6h=9PqYT0oQ@8qn&2_bC}Qb<@wydM#V+wdnj>D3&FV^B7Svji_D&Gr1befk3gL_7}Ak?JK$v*5ISQ8MUmkoWe!j$PoBh zWIwAcXUCcQvqK7e{tR8(Df=^8Rm3wFHfj#JtaTHqZqrw|52#m8O%Fx2z3lQ`x6X0T zVkCi%3}M#crR3rbgX>`1wFOf!U9Qu{90$*d#jID9GP2lt4&M4UlRGf&dR&Hm(@Bf+ zRrpmM;SP-D1KqnzRA|~=42u0g;=?ZCSOPp0(_-sLxurobhsSY0>a@k2ztvf)9fj;K z$OGXhl&7DcvzOnf#TU~DW0tcc~kC3PK1j0MtrATP>jeAVtp(YQ*R2t;`%h4Tr^-FBII^#tpTUgklI^=|f zmF_`yE!{i4-$HSd22(5c6IC~QO&i^J&Uz8@rRO8CE%P$JjEjC0;#kOyT&#__Q20Qj zpS$KkC8nOWz$_2XGDEXubR#L1_4VfuZN6H(4rgi!l~G}=2t#Y2;1yH8ausyB2RUR4 zw`aCl9*tg7U>IY=d-n^vQ7f$5Ak{W)2Z>b3WJ~nhaIoBd?)8jss!%ffYZk_I?9h7+ zUq-fLQm8SX7M%pgjGppR%ZV3`r|7^xg9tmNa2$q;|j7*T?YW>IozOu6+xf5=9&Ko*b@_JbapZLA`r#q6Ni;!ckZaR1DXv4DTO9-V@7eFk!4OANJOonze>2Z0+zAZ z-GyDH-VutwcjOFW{6E?uBjO5*GvJfaB3%lbFB4TkK?M|k*bMA(qm_HQO`P^_8KK0k z$m(R+(Likni_p}uY_--UppfJ!*e3=6ZaxyV!sU+vC3BS9D9JL4~c7ZnE|?Ql_R0W0|@_2AL+)ayYOWzAg=9?{x|_8NnOzZUL@^SrMP#` z)ho0nI_NvWYVPhlE)*W5Hhg^iP~&S!m@k~Lz);XV{n|y526+4%&0Gj zrflwRz(iMY2iG0Z5edW8BSN0)dA!)W2%3O!)_ylZy}f z76_YvNsJp~ML|a9#{?X{Q-a_n4JGgSs5n$z2J9c zl_xuGx9W!sqquSWn#@mi<5unYJ%1TU_@>~C2}w1I3FB4s)0*gT@LW0uP-h3NYYA~a zjjkfiSsq#_pAjQ-iPn&&&H*77)Y=@W?fb zm>z(%kuxpZw`@ivpu)bCj9SB+59$F5Xu1cj{Tw8mm$sm>N#}iCdSj+_L~QH{CD?gi zBYc7d^&!305!z2hy0Z@`yly)bdEpOo6jmPZwa?LQfQS$;KJq!(lu3Rvs+5DWY!0i4 zO7p!S%=Wdg8gVrZcII9Ijpy)1XBHv+Q0lyQGfW>#jd17x2g~*@6wyC&(u>3nmtQE$ z3kVqX`U7=bE~g{`M0&VtHL-(1yIG5 zP3GQk9!RCETh9h@2#ylYWbQX8oPHkJkkRlSdc_PA0eEjvjHw@tI8ntfDqUsN7PgDL zQ;^xq6%OuoaNK^;qg33}o(Wf}Ro>gF#=tGu5$`o=FC7Nj7Io~H0QmB*tQv}5-IYU~ z9Cz4&MmvH&qr+G3Lcf+vPt64M_5Y&^!bJ9E>pdYS`%R$S|5AhI-4CV8mGM&MoqX@q z_Y$fwfH5&Vq1E2Dy)@r-)Kph~f%qg<3! 
z#q_Eyc%L_P-_YS8C6OR}t(os3`Uaiu-p|yT=2#EvK8ru#R{v{v29)o6g}zpHwG?{& zgS>Vm5pMK1jq!Xw5h=LZ`xk3188 zH`PjSuV3UP6t{oM(A)isZrlDu1Iz~$o~D9!*xJ|&4q0{mXFO~DA)h;F;E#GcpEc*% zi}W{yuEA$(Es;GU3qsJ8dAlOof@dOn<%bQKag42y<{5SGp*|4-sudj{XbA5~*8BuQ zd_|=@Oh=n&nzM00{8vAm&&3}5Bl9CCzFATz@>nsj1s9d-zQRC`9vs(~YiB_(6)#t5V?LC) zdZe$-bpA{RkgK>ge@{D2=Yf!!O4DAt6vX|2b-RgctZgmgB4tE4mF{65F7Qz~kL2GT z=|)39!Rk^%F&&OIIv)0h4y}*Crxi+}x}qhMWXfk#=~(q1LN2cQtO%NGnNmJ)y3Upd z`Z$!9;WotGqcLYoGbPYVhdm(KgzaOsppG|BqkNih-&0biv3{yh9;b`L6rrK+^VMa+ z18zA3l!c=suKTnC+L0*j_SZV&gP&`rQGyG{Zb^&>l$sME)AMSv-ZAr;D3=Et~H{ z6OLDNysD8lqLFcnQCqii6%dfom`oo5!X01}b!?2Fp%@EA1IrWdLnx<`);CJjKal<2Z-(ya!?R?XKyZxq0064g zwsp;SqT)oa+siREcT03f)K3wV4My`mqR?qmiFX-?Mu5g1J}6ZH&eB}4p19RU$<RP1{K+^ibur5*Ex~9~iyDyB z>U#-~&ia9WHz_FXRl zi<%a#A*d+uQ3B`Y zW6YiQw{&6)MX~%IlTD2^A`Co{_2B173qmkNjjC$9NO{A<8Me$GZ1-2XJ^rt*WcK8) z&)5?Ky_~pJjg|xBO21KBvm97fxVb!i7Y5CmVmU884nP3psf;X0!xm8Wat%%eOW*b# z8<<8H4JpnB#wI8eWfa0N|k&qWk;UxadtXU5X-r%_|fo98Nz^_2#!QSYe=&SeK z41{Q7M$TN)pM6(nW`a`K|G_?B({ehI1#b(W;SnT+PE;d^x%Dc>%FDS_T?A>A$xNg_ zpLn7pq7O8A0M+E_fz4<|nxQQ3gwWi|mqH(t2dFzUrN41`8XI4Matl?XkR04RGdI^2 z5@Dv=7+U#xUZh2aexo1LGzPh56H*wOoqD+1);)@(SnabirABP6F`V5t;;Zkitrz$f z^fH}VL|$%dUiLh!+c)49&)^(H>a3dj%|c-Fop5ilJpvmzz!{e6-}8gF5i-@NV5vCh z-s?JmXW+eI7;(?cqq|Vp<#aD(RbZmP+!fp_AjWAPulWd4%D;%Z{XvR0a-_{ov}$30 zPZGN@h49?nsuhY@R2n4T@a;ul0Kv$f-0-;NvW8L(&*H9mjqhl?;hEYJRNtS=Nl z-0lwVRZ(S zJz`n%z<&OiRL)dE7})uc7VI5|$M%>t5Bp%?wJ@L%r7KV|CZac&hPE%c3elfd{#DS+ zJ$-I!FFP)ydM0IX9dq66Vk`2>ZWdnGqf5`YO}@zTfs6mpN8O@zY`J-+Bu(gplgbLh z3WoXV{1u8r?tDFnUHb&5YBs3=zu!avHZqI6$COdV;|D0$y5G4Q&Va((*|-B18IgJ~ z@NF~$il3rMBL#y+K%k_jMvoJm)-=%f`ZhtCXCL~^>#n!>g5~l(#XqajT11gfjs-9oV)3bq3f;)Nf9N1BU?7LkQLDZz_@0G!it@6jb#fPB^A#Do@SuCeEe)QXt$oiN0m1 z4kQ(e)^>H8Q){E61c0Q4w+oXSh_K-)&73y31o^9BD!MTA7r*XnCP=-NK) z^S)r9tB+}lv=6qY^#xYspRURk-B*%au+|+*rps?tpfA$VzFI^yYFB5ix213!H$s4D z{cl{1(77)E2CB zVAlh(?IxiQHJ+b39hH1_52fv!c{p>ok83HnoJ`9dh5>}eaj-jsP%#@WpCsW&qzq!j zJKHy-0Ho%}8a;q*M?BdV;eJ@<`6I<-#-OO$WhVyZL0TY&oUX`$Qs|U13wn|22`e6T zvX|~bvRreLquBy7kLJ44g1*A&QZlcR1wi9XrdX4i!dX`he{0`kDL33NZ0= z%q;r%lPYB9;S9z~6{BT#x3L88f;7)=(9!?N%3{4F4>ky)$%sDYW+9x4tlW=1e8{7G z0;PeD&#h~97iwQ_GjF-d9?p3|_r2vMyXeLm%6j5sRS@JbETWvs1S#?(%+o+niifuc zLH=>*C^NB^QGdA=SIJ2#$hN3vb{3g5afn*zP(t0T zgx=u$NGeHGz_gi{WvFJ#t+V=R5}oxi&49t%VpSg5sEL}t z=~Ad0Zg!q7GwLVT1VCHb2O6w4VsFYeFPvEGXX`87TiWh(25QgOSGr9r`}ax{HfH#k z6%&uZ@$ybqAd_>-2ZZDa?{uzs7t{>BF5|nD7RRtXjDtXxdJz&jI^`kcQN44bwdBH# z0u|U?ywl@sDHT1W>87j=Aa0BC#;4V3GgUg+#a=5F_L3z}^7USoRBN17NR%Z?2hq1% z-0-y6o$Z!mMA~_K^7Q}hPkE1xuoG|eQ4)~V-=0XE8c+HK)uiiWKvBEH!zc;P5iGmG z_yF<2r+!Cg>WW*9*l2oXH;c(SPE%tp)2SRK*ZW#6(&HbOy=VVOV>P+t?lTrn*>YzY z*Rm9k*Q<5Yx|&|~2-6>tL8&@u*ibi(%-*oP>z$PhzL%I4XOfrVl^AZFPmG`0aWq4= z=RJ-PwNlv_b1qHf+c!DgACYp=_xUlIcAmFTu7Kufa>P{=e{16?=77)jZQOW z@`YlunasQHD7QK|h5`TPVK&tz6n*P!kAgQJIkiELY+;y|KXOpeJ`oRtdJ*ClrD?wk zf>!B!AW16-fjERI!y58y93{RBWI}`bEObb0B?=lO8`{1{O%Tm7%?+zKKQI3uS_8ma z2^Y~^tDM18zpzeO>xPBQKbiMq$|0T&00lK=AjxG$Wsv!N$iM9`;8VB6Vn|LuFu#Of zbRSyRNlCPAzXsP6|AInJ)Az*PodN_K3hDXpxblxEQU%*UODYbPDQYzBuY%sCBl8!D zQaADUkFg$lhocfT7-4XwF^f8r$7DcGw9KN6ADY?F&;!yTmTmCV9hk?uZlVEu&?v3< zh<{7?`8@#Aa$&TQNIyA8c5>JeLJku17S1raVZnArMLqL?v=3*&000z^L7s(06)b1| z4Ku8Gwj68r8^DG#aK&Ph9Tej0yBvPse7(q8W#g4E&=E@F&cAD(lEV9B8(ss}Kemb_hCL6SBat9N4f}W8z=0*at7@cytitT}Yqn<~2usepZ9SFth#l!IEQ|{I2AC zHY!Bb>T>vHKN^vx)j%of@0736%6gP;=L@F}M3v*m3_XmmG~D&Qr6JWGYo!$-cwlSN zlclGi_5qDJ%Qe}qQm7Di6=hAJ{v?K< za%3(*wh>=Y$)?ict+|kG_dR??8M2wkwI+uG`uvzH;>_KCgwh~P1U{|l!y74j-r1K! 
zILT5P>k_EoEXHWyTHd;;#FCJsBhe*t@>Ov?`w>Ixv<}UZa_!j^I9Q574G!W7aylD z^}g1eEN|e!#khyfYLxES3)#t4S;V^tA^{|CE7D0EKyBHKpd1j4Yq%>HAt%bMC@jC6 z_N9B}K7B`yHvZ&e?Y?q?orAH#*Qn6a-lG1h71;uoS}8`s3gwNY4hLi<1{QaqdOJ9Z zVIT_OcD`m#5W3+yT=~p>^|b6mJgaIv*;U&w+k-EQbga=CC4!(MSs zQ4Qkpl}z^smIZ}T7XATiidT{ik9ZSnISF3Fv>IC9#9?G5NXh$jN{3B|@j%?O?9x0-9uA&Xd+^WhkQWc8xFLfKTU+_FUIr-Pz5= z`Va5HF*!UWtO6^y!}h_RN_4btt>B}$Rqg3Ske+rS3eO3f*__LzjW8KS<3QPftT+~b zh6L)c3_~1H@0AdYwci2~Bhf?dz{GWp>CBC-myUmapMD3O+O5lMGK$=tFUP5uoD`xc zXnp9jnGGlYkcf1Y`WmJurgSN%;PY>W!H!|w&kg;s(neXkYQ?pO?jn;$RX`FqnD9}p zcut50ySaj@xig4szoI6aHC-6HPmI!S1a_ICVGVI^aRI8aCS+drRt}-gr_^X@lyltn zfeP=L&hvJ!Pto(6#SV{(mAD;-4HWYh{^E&~?c>0VEv1atKmHJNW~5+*f(EzA&r9$2 z=G#|+hbQy+Rz%X{ZcoC$Q@!mc+1ZmtGM`DR13m@C4EVCK$PXI{Y3>$=R4@l_r1)j0 zKFO;(7s=2F@Y07PVJgWLT-&r$S;_%jYS81=i5a-81tGBWiFrBI*?czNQ^C_KQZ(4> z#;A43v4krrQV&egQRUFzskx;H(4N>eIKkBzpzN}g&GVUK{mjlHD+0-;kI5Aq{YIF} zIGLrQe~>YJ@Plf_D#DEBnB22zJ@VO_4HlW~y}kEu0;!AW>*(TPym&b2E_7k;(PhzB z&A}iOikobp7Aw~`7_{zR<;F#=N^&w|^U#$sJa zgzbt?E8=R9bMafw^iODK&2LBM|5HAzARvxQZw?CCbYaM!F1-n!%slY*9KM^I!D0fQ z&&0^SewLr3*ZPhQ=(tIlAcJ_sf!-oE&olle5Xh%|dTQG>IY!))+A#TwV{k@q8v7PY zVygW@qhm~9aS-RZeSv;$Zm!Hz7H!3Uh|s-g*|xlnU$Ba&ttG84 zt{B7>kY6jnnjx}B^I~Ac=;-;$1M)@sEe!_$u_fela8m!+ZJ_)-_H+^`iV$oSkM*LV zP5Q#(2m_6)R5Bsbs3F!gigLmrd@wBIY{au>Sg{+2<;M3lmINS@Z>qV^=N(QgSZi%- z?niGCa>hJoSM?gm6T!o$Fs!hlSpKF4T!`{c%h^&IRlz@)6F9>HyU0wADR>vzX5`|v z;oh}u#W{^t5OSQRqYT{gfQvZDM2tTXU*XkM#ddiTfZ5;>pxTS7p9ugX_#$aWHc&Ms zbjJ3Jjc!UK$bz2&vXjo`eiI`P5hTf+3whI>uA}JBKA`S&4F-%SSifZfOW-`%c7br!2 zvj;_a-wk@Blz~C<-e1%>Pr*@+WR(+Z<;vWp>Rlg%R@4HEgDj?zA?#;j)$4=l)Vi3a z| zE0?xcd-1?vRaa9@y%Z_RdE#1~pX1m+9%F2(5#}SrHDajy62Gw$u`HVrrY3F+m)M=# zGsH3YYJ?2FrtDTiZ=?$>8c{CHQg-WnXJz$erMyUsjjBoG2Ji~YRapXBeC2GA^L2@| zdgX!lb`{B%uG`RPz#OXdZDBN0|2^;z40$ZcBY4k#@)5iN)}hQ4|*K zddK5%wSM)53t@iHBps}vwIUTo=Kys($r>Co{l3sDxu-rG6$rlm9s@CZO`=RuLZ!}a zv(`H3!4P3vfR$Uo0KfZyqt-*tENdH@df}58gjGFcb}Kl*JsssydAIR zDP;qXH28G-&kvWL>21(js&A{3EP>(}$3C$Dm|6Vv>d16(1B+cwzDOL=?4I3&zk_-J z%WsM8!ROt%f4WEox3GliaAS=Huw9-3OroCo76Du1sYp-Ty%0uDN@tWcXE`>097({o zlp!C&tde@=r=ZHxPaflGSa&fQcy0%(rntLixLt>PU?&}AkY}^%p1$Vt5`UxUh%j^b zttIEgUXMJ33dmE<=;r+P)fB#a)gwTy14&aXikq@UMQ9E$YDi3im?j2z8Y&_g0v z<+#Rur%jaoQjx+vbOQE_YwmiC$r`?Q8=BXZ%4PFh496cK+ znt5TPo?nk(sNfvN#aMXeP#ZFK6)euE*<>g2Qtmy=kE`Ury!k|k$3@2A8pyrZa_rg2 zQm#XIw_Vzlx3e4vK?_b3km+3Zt7^r|;;c6p@M740LDLvV&O2wc2q{ zs~#Tk^KpgC^}+Wb2z<+mHHac<20IU&2U{p`GCpc;uTO;_jjr?O3{}W{>2P$;u-9s9 znG;B#SR3wIr_CuA*n9Aat#Y#9P6u4ENB?^+0T%!Odgei9@jv-C89AVZclk3)x(H7g zP^DR;gWd+qnG7y=(&TVGzY!{IaJT3cq{l${=Z=2ozLy+4iMQM&DUSvx)*yv6kior$ z{j-}&+#KSazgHbDXuLIJ3y%x^BuRzB{(8KqJ64kSwFO@ZOd?4U^wCggu3yQkR9Ar~ zNpOmh3YfL0jev;^eC4qVVaAzk)cYr?=GTgx@w`%IrNWxKZ|sZ*SN6oz-tM<62Ev&P zU}ZWX$f0Ms?zJj=-S?S{DqmC_*$#AkgHGHQTi!@kdU7^AiGxt_HWyv^q|`_#hHa-9 z6I`Rew!UUvB@#acfS(YWK|KINxac6vVhl8akO+&gr?^Hcrzu>=P5#!{X|;@7_J3|O=C=)9=bAbdikmyB_l!kY9JhN(^&5o*oJ(A zi%kUBg4MbBf}6#r$;SatirJs?JSwg=P(GU?Wuj^fbk!$2hg}Fk5i=;p*ZTxQ8KTYD z`G7B3j^uA-iskU&01LMsc0OcLP5$2g%#MNWXYBKO43K(3;EE%&{P&A3aQru$S1FZth%7G96YKavN}z_+;E zpTp?HYpa9@1S-sSkj8o#@VV_!gwt|$1C*mYNRaTH8Bbe@k0^qQ^8*iN4_T#9TZ;yM zBa#c*r44V!m*lQq4zl*>2d`4VGNU0Y!=#;xI1=5iC;mQpW&2ypzEh?VKGhmG9#@N_ z6$GW(9T|OkTitZhG1~EJkw!qdS_HLT*Bv=S=$KA4cid@5Tl*TsIdX`$4@)}o{D{~O zli!HCj59l*kP;uUVd|)u+5?ezTXyySpLWr9#Cya=Y;qSKAFqMGizt@thQ$E3`w&5` zid@Xwcw^IjuJ{rB(VACg3PvydV|Im~uF+7)c|k?D(J!9YXQ|SnwRH(jOy;Bl#DZJ` zxf(l{)vHkXuzF8k`~$mCR*>@YKmQ9RIo$_1tMOGxH^$*R^dV4m?PvYS8;%mbLO-i^ z^zfQVRi`+kwf223_9C%N1ND6GoJwmtMjyyifzm1~U+(&nA^cpd=v?)%XsBV^MEvZm 
zH?Z(p)V8bjxpkSKoZf%XT4EiPZm=x@W<>N>I$-Fhgz=}utU{b#YXDJPNIM}UsIIghqN5>4sj24XoU}FjgQrnt2%VnF_3hQ#9U*#@aTvVzN?+02y!OINg}Tn@B#( zVTJ{dgobZ+Ud7S=Mj-`~iqLEXYe+G2vk( zHlA(CArh6EWBcNks^jTv#lay87E{0wikJWwdFw5Jz(GGo@YS)H);7I)Gzo5YCL4Is zdQ5kX@kX+t;lnQLJUD(=?>=Fm>_34CJ|>Hl;Id4awde45gF;Hh#`|^k-t;f}lI(i6 z86xTA=I_eI?RBf$y@?w_0I{dJYjNxe4F*-9_h3K>&NBPeI`N8e#U(ACmOWyZuFvM2 zwAZR>$<^LbRuYVrVW+ZJ?*e~iJP!@yT!N)zc%+iA{y5r=|4<`(0G$--gU}y&0m3#{ z{d@-MBDGC6=0tb3?+@ZwGEfZnlOQ=vh z9q2&gdy1BB5e6k9uK{>?T~Zo z>Wal@2v_PIETyUlQbAvK>Z@(%U9T4&xE#L}HQO5@9{p4EctskSKxxL{=+4pFSrTej zJWWy@75?;;jD<+(UxJ{2*UNnm3=)3Z4I@BCQXWp09uX z@e7_&cP@PwzI3oaFl2#E!O_JCM7C>Z0QQYC;iDOCGXF#Jh)FXw8=GLiyrClX+ehg1 z58Y106Q?@Ih)}onTQ3_-!<$<>LE>3ul++&lCtH&0)a~G5;DVSrM0f0!p^|+{wCx02(l%9g$I8}Yo@}%WhOV)u_?$x|3gXq zw16`}ANz2C`O>wwZv*fz{g&$xeZouNyp2+fDIxQC#eVxS6284zPyvzYNrMW9ptRGu z?3t)xR?LXhqbNR6%Do((LgVJE34pDE#%DUL3eM1XfX^1}O=UNfZx;^oSW&@YecDFH zw8v)O3Q+xHXJpB?);o z`EHODP@CjcZu_^>A}AiX_F*I1+{hy%fbukv_~^m))qM6;rS_xnO9xDqqLy)`rw9_2 zm3WO}XMvg@WOPXXi-@6wF?k3)H?rURmZ1So#WzJ;Rr-^&9V)cyk7sEOmU^M?==8aS zOS>WyL4>oxN0C^k%W$>1aA0JWldH|y%s%B$=yvNdMrW%i#r0ce;jeJ2Y#C^&!~)qK z6z8Y!>`Re-%15*A+-JB53a-Oey+j>Xkmfn!sNpd#F0H`b z`%EA$;#Q$xEf#-^rhUeSg-xpEJeZxUs)!tOS|CL zn37z^LPj!Ul??4TQs?HgvaF;xEXtPd52}hC-hcuJ_tt0;sWBzio8{ zwWz6D4jp?#Pyav0g<5ML{30d5iN1TZ9 zFp*ZmDCF3Dm~#Lpqqb%)k>sAxL$2$NV<6C;S6F;=>nuRX|5BPqT$Fj?E%;Yii7sXi zY2j;ZB;?3HX!MaP#f{d~=Xu$Gm!7kw5ARE0O$c=^AcrRHgOhaDq~-bFcJl=hq}mtY zMU2SFk2AwepiC51=yPvKLELMahV9#K^6|DPD&Iiv-hvU?36lU7j;V8XZ~5txLNLv& z(Td_Q7e76f_W6g}{Qu!d1mJ=so>9#_e~*Qn%f-Z<3-SOfUmV2F=Zk~|`p45S!Cmw{ zO52@XJjN&=@ncX-z7}tJ83P7B_`1rXz>Z7PaeSHnB4K;6NA*aJwt+WojkEqe@6QI) z+Ga@i1!818JFv&OuuZx41aTK)!Lb+;dCg#6`s1d0%qzlX44ezQ5a=rQ5z;GhH=};q zk;3k1-`W8z%_1?lp*%f&dhK3o42<<0#z5)o9<&We-!jFJS0vf`ys}C2kO+15opR8s zJ`2QtiYj`1QIc5qr>JM53RO_>aE_MZC9@%SSl!&CgF2m(<5mcmTqK$RlGVy4EzBo` z{O*amz(S<@VdiGJeh>=tGw;>T5GCOjiW&~KCT(n4hR6vIv8v$= z{;2o6zQefR|r;GH@jqQn-ot0P*?jt0fd&W3jv( zGiJ%4SRN+lI-4XQxN)F|7^mdkywKB&nu{p?`*Xke6M1{w`E5DU@)tnWM@zv*aX=mG zn`Y*`Hu}fJOU_PzU7KAu1=MkK66G!%pub(|4N)B^90aCdL1s2z z!s{ilbO=>iZ~<6#>33}S?fJJpc5mY%aIG*D0fi`$59B~G#tPAOVrQIL2YjNcJ?n!# zlyq*E8xQF`95e%JIIL>D!<= z87}QWHdZK>U3E1{<)PJ`IqmXQxE4qHuHHH zh=WEO^6($)ghZjKVFb4zo_OO3D;0x~S-qmXE4|q)>eYJon+pZ2f#WbOPYlHsp0USN zXWSso#)nl)kDE!VOm-rJy!{M$&6J5kcga&U4F>W%8VutQ-^*VrX_o0gKxeaTW6JWs zxb7IVx)2vwIJz0IuT#a+yF-(Q@j=5C1F;e=E#PsVMEA$IbPn11^zg*kZ(BHqtC zfG6iNa%!QTEB2B`=j~&qorIE5trV`dg;-t^%?=F5#Wx7?Pmk_7Xx*A02u_bn36*FK z34KG2WI_KlA)20Ee>GRvTpYDNZxf3jJue&SSP7)tt_|>o+VxFu%>D)-!o*7fDIT)m@S0 z{je%O5~F2(2)W$!B8-&U>;+aSnt|NMW`6wn_q7S3^e$f{H>w#BTT%a9il8>&-3 z+eJbE>}2~tCX`i?Z-nnZ3n)hH z+Dfn`_MP+nZ*YHbO=DA=U@n94nhF>0otR+QWf6IVGN!`1I3?5WP2p&g=19KbKPKQFDj(FK7J$Qyml*Mwc+$(%X>cuGXrmf; zD|eUwUl)ZM2j#H?h?bL1FA(kie8F+BhGf4#{vWZKadur=ow_>Mu(oYC`0a#_$>73C z&4?Yxt7D^IriKg1 zU7)KHs}bIbWz}!6)2z2P2)escg9j^#73n#WN4m4qefc)-%EaO)7`?_vjMe)8Z?r9c= zfiG`rY;^nbXDzr6<|A(0GHMJP=;@PQRi;KnU7{KEzVEAQUiu^~U}83{au?h*uTzVj zWo%v+{_=LU$Q>i{oOGPWrcBfJYTyV`bnaM9x2}^b8;M0N#OCwqb)Su@i3F8NLYUeB z#UpCWN+;u;y$I8DIxP{r|k_)Uvf> z^Vq+GAQ;;#WKvsY?ehClDK{eT&z{pz&@m)ZTrI0)8VMcx+|L0ne8|B0MS%|!gmnM& zyyW->41@?7aM*z8EpiTtF*Bk_P=C|HCUReg0R9(Ap7zWrDbc{G`x6DNi`(8;5y5$_ z0GK~y_%Ik57Qhdm9tr|=bCq;_Y8~^Zw#e~2^co7kuFIC^eciENAs9?*FS5>5_l03i zXQvF=5wOiQ8%a@^5++`qxdnOqE~cFX^JTwbk|k?;9bgiP>BzR8Ke4|DQ^37r#2FC( zJe1Hknya@bgS=(s>+m;wA9rT)(*d+{^rccA0%Z$7Mq3}G;eD7!(rwXCj;C5$WEDS- zO`^QJ2WwUrXfY%5Zuj(Oj5L1?gY9h;FN78O`YQ62%QwS41^Y5hHG?24+pVo0bP0(5 zM+mB1sR`{QeDtO3Fd(iEX*-ic{LKITYh2lyHjq~rw&eWd>FI#J)> z-OnUuiU6=;iv{50tl25wmaDJj4<1P8m@#XKMzvKRvcp%isI?Ka{h{1XuDw_<0bft; 
zd|}CF6_|aXJv?Gf1dg9LpSNPfQ5X4l0d5?3gufB3BK;u?{9ra#QF$BO%6uXGX- zw?Lm*eHVN^1pZvFyl^eA_>Y>53Fo+XF`ds=pLx?pIy)Xyh~R(&Ki8eLNP%f|D=I7l zn!kQdFFgP=wzjAsWWf;N)K@EL_De+P%g={mp;=HkZ`hoU)bgN+q?6h6t_^6;yo@*U z3_@vWD{Ft@ABe~Q#f!u1AHe~W(mBB>hTNR|i6S6ti>|d(o10I?j=G~uf_*P=fl4*5 zA)yoBFoD_@^O@I)H^S6W7Gu-(b36S}umqK_W< z?LIr=vpDyYC^ILpD1h`<(a{W0QZ{Z%>jfx&;pF#0aV5&4Q4W9KGZfGLmBR7&O|+O# zv-P%?Q*3!0;kD4T*bf!Jcx8=O(hG+!!nFYzJP@$fw9832=)q5L@}qAJRDb2^dM}za zU2Jzm(K!swgJ~S2*5a*N;w_AO%U{dG9dtw04Hr#1$2+OLpesA&mASUOP+CXo)0Qo5 z6VJU2GGOou0m`$vIZlgv;9eo{t@YNnDMuK>{3erwygeBJ@q+js*Xx(X^NHE4+%V{i1JOfh7s@8i8SwZ_XvYWJkqy=xpa=Q?Elb87WO^j*!W ze+4XxeHH=DT7(IDvD~XI%hADp4gT0>Y(CdoE9?gIQBi}@Zp54f?pFX}bmkA^kLTNv zxH>x#%W~^XjH}mF-h`40t~_K^^&LVF0s>U&obq0CN-EllX=9tBsh#TiTBLTyJT>7B zCr6VZpXvuUywZairWJjMhzs00RJy=Osv+gwF*N%|9GA};OQ=^y zS+Wf=0`ZXH5GKTW1Q1 z3?v^cieysd7^nnIb>Rd-aM3f1FVgQh3GX>6$eSNrEEG;x@LJq6Y}M7gBe~MIVRFD5 zKya}gC_OP|b2^>3_s1Fia}%}G?ZNl%SceSHMp1Nvw}(9t;<9sQ&Qazt8VX8*& zl9Qv`^uD19=PmJx{$d3D6e)iWA2MpI7^8|{%KN9)(BMJZ_x5SpyF($vdH6q`c&PwH zG_9Qn3*A+xgY}#jV3MTFb}V0pie#2NvyFCsJETm3HBj8h`56GwL5^fNW6^k0>bN)e zWp=mYns*q<)oa5&V^Y~tRd$I~gg4@^ve?OUwNE&-cFYwR;ep@>iomDn*<|W!%Mc#@ zoVgQ>(h|}=3hzM?a{n7^)$M;Is2ZJW*Sf!fw@UY~D&MPjobTIo!AxXK_R%D2qp<1@ z)tSf;{)Vpe zUN#OR<;S*S6cZ!kt^H*#f0Y7EyTYvek&QOJT+AQ|;4d|lYoTMxy8$fTS2M|L=F)pj z3Y(=z3;KOp+HJ*Vb0UVBs5gGsVVaozmHqWGly7I!!_}<~q)AX-4@YL*z#~0)%pC3% z3vp%~2HQd|ovz)oB9+lkin@N+|AeL}sqf2Ei?%@j6lex1W#4M6C^x_!HrC1HYL&lV zp>O+Nw-mUah&Ma(2Y8Ex^51>ZJbhZ>o%$=`uhl_*-geMH!=|7rV4|InMr^%<1NwZT zraUp0rYRbO3N>+-tCe(<0l=E?N6Z)1RO%Nr-o-~1Qu%zFRUyFtnCQO zx!8EkkI2csupzuW~jQWKJb zy5YyU`oX!PL*Rpcxl?-=Skr*aMiS z$n$$L{);V;!PejEC+7x3+hdLjMOH%$3ks2hb^36t=T^TLe%E~vdFF%m8ICx_^Wc4o zk*oY;k*GutsL4!7`DL!sNRP5>KZ4FgXOdiPvUy*}JY9&#B)+bnzE=Br&w-$UzLI$@ zyDgxKYi@gA*!BCRRvGXa2EP^%59$#^0KbVMl|ziTGc5iIh0fcvbAU1ifS4j_9SJ)~ zBjU8p=)}q$t)83j`kq1K3IU%r2AMwMk|Yt-v&)%}DGno%bP*7Am}D*J%GrX`A*mJQ z^~Y;ZMm~%cpQVL=rrn$&v~5@lQY<=(GJJ66Dzor7)Smt(h}@&8?BZZRd@n+s(8b>d zWf>A1N`!GS9fp;vNnT1bd*IB2k#9jip>u}#n}}nnupMqQAQ6vPh!TZUASA4z4qo?i z0~)+$6ez+F+n!tkzgV(WRk{Z$UEZMF-}4^-Q^gKe9=SyHWz=PMmxEb~q0ei*?hg5O z1I_y{+T7FE#VY|K?>P(SR*vnf^yFdt7SedfecBrAMf~C}zs!647!2^7c3wJFgc4j1 z?lG#pND(j-fzVD)g<;L{Tkj}8E%|)}wG7h|=eqK!j+IH?5UPjRdJ@JObcWST@S{7b zp)H<9#GLoL7?z`xM~0{3prnw$D@-yAdIDuitA5CIPrG&(@dc9YI>wpO0m65bFW$$q% zVht^{u&U?UJ6PAw!BR$4OOdVwr@J$-1n5e&bLCk@v-q3#I_Pip^R3fBbI1gWb7U=$6<{kWaZd0OogKN*U_x2}W8he` zFdNNE|2p8h%CeH8CpH1AeM#|cxoFIr)0PiKYOK769MU~3ie?AexBmXvF`oQn-o)Mk zhxq9QAXC9m-K{D2*aQf$NG)&GvI?PYz7+brjDEf(VJLtPhPG34uz@^Skvv|nBbBgF z^*9e=2_6XNgA|OpN4|pk{X{Mqq*b=xw9uFETKfBSAd!qgM!J*S7xrD!d!#j-2k!EL zrV|QoMeu!a#E@#Cf3P-dMqA*H!Hmmv9xF;rK{1~Yxr5d*C|j_jS1PAfvkx6uphCv1 z)}={M&rAuc==2ukpg0{gkfHq^Qq5a>h@6Mf#^Z2K|O07j2==M3i#G6I{ zOx4u)Pwo)$zBtAe&1jJ(b=i;09zQ$fY5?JVx`Yv)KZ;PV(@OnGofU)2dCZJo-16%CRrt<|qE9iLu2d3BgXQWUCgzIaKpna2?OkoG8thnJN(;_9bLQ~~sRmM-dRA5e95SsYx^T8Sp>{9)YC?-ooz@gZA zHt#la{GEDkL^_4;)X#n8mMox!1Z2A84(~s?`PgaQ7bp`jMaAdgSw9`xkZ7P)jK*YK zBd2woI~7Vdim#`(M5(jX6u`}cq(ETzGSi{bwL?=rL{1z&n#xUfhp%G86)ler)tBGr zK{(Hsp13ChyGftn9RP5MOP=$V(^W+Xk7VEz4YHMGP|ZNTr!lRjh1rx3TnHD1Z6N`Y zkrL6)C~v@R@FdU0KT6?&O6<=!f0Tp5Hp}!x0$OzR{jAQIS>rVQCA?3aVSFR4TY|#F z%3f8@1jk!Di~Ha&TO(|qL)>2;*9)u}`wX8L`o2;gIVEyh?L3xQ81`0{5Zma7rlhQd z)4f=E4xf(Xb8ub{@asC)JL;0tO)|k&KV$-~CEI`YGvK1|#the?3qVn%Pv;{;<(wSK zQB<=#H^g~GHv5+|bO38&RJS?_nnIdCaJ|M64wH)iLau9Xy}B@XvO|10 zOWWA==uJ^Z6JUHQ+g8CR_T7ByH?m#5qx;=Q(QfOdM}D1%)iZlfQUJi?2QK--(S?;2 z2b1|WCqa=I5qHRofy&G4JjvmW^$GA)d0+?AfPs)^Id*`>_yk6}M_%G1#Wv(4gPThY z-?Q{Ej7Vj3u7ut;C~ZM*f*!)2B{DznPJY%-jLGQ+clE+m!_xqujj?x_+OXBEJQ){? 
zNIdE}vRA^}2J*o*H#~c$G7--GPG10eS4o`%owi8qXV3!ECrmfLIV0bXs83}Es`Xsd z_GTDVFEE8Q?15`2k;sadI`_sR1BiAk+pe}#JwSrq#vCzY&e{n&rc7L2J&3|^@fLiI zVj32rhW`v@TjMv@2Z(4mJEmY<%S2X&nQKc3<~mx6=xIOK2)!cGQh6#bMS8IkPJ+4= zB9UFoCKHkH4lU{7RDkM=#cUvtK9$wUi2z&oDe?{^+G1NF?b`oBb=_h@T^Gz?r_ovp zQi6p5VVC&P8@l^A%gtY~?P=mO)ZbK!fz0N&zxs_VdB#;FD!~@Ce;dozFn!HVK9-RZ zl0GL)_hp2$Z3UAR9b&E5<~q~$rOwS+TGam;#tMC@aSd$Meja0Jr-4Oc$f>;vzbDYL zxJ$XW_S1a~oOG=7Q6*sUdCKK%wU#hCqx!0!k*k4}_ht_v<9j!XXKKOmKH$KVpq~t@ zrINIq@_6sY_Yjtf7$JVb=cuee)Fcp4&C^M^9$sMVT@nv_aI#qRC*WV2Js#aCqEEBQ zKc97Z8ECJnbJ|;-U{%0^1;%g}52CePW<^hfl`uuTULuNp3rm_V6f<31J!!X2f<|?i zkkp;JakGEDYKD9L?nrw7C2*GuVa7kWQ15*bLWSE(IH;<)O|p-dYDsz5tZmckLkt{` z&W-DQIJUgW%Z{N#YPBV-Z*T2h{9}V&_NKM{U$Cg{C=@uN_=5_Fv`8@S0fzu;m1#lB z<-5Kje9ZJ^+ET?M?8hW|CT8!j%$-nuaLEgu>=6z>%K)cw={wG@6|IY1w*kt7U_f$> z+Y^F&(_IDL!J=TI+AV1|RLkOXK+?$+{1q+HJ6CCSwnh{@?YK526wMbOOFpb6=PPBOS2| z$%HGNmIB9j3QGLHz${rZiCe+iN$C2oaSIe#J9u3g&@y zN*1m;jr+4=$U5mnvIs~#-sdY&jsU#16bekf#Ge;u%6xl4rB8+{U0)a| z9E%Uh?g||oV+-n5RMC&oD!47kWvG@cXteiWjlt^4&ue{Dkm*A|x&L%U2_I?GpWWlZ zqj4nF?I8N0ZMl^u%b$&kFN6^=Za}M7-8+L>&-6SRlKB#vS#hCqEH)Uxe zr7l1pm_lU}Xm^S{^*Q~6am^K6r#aqd!B}tA-a788`gP?OE2pNeuR<*kri{=tFfK#W zp)A;a$arhU6u2y;L2jLmcYT2Q1#dpm_n@d>@@Y~<_k#$vgS$PDx zk;&tOxKdDyPFZ7VQE2xu#nM^@*68wqfl#q3>9H;I z9$wQi`idehP-aK=nvKh3;dmSmY4~PdG^y8C<1#-=0N*zWOJ$w}*~= zQ4+8-)C`9Rch=>>F{@xCFDhaw0DgBW{D#>67m|xZxW02KgTw<)iZ;v7U*UbL;Wtyd zaBq7aIw$zX&{*ndD$cT?op*n{*~koMXSsCUtoY+WJBYBrUDR$f8m`gOaUK4#v$4Vx5=IK)k1QR|P= z4}%S*v>e>381$&MA41R*;0+L!aO}I8djG)SmA=HLozFLcEiPIPL@A!Z|8dt zdO>&;N%6(1wco!__saB^90+HG57egXE#CU8XEO$;snKD4l(P5Rs0TsvRv>2i+t0S* zK-+6D%;O`tyzCjOrL4$-yg=yK4b;7xe8Wm!0>9V%Zl;$1uhMf^{2SLY>(zew7h!Mj z(bdvwy8;H~=nIe>JpwbOk z{o(Ssd(W!hi{#-gu{zYy6^+7H%R2>l$I-9h>VcSbij3Ek(^QWs0Kv(=$T%Lkmy}{r z9{vu7>`DQ7_Cd_=Pk_RLotF+>tW^5|nFlUGm)K`kyiHWU{OHO}qLhCVXA4j8UO5*4$P z*h_NNc{*{APYXU}>p=8W^3?jO3L7BTl|ykvCI3!FVrq%&7^r<}DRK@8RbkZKKHv%y z%)EX_3INJL9iCrMK#WWaVQ3ti5)k;OPLo#7J!1#12LCCFb|4zw$Ik!Yl-8*BMo%d9 zS|R&(X(WC*OEp__Vqd$NQQFUlAMQMnR>X4f&aF{WjqD_67RcKnJxM@R0^>R8<2Ex~ zba7JGF6W#KfPTQ!OzYTPQWwH?E*O7cASQ$yO5!q-4vu#}Ixc zVE%W{(L9?v*s?^F{-uV&?$F)C%ZxSXG;e`eUzdV)L1s*Vhe4ws=HjSE2_+&Rn>pRf z2LBFjE5Whj@sL%J@)#Glr^&c8EjY>#HWAX~zxo7*R88OSPtWo02F~mcUi~(0V#msH zL^R@+(|^T&WKu=lMFa>yY!9gavJegYAA2Fl{R7uZK^aujaflEj2} z@*mWWuRx~aPq&R^>n&Q7fP(ox@@WIwEL+17d#HwxiszQoR?eO1!L8z49Ct!l_2G>! 
[git binary patch data omitted: base85-encoded blob, not human-readable; the diff header and the opening lines of the spatiotemporal detection demo script below were lost inside the same garbled span]
+                    if k >= max_num:
+                        break
+                    text = abbrev(lb)
+                    text = ': '.join([text, str(score[k])])
+                    location = (0 + st[0], 18 + k * 18 + st[1])
+                    textsize = cv2.getTextSize(text, FONTFACE, FONTSCALE,
+                                               THICKNESS)[0]
+                    textwidth = textsize[0]
+                    diag0 = (location[0] + textwidth, location[1] - 14)
+                    diag1 = (location[0], location[1] + 2)
+                    cv2.rectangle(frame, diag0, diag1, plate[k + 1], -1)
+                    cv2.putText(frame, text, location, FONTFACE, FONTSCALE,
+                                FONTCOLOR, THICKNESS, LINETYPE)
+
+    return frames_out
+
+
+def frame_extraction(video_path):
+    """Extract frames given video_path.
+
+    Args:
+        video_path (str): The video_path.
+    """
+    # Load the video, extract frames into ./tmp/video_name
+    target_dir = osp.join('./tmp', osp.basename(osp.splitext(video_path)[0]))
+    os.makedirs(target_dir, exist_ok=True)
+    # Should be able to handle videos up to several hours
+    frame_tmpl = osp.join(target_dir, 'img_{:06d}.jpg')
+    vid = cv2.VideoCapture(video_path)
+    frames = []
+    frame_paths = []
+    flag, frame = vid.read()
+    cnt = 0
+    while flag:
+        frames.append(frame)
+        frame_path = frame_tmpl.format(cnt + 1)
+        frame_paths.append(frame_path)
+        cv2.imwrite(frame_path, frame)
+        cnt += 1
+        flag, frame = vid.read()
+    return frame_paths, frames
+
+
+def load_label_map(file_path):
+    """Load Label Map.
+
+    Args:
+        file_path (str): The file path of label map.
+    Returns:
+        dict: The label map (int -> label name).
+    """
+    lines = open(file_path).readlines()
+    lines = [x.strip().split(': ') for x in lines]
+    return {int(x[0]): x[1] for x in lines}
+
+
+def abbrev(name):
+    """Get the abbreviation of label name:
+
+    'take (an object) from (a person)' -> 'take ... from ...'
+    """
+    while name.find('(') != -1:
+        st, ed = name.find('('), name.find(')')
+        name = name[:st] + '...' + name[ed + 1:]
+    return name
+
+
+def pack_result(human_detection, result, img_h, img_w):
+    """Short summary.
+
+    Args:
+        human_detection (np.ndarray): Human detection result.
+        result (type): The predicted label of each human proposal.
+        img_h (int): The image height.
+        img_w (int): The image width.
+    Returns:
+        tuple: Tuple of human proposal, label name and label score.
+ """ + human_detection[:, 0::2] /= img_w + human_detection[:, 1::2] /= img_h + results = [] + if result is None: + return None + for prop, res in zip(human_detection, result): + res.sort(key=lambda x: -x[1]) + results.append( + (prop.data.cpu().numpy(), [x[0] for x in res], [x[1] + for x in res])) + return results + + +def parse_args(): + parser = argparse.ArgumentParser(description='MMAction2 demo') + parser.add_argument('video', help='video file/url') + parser.add_argument( + 'out_filename', help='output filename', default='demo/stdet_demo.mp4') + parser.add_argument( + '--config', + default=('configs/detection/ava/slowonly_kinetics400-pretrained-' + 'r101_8xb16-8x8x1-20e_ava21-rgb.py'), + help='spatialtemporal detection model config file path') + parser.add_argument( + '--checkpoint', + default=('https://download.openmmlab.com/mmaction/detection/ava/' + 'slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb/' + 'slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb_' + '20201217-16378594.pth'), + help='spatialtemporal detection model checkpoint file/url') + parser.add_argument( + '--det-config', + default='demo/skeleton_demo_cfg/faster-rcnn_r50_fpn_2x_coco_infer.py', + help='human detection config file path (from mmdet)') + parser.add_argument( + '--det-checkpoint', + default=('http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/' + 'faster_rcnn_r50_fpn_2x_coco/' + 'faster_rcnn_r50_fpn_2x_coco_' + 'bbox_mAP-0.384_20200504_210434-a5d8aa15.pth'), + help='human detection checkpoint file/url') + parser.add_argument( + '--det-score-thr', + type=float, + default=0.9, + help='the threshold of human detection score') + parser.add_argument( + '--det-cat-id', + type=int, + default=0, + help='the category id for human detection') + parser.add_argument( + '--action-score-thr', + type=float, + default=0.5, + help='the threshold of human action score') + parser.add_argument( + '--label-map', + default='tools/data/ava/label_map.txt', + help='label map file') + parser.add_argument( + '--device', type=str, default='cuda:0', help='CPU/CUDA device option') + parser.add_argument( + '--short-side', + type=int, + default=256, + help='specify the short-side length of the image') + parser.add_argument( + '--predict-stepsize', + default=8, + type=int, + help='give out a prediction per n frames') + parser.add_argument( + '--output-stepsize', + default=4, + type=int, + help=('show one frame per n frames in the demo, we should have: ' + 'predict_stepsize % output_stepsize == 0')) + parser.add_argument( + '--output-fps', + default=6, + type=int, + help='the fps of demo video output') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + default={}, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. 
For example, ' + "'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'") + args = parser.parse_args() + return args + + +def main(): + args = parse_args() + register_all_modules() + + frame_paths, original_frames = frame_extraction(args.video) + num_frame = len(frame_paths) + h, w, _ = original_frames[0].shape + + # resize frames to shortside + new_w, new_h = mmcv.rescale_size((w, h), (args.short_side, np.Inf)) + frames = [mmcv.imresize(img, (new_w, new_h)) for img in original_frames] + w_ratio, h_ratio = new_w / w, new_h / h + + # Get clip_len, frame_interval and calculate center index of each clip + config = mmengine.Config.fromfile(args.config) + config.merge_from_dict(args.cfg_options) + val_pipeline = config.val_pipeline + + sampler = [x for x in val_pipeline if x['type'] == 'SampleAVAFrames'][0] + clip_len, frame_interval = sampler['clip_len'], sampler['frame_interval'] + window_size = clip_len * frame_interval + assert clip_len % 2 == 0, 'We would like to have an even clip_len' + # Note that it's 1 based here + timestamps = np.arange(window_size // 2, num_frame + 1 - window_size // 2, + args.predict_stepsize) + + # Load label_map + label_map = load_label_map(args.label_map) + try: + if config['data']['train']['custom_classes'] is not None: + label_map = { + id + 1: label_map[cls] + for id, cls in enumerate(config['data']['train'] + ['custom_classes']) + } + except KeyError: + pass + + # Get Human detection results + center_frames = [frame_paths[ind - 1] for ind in timestamps] + + human_detections, _ = detection_inference(args.det_config, + args.det_checkpoint, + center_frames, + args.det_score_thr, + args.det_cat_id, args.device) + torch.cuda.empty_cache() + for i in range(len(human_detections)): + det = human_detections[i] + det[:, 0:4:2] *= w_ratio + det[:, 1:4:2] *= h_ratio + human_detections[i] = torch.from_numpy(det[:, :4]).to(args.device) + + # Build STDET model + try: + # In our spatiotemporal detection demo, different actions should have + # the same number of bboxes. 
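+        # Keeping `action_thr` at 0 effectively disables score-based filtering
+        # in the model, so every proposal is reported for every action class.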
+ config['model']['test_cfg']['rcnn'] = dict(action_thr=0) + except KeyError: + pass + + config.model.backbone.pretrained = None + model = MODELS.build(config.model) + + load_checkpoint(model, args.checkpoint, map_location='cpu') + model.to(args.device) + model.eval() + + predictions = [] + + img_norm_cfg = dict( + mean=np.array(config.model.data_preprocessor.mean), + std=np.array(config.model.data_preprocessor.std), + to_rgb=False) + + print('Performing SpatioTemporal Action Detection for each clip') + assert len(timestamps) == len(human_detections) + prog_bar = mmengine.ProgressBar(len(timestamps)) + for timestamp, proposal in zip(timestamps, human_detections): + if proposal.shape[0] == 0: + predictions.append(None) + continue + + start_frame = timestamp - (clip_len // 2 - 1) * frame_interval + frame_inds = start_frame + np.arange(0, window_size, frame_interval) + frame_inds = list(frame_inds - 1) + imgs = [frames[ind].astype(np.float32) for ind in frame_inds] + _ = [mmcv.imnormalize_(img, **img_norm_cfg) for img in imgs] + # THWC -> CTHW -> 1CTHW + input_array = np.stack(imgs).transpose((3, 0, 1, 2))[np.newaxis] + input_tensor = torch.from_numpy(input_array).to(args.device) + + datasample = ActionDataSample() + datasample.proposals = InstanceData(bboxes=proposal) + datasample.set_metainfo(dict(img_shape=(new_h, new_w))) + with torch.no_grad(): + result = model(input_tensor, [datasample], mode='predict') + scores = result[0].pred_instances.scores + prediction = [] + # N proposals + for i in range(proposal.shape[0]): + prediction.append([]) + # Perform action score thr + for i in range(scores.shape[1]): + if i not in label_map: + continue + for j in range(proposal.shape[0]): + if scores[j, i] > args.action_score_thr: + prediction[j].append((label_map[i], scores[j, + i].item())) + predictions.append(prediction) + prog_bar.update() + + results = [] + for human_detection, prediction in zip(human_detections, predictions): + results.append(pack_result(human_detection, prediction, new_h, new_w)) + + def dense_timestamps(timestamps, n): + """Make it nx frames.""" + old_frame_interval = (timestamps[1] - timestamps[0]) + start = timestamps[0] - old_frame_interval / n * (n - 1) / 2 + new_frame_inds = np.arange( + len(timestamps) * n) * old_frame_interval / n + start + return new_frame_inds.astype(np.int) + + dense_n = int(args.predict_stepsize / args.output_stepsize) + frames = [ + cv2.imread(frame_paths[i - 1]) + for i in dense_timestamps(timestamps, dense_n) + ] + print('Performing visualization') + vis_frames = visualize(frames, results) + vid = mpy.ImageSequenceClip([x[:, :, ::-1] for x in vis_frames], + fps=args.output_fps) + vid.write_videofile(args.out_filename) + + tmp_frame_dir = osp.dirname(frame_paths[0]) + shutil.rmtree(tmp_frame_dir) + + +if __name__ == '__main__': + main() diff --git a/docs/en/user_guides/3_inference.md b/docs/en/user_guides/3_inference.md index 2989fe9994..f492892253 100644 --- a/docs/en/user_guides/3_inference.md +++ b/docs/en/user_guides/3_inference.md @@ -117,3 +117,62 @@ python demo/demo_skeleton.py demo/demo_skeleton.mp4 demo/demo_skeleton_out.mp4 \ --pose-checkpoint https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_256x192-c78dce93_20200708.pth \ --label-map tools/data/skeleton/label_map_ntu60.txt ``` + +## SpatioTemporal Action Detection Video Demo + +We provide a demo script to predict the SpatioTemporal Action Detection result using a single video. 
+
+```shell
+python demo/demo_spatiotemporal_det.py ${VIDEO_FILE} ${OUT_FILENAME} \
+    [--config ${SPATIOTEMPORAL_ACTION_DETECTION_CONFIG_FILE}] \
+    [--checkpoint ${SPATIOTEMPORAL_ACTION_DETECTION_CHECKPOINT}] \
+    [--det-config ${HUMAN_DETECTION_CONFIG_FILE}] \
+    [--det-checkpoint ${HUMAN_DETECTION_CHECKPOINT}] \
+    [--det-score-thr ${HUMAN_DETECTION_SCORE_THRESHOLD}] \
+    [--det-cat-id ${HUMAN_DETECTION_CATEGORY_ID}] \
+    [--action-score-thr ${ACTION_DETECTION_SCORE_THRESHOLD}] \
+    [--label-map ${LABEL_MAP}] \
+    [--device ${DEVICE}] \
+    [--short-side ${SHORT_SIDE}] \
+    [--predict-stepsize ${PREDICT_STEPSIZE}] \
+    [--output-stepsize ${OUTPUT_STEPSIZE}] \
+    [--output-fps ${OUTPUT_FPS}]
+```
+
+Optional arguments:
+
+- `OUT_FILENAME`: Path to the output video file. Defaults to `demo/stdet_demo.mp4`.
+- `SPATIOTEMPORAL_ACTION_DETECTION_CONFIG_FILE`: The spatiotemporal action detection config file path.
+- `SPATIOTEMPORAL_ACTION_DETECTION_CHECKPOINT`: The spatiotemporal action detection checkpoint URL.
+- `HUMAN_DETECTION_CONFIG_FILE`: The human detection config file path.
+- `HUMAN_DETECTION_CHECKPOINT`: The human detection checkpoint URL.
+- `HUMAN_DETECTION_SCORE_THRESHOLD`: The score threshold for human detection. Defaults to 0.9.
+- `HUMAN_DETECTION_CATEGORY_ID`: The category id for human detection. Defaults to 0.
+- `ACTION_DETECTION_SCORE_THRESHOLD`: The score threshold for action detection. Defaults to 0.5.
+- `LABEL_MAP`: The label map used. Defaults to `tools/data/ava/label_map.txt`.
+- `DEVICE`: The device to run the demo on. Allowed values are CUDA devices like `cuda:0` or `cpu`. Defaults to `cuda:0`.
+- `SHORT_SIDE`: The short side used for frame extraction. Defaults to 256.
+- `PREDICT_STEPSIZE`: Make a prediction every N frames. Defaults to 8.
+- `OUTPUT_STEPSIZE`: Output 1 frame per N frames in the input video. Note that `PREDICT_STEPSIZE % OUTPUT_STEPSIZE == 0`. Defaults to 4.
+- `OUTPUT_FPS`: The FPS of the demo video output. Defaults to 6.
+
+Examples:
+
+Assume that you are located at `$MMACTION2`.
+
+1. Use Faster RCNN as the human detector and SlowOnly-8x8-R101 as the action detector. Make a prediction every 8 frames, write 1 of every 4 frames to the output video, and encode the output video at 6 FPS.
+
+```shell
+python demo/demo_spatiotemporal_det.py demo/demo.mp4 demo/demo_spatiotemporal_det.mp4 \
+    --config configs/detection/ava/slowonly_kinetics400-pretrained-r101_8xb16-8x8x1-20e_ava21-rgb.py \
+    --checkpoint https://download.openmmlab.com/mmaction/detection/ava/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb_20201217-16378594.pth \
+    --det-config demo/skeleton_demo_cfg/faster-rcnn_r50_fpn_2x_coco_infer.py \
+    --det-checkpoint http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_2x_coco/faster_rcnn_r50_fpn_2x_coco_bbox_mAP-0.384_20200504_210434-a5d8aa15.pth \
+    --det-score-thr 0.9 \
+    --action-score-thr 0.5 \
+    --label-map tools/data/ava/label_map.txt \
+    --predict-stepsize 8 \
+    --output-stepsize 4 \
+    --output-fps 6
+```
diff --git a/mmaction/apis/inference.py b/mmaction/apis/inference.py
index c67b9a574d..64038e2c9a 100644
--- a/mmaction/apis/inference.py
+++ b/mmaction/apis/inference.py
@@ -127,7 +127,8 @@ def detection_inference(det_config: Union[str, Path, mmengine.Config],
                         '`init_detector` from `mmdet.apis`. These apis are '
                         'required in this inference api! 
') - model = init_detector(det_config, det_checkpoint, device) + model = init_detector( + config=det_config, checkpoint=det_checkpoint, device=device) results = [] data_samples = [] From 0341f70a3f34f6d2eea38a63ddf4ba12e2d0eeae Mon Sep 17 00:00:00 2001 From: Kai Hu Date: Fri, 2 Dec 2022 03:24:54 -0500 Subject: [PATCH 29/57] Sthv2 checkpoint for TSN/TSM (#2082) --- configs/recognition/tsm/README.md | 26 ++-- configs/recognition/tsm/metafile.yml | 69 +++++++++++ ...etrained-r101_8xb16-1x1x8-50e_sthv2-rgb.py | 6 + ...etrained-r50_8xb16-1x1x16-50e_sthv2-rgb.py | 56 +++++++++ ...retrained-r50_8xb16-1x1x8-50e_sthv2-rgb.py | 117 ++++++++++++++++++ configs/recognition/tsn/README.md | 31 +++-- configs/recognition/tsn/metafile.yml | 42 +++++++ ...etrained-r50_8xb32-1x1x16-50e_sthv2-rgb.py | 53 ++++++++ ...retrained-r50_8xb32-1x1x8-50e_sthv2-rgb.py | 99 +++++++++++++++ 9 files changed, 478 insertions(+), 21 deletions(-) create mode 100644 configs/recognition/tsm/tsm_imagenet-pretrained-r101_8xb16-1x1x8-50e_sthv2-rgb.py create mode 100644 configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_sthv2-rgb.py create mode 100644 configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb.py create mode 100644 configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x16-50e_sthv2-rgb.py create mode 100644 configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-50e_sthv2-rgb.py diff --git a/configs/recognition/tsm/README.md b/configs/recognition/tsm/README.md index 99a79edcd3..5c322012e5 100644 --- a/configs/recognition/tsm/README.md +++ b/configs/recognition/tsm/README.md @@ -20,15 +20,23 @@ The explosive growth in video streaming gives rise to challenges on performing v ### Kinetics-400 -| frame sampling strategy | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | inference time(video/s) | gpu_mem(M) | config | ckpt | log | -| :---------------------: | :------------: | :--: | :---------------------: | :------: | :------: | :------: | :----------------: | :---------------------: | :--------: | :--------------------: | :------------------: | :-----------------: | -| 1x1x8 | short-side 320 | 8 | ResNet50 | ImageNet | 72.95 | 90.45 | 8 clips x 10 crop | x | 13723 | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_kinetics400-rgb_20220831-64d69186.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_kinetics400-rgb.log) | -| 1x1x8 | short-side 320 | 8 | ResNet50 | ImageNet | 73.11 | 90.06 | 8 clips x 10 crop | x | 13723 | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-100e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x8-100e_kinetics400-rgb_20220831-a6db1e5d.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-100e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x8-100e_kinetics400-rgb.log) | -| 1x1x16 | short-side 320 | 8 | ResNet50 | ImageNet | 74.64 | 91.42 | 16 clips x 10 crop | x | 27044 | 
[config](/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_kinetics400-rgb_20220831-042b1748.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_kinetics400-rgb.log) | -| 1x1x8 (dense) | short-side 320 | 8 | ResNet50 | ImageNet | 73.39 | 90.78 | 8 clips x 10 crop | x | 13723 | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-dense-1x1x8-50e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-dense-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-dense-1x1x8-50e_kinetics400-rgb_20220831-f55d3c2b.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-dense-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-dense-1x1x8-50e_kinetics400-rgb.log) | -| 1x1x8 | short-side 320 | 8 | ResNet50 (NonLocalDotProduct) | ImageNet | 74.17 | 90.95 | 8 clips x 10 crop | x | 18413 | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-r50-nl-dot-product_8xb16-1x1x8-50e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-dot-product_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-dot-product_8xb16-1x1x8-50e_kinetics400-rgb_20220831-108bfde5.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-dot-product_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-dot-product_8xb16-1x1x8-50e_kinetics400-rgb.log) | -| 1x1x8 | short-side 320 | 8 | ResNet50 (NonLocalGauss) | ImageNet | 73.37 | 90.82 | 8 clips x 10 crop | x | 19925 | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-r50-nl-gaussian_8xb16-1x1x8-50e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-gaussian_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-gaussian_8xb16-1x1x8-50e_kinetics400-rgb_20220831-7e54dacf.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-gaussian_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-gaussian_8xb16-1x1x8-50e_kinetics400-rgb.log) | -| 1x1x8 | short-side 320 | 8 | ResNet50 (NonLocalEmbedGauss) | ImageNet | 74.45 | 91.11 | 8 clips x 10 crop | x | 19726 | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-r50-nl-embedded-gaussian_8xb16-1x1x8-50e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-embedded-gaussian_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-embedded-gaussian_8xb16-1x1x8-50e_kinetics400-rgb_20220831-35eddb57.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-embedded-gaussian_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-embedded-gaussian_8xb16-1x1x8-50e_kinetics400-rgb.log) | +| frame sampling strategy | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | FLOPs | params | config | ckpt | log | +| :---------------------: | :------------: | :--: | :---------------------------: | :------: | :------: | :------: | 
:----------------: | :----: | :----: | :-------------------------: | ------------------------: | -----------------------: | +| 1x1x8 | short-side 320 | 8 | ResNet50 | ImageNet | 72.95 | 90.45 | 8 clips x 10 crop | 32.88G | 23.87M | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_kinetics400-rgb_20220831-64d69186.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_kinetics400-rgb.log) | +| 1x1x8 | short-side 320 | 8 | ResNet50 | ImageNet | 73.11 | 90.06 | 8 clips x 10 crop | 32.88G | 23.87M | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-100e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x8-100e_kinetics400-rgb_20220831-a6db1e5d.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-100e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x8-100e_kinetics400-rgb.log) | +| 1x1x16 | short-side 320 | 8 | ResNet50 | ImageNet | 74.64 | 91.42 | 16 clips x 10 crop | 65.75G | 23.87M | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_kinetics400-rgb_20220831-042b1748.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_kinetics400-rgb.log) | +| 1x1x8 (dense) | short-side 320 | 8 | ResNet50 | ImageNet | 73.39 | 90.78 | 8 clips x 10 crop | 32.88G | 23.87M | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-dense-1x1x8-50e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-dense-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-dense-1x1x8-50e_kinetics400-rgb_20220831-f55d3c2b.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-dense-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-dense-1x1x8-50e_kinetics400-rgb.log) | +| 1x1x8 | short-side 320 | 8 | ResNet50 (NonLocalDotProduct) | ImageNet | 74.17 | 90.95 | 8 clips x 10 crop | 61.30G | 31.68M | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-r50-nl-dot-product_8xb16-1x1x8-50e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-dot-product_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-dot-product_8xb16-1x1x8-50e_kinetics400-rgb_20220831-108bfde5.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-dot-product_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-dot-product_8xb16-1x1x8-50e_kinetics400-rgb.log) | +| 1x1x8 | short-side 320 | 8 | ResNet50 (NonLocalGauss) | ImageNet | 73.37 | 90.82 | 8 clips x 10 crop | 59.06G | 28.00M | 
[config](/configs/recognition/tsm/tsm_imagenet-pretrained-r50-nl-gaussian_8xb16-1x1x8-50e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-gaussian_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-gaussian_8xb16-1x1x8-50e_kinetics400-rgb_20220831-7e54dacf.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-gaussian_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-gaussian_8xb16-1x1x8-50e_kinetics400-rgb.log) | +| 1x1x8 | short-side 320 | 8 | ResNet50 (NonLocalEmbedGauss) | ImageNet | 74.45 | 91.11 | 8 clips x 10 crop | 61.30G | 31.68M | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-r50-nl-embedded-gaussian_8xb16-1x1x8-50e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-embedded-gaussian_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-embedded-gaussian_8xb16-1x1x8-50e_kinetics400-rgb_20220831-35eddb57.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-embedded-gaussian_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-embedded-gaussian_8xb16-1x1x8-50e_kinetics400-rgb.log) | + +### Something-something V2 + +| frame sampling strategy | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | FLOPs | params | config | ckpt | log | +| :---------------------: | :--------: | :--: | :-------: | :------: | :------: | :------: | :----------------: | :----: | :----: | :---------------------------------: | :-------------------------------: | :------------------------------: | +| 1x1x8 | height 256 | 8 | ResNet50 | ImageNet | 60.20 | 86.13 | 8 clips x 10 crop | 32.88G | 23.87M | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb/tsm_imagenet-pretrained-r101_8xb16-1x1x8-50e_sthv2-rgb_20221122-cb2cc64e.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb.log) | +| 1x1x16 | height 256 | 8 | ResNet50 | ImageNet | 62.46 | 87.75 | 16 clips x 10 crop | 65.75G | 23.87M | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_sthv2-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_sthv2-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_sthv2-rgb_20221122-b1fb8264.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_sthv2-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_sthv2-rgb.log) | +| 1x1x8 | height 256 | 8 | ResNet101 | ImageNet | 60.49 | 85.99 | 8 clips x 10 crop | 62.66G | 42.86M | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-r101_8xb16-1x1x8-50e_sthv2-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r101_8xb16-1x1x8-50e_sthv2-rgb/tsm_imagenet-pretrained-r101_8xb16-1x1x8-50e_sthv2-rgb_20221122-cb2cc64e.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r101_8xb16-1x1x8-50e_sthv2-rgb/tsm_imagenet-pretrained-r101_8xb16-1x1x8-50e_sthv2-rgb.log) | Note: diff --git a/configs/recognition/tsm/metafile.yml 
b/configs/recognition/tsm/metafile.yml index 910a912bec..f5931d0ef0 100644 --- a/configs/recognition/tsm/metafile.yml +++ b/configs/recognition/tsm/metafile.yml @@ -166,3 +166,72 @@ Models: Top 5 Accuracy: 90.82 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-gaussian_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-gaussian_8xb16-1x1x8-50e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-gaussian_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-gaussian_8xb16-1x1x8-50e_kinetics400-rgb_20220831-7e54dacf.pth + + - Name: tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb + Config: configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb.py + In Collection: TSM + Metadata: + Architecture: ResNet50 + Batch Size: 16 + Epochs: 100 + FLOPs: 32.88G + Parameters: 23.87M + Pretrained: ImageNet + Resolution: height 256 + Training Data: Kinetics-400 + Training Resources: 8 GPUs + Modality: RGB + Results: + - Dataset: Kinetics-400 + Task: Action Recognition + Metrics: + Top 1 Accuracy: 60.20 + Top 5 Accuracy: 86.13 + Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb.log + Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb/tsm_imagenet-pretrained-r101_8xb16-1x1x8-50e_sthv2-rgb_20221122-cb2cc64e.pth + + - Name: tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_sthv2-rgb + Config: configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_sthv2-rgb.py + In Collection: TSM + Metadata: + Architecture: ResNet50 + Batch Size: 16 + Epochs: 100 + FLOPs: 65.75G + Parameters: 23.87M + Pretrained: ImageNet + Resolution: height 256 + Training Data: Kinetics-400 + Training Resources: 8 GPUs + Modality: RGB + Results: + - Dataset: Kinetics-400 + Task: Action Recognition + Metrics: + Top 1 Accuracy: 62.46 + Top 5 Accuracy: 87.75 + Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_sthv2-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_sthv2-rgb.log + Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_sthv2-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_sthv2-rgb_20221122-b1fb8264.pth + + - Name: tsm_imagenet-pretrained-r101_8xb16-1x1x8-50e_sthv2-rgb + Config: configs/recognition/tsm/tsm_imagenet-pretrained-r101_8xb16-1x1x8-50e_sthv2-rgb.py + In Collection: TSM + Metadata: + Architecture: ResNet101 + Batch Size: 16 + Epochs: 100 + FLOPs: 62.66G + Parameters: 42.86M + Pretrained: ImageNet + Resolution: height 256 + Training Data: Kinetics-400 + Training Resources: 8 GPUs + Modality: RGB + Results: + - Dataset: Kinetics-400 + Task: Action Recognition + Metrics: + Top 1 Accuracy: 60.49 + Top 5 Accuracy: 85.99 + Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r101_8xb16-1x1x8-50e_sthv2-rgb/tsm_imagenet-pretrained-r101_8xb16-1x1x8-50e_sthv2-rgb.log + Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r101_8xb16-1x1x8-50e_sthv2-rgb/tsm_imagenet-pretrained-r101_8xb16-1x1x8-50e_sthv2-rgb_20221122-cb2cc64e.pth diff --git a/configs/recognition/tsm/tsm_imagenet-pretrained-r101_8xb16-1x1x8-50e_sthv2-rgb.py 
b/configs/recognition/tsm/tsm_imagenet-pretrained-r101_8xb16-1x1x8-50e_sthv2-rgb.py new file mode 100644 index 0000000000..9429730700 --- /dev/null +++ b/configs/recognition/tsm/tsm_imagenet-pretrained-r101_8xb16-1x1x8-50e_sthv2-rgb.py @@ -0,0 +1,6 @@ +_base_ = ['tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb.py'] + +# model settings +r101_checkpoint = 'https://download.pytorch.org/models/resnet101-cd907fc2.pth' + +model = dict(backbone=dict(pretrained=r101_checkpoint, depth=101)) diff --git a/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_sthv2-rgb.py b/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_sthv2-rgb.py new file mode 100644 index 0000000000..5a946b1fef --- /dev/null +++ b/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_sthv2-rgb.py @@ -0,0 +1,56 @@ +_base_ = ['tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb.py'] + +model = dict(backbone=dict(num_segments=16), cls_head=dict(num_segments=16)) + +train_pipeline = [ + dict(type='DecordInit'), + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=16), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1, + num_fixed_crops=13), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='FormatShape', input_format='NCHW'), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=16, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='FormatShape', input_format='NCHW'), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=16, + twice_sample=True, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='TenCrop', crop_size=224), + dict(type='FormatShape', input_format='NCHW'), + dict(type='PackActionInputs') +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) + +val_dataloader = dict(dataset=dict(pipeline=val_pipeline)) + +test_dataloader = dict(pipeline=test_pipeline, test_mode=True) diff --git a/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb.py b/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb.py new file mode 100644 index 0000000000..ae8c5f4dd3 --- /dev/null +++ b/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb.py @@ -0,0 +1,117 @@ +_base_ = ['../../_base_/models/tsm_r50.py', '../../_base_/default_runtime.py'] + +# model settings +model = dict(cls_head=dict(num_classes=174, dropout_ratio=0.5)) + +# dataset settings +dataset_type = 'VideoDataset' +data_root = 'data/sthv2/videos' +ann_file_train = 'data/sthv2/sthv2_train_list_videos.txt' +ann_file_val = 'data/sthv2/sthv2_val_list_videos.txt' + +train_pipeline = [ + dict(type='DecordInit'), + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1, + num_fixed_crops=13), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + 
dict(type='FormatShape', input_format='NCHW'), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='FormatShape', input_format='NCHW'), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True, + twice_sample=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='TenCrop', crop_size=224), + dict(type='FormatShape', input_format='NCHW'), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=16, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=dict(video=data_root), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=16, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=dict(video=data_root), + pipeline=val_pipeline, + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=dict(video=data_root), + pipeline=test_pipeline, + test_mode=True)) + +val_evaluator = dict(type='AccMetric') +test_evaluator = val_evaluator + +default_hooks = dict(checkpoint=dict(interval=3, max_keep_ckpts=3)) + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=50, val_begin=1, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +param_scheduler = [ + dict(type='LinearLR', start_factor=0.1, by_epoch=True, begin=0, end=5), + dict( + type='MultiStepLR', + begin=0, + end=50, + by_epoch=True, + milestones=[25, 45], + gamma=0.1) +] + +optim_wrapper = dict( + constructor='TSMOptimWrapperConstructor', + paramwise_cfg=dict(fc_lr5=True), + optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0005), + clip_grad=dict(max_norm=20, norm_type=2)) diff --git a/configs/recognition/tsn/README.md b/configs/recognition/tsn/README.md index 52098fd716..d7fdc81276 100644 --- a/configs/recognition/tsn/README.md +++ b/configs/recognition/tsn/README.md @@ -20,13 +20,20 @@ Deep convolutional networks have achieved great success for visual recognition i ### Kinetics-400 -| frame sampling strategy | scheduler | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | inference time(video/s) | gpu_mem(M) | config | ckpt | log | -| :---------------------: | :-------: | :------------: | :--: | :-------: | :------: | :------: | :------: | :----------------: | :---------------------: | :--------: | :---------------------: | :--------------------: | :-------------------: | -| 1x1x3 | MultiStep | short-side 320 | 8 | ResNet50 | ImageNet | 72.77 | 90.66 | 25 clips x 10 crop | x | 8321 | [config](/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb_20220906-cd10898e.pth) | 
[log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb.log) | -| 1x1x5 | MultiStep | short-side 320 | 8 | ResNet50 | ImageNet | 73.73 | 91.15 | 25 clips x 10 crop | x | 13616 | [config](/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x5-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x5-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x5-100e_kinetics400-rgb_20220906-65d68713.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x5-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x5-100e_kinetics400-rgb.log) | -| 1x1x8 | MultiStep | short-side 320 | 8 | ResNet50 | ImageNet | 74.21 | 91.36 | 25 clips x 10 crop | x | 21549 | [config](/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb_20220906-2692d16c.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb.log) | -| dense-1x1x5 | MultiStep | short-side 320 | 8 | ResNet50 | ImageNet | 71.37 | 89.66 | 25 clips x 10 crop | x | 13616 | [config](/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-dense-1x1x5-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-dense-1x1x5-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-dense-1x1x5-100e_kinetics400-rgb_20220906-dcbc6e01.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-dense-1x1x5-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-dense-1x1x5-100e_kinetics400-rgb.log) | -| 1x1x8 | MultiStep | short-side 320 | 8 | ResNet101 | ImageNet | 75.91 | 92.21 | 25 clips x 10 crop | x | 13616 | [config](/configs/recognition/tsn/tsn_imagenet-pretrained-r101_8xb32-1x1x8-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r101_8xb32-1x1x8-100e_kinetics400-rgb/tsn_imagenet-pretrained-r101_8xb32-1x1x8-100e_kinetics400-rgb_20220906-c0d7d41e.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r101_8xb32-1x1x8-100e_kinetics400-rgb/tsn_imagenet-pretrained-r101_8xb32-1x1x8-100e_kinetics400-rgb.log) | +| frame sampling strategy | scheduler | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | config | ckpt | log | +| :---------------------: | :-------: | :------------: | :--: | :-------: | :------: | :------: | :------: | :----------------: | :--------------------------------: | :-------------------------------: | :------------------------------: | +| 1x1x3 | MultiStep | short-side 320 | 8 | ResNet50 | ImageNet | 72.77 | 90.66 | 25 clips x 10 crop | [config](/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb_20220906-cd10898e.pth) | 
[log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb.log) | +| 1x1x5 | MultiStep | short-side 320 | 8 | ResNet50 | ImageNet | 73.73 | 91.15 | 25 clips x 10 crop | [config](/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x5-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x5-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x5-100e_kinetics400-rgb_20220906-65d68713.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x5-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x5-100e_kinetics400-rgb.log) | +| 1x1x8 | MultiStep | short-side 320 | 8 | ResNet50 | ImageNet | 74.21 | 91.36 | 25 clips x 10 crop | [config](/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb_20220906-2692d16c.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb.log) | +| dense-1x1x5 | MultiStep | short-side 320 | 8 | ResNet50 | ImageNet | 71.37 | 89.66 | 25 clips x 10 crop | [config](/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-dense-1x1x5-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-dense-1x1x5-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-dense-1x1x5-100e_kinetics400-rgb_20220906-dcbc6e01.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-dense-1x1x5-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-dense-1x1x5-100e_kinetics400-rgb.log) | +| 1x1x8 | MultiStep | short-side 320 | 8 | ResNet101 | ImageNet | 75.91 | 92.21 | 25 clips x 10 crop | [config](/configs/recognition/tsn/tsn_imagenet-pretrained-r101_8xb32-1x1x8-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r101_8xb32-1x1x8-100e_kinetics400-rgb/tsn_imagenet-pretrained-r101_8xb32-1x1x8-100e_kinetics400-rgb_20220906-c0d7d41e.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r101_8xb32-1x1x8-100e_kinetics400-rgb/tsn_imagenet-pretrained-r101_8xb32-1x1x8-100e_kinetics400-rgb.log) | + +### Something-Something V2 + +| frame sampling strategy | scheduler | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | config | ckpt | log | +| :---------------------: | :-------: | :--------: | :--: | :------: | :------: | :------: | :------: | :----------------: | :----------------------------------: | :--------------------------------: | :--------------------------------: | +| 1x1x8 | MultiStep | height 256 | 8 | ResNet50 | ImageNet | 32.55 | 63.27 | 25 clips x 10 crop | [config](/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-50e_sthv2-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-50e_sthv2-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x8-50e_sthv2-rgb_20221122-ad2dbb37.pth) | 
[log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-50e_sthv2-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x8-50e_sthv2-rgb.log) | +| 1x1x16 | MultiStep | height 256 | 8 | ResNet50 | ImageNet | 35.22 | 66.13 | 25 clips x 10 crop | [config](/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x16-50e_sthv2-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x16-50e_sthv2-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x16-50e_sthv2-rgb_20221122-ee13c8e2.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x16-50e_sthv2-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x16-50e_sthv2-rgb.log) | ### Using backbones from 3rd-party in TSN @@ -36,11 +43,11 @@ It's possible and convenient to use a 3rd-party backbone for TSN under the frame - [x] Backbones from [TorchVision](https://github.com/pytorch/vision/) - [x] Backbones from [TIMM (pytorch-image-models)](https://github.com/rwightman/pytorch-image-models) -| frame sampling strategy | scheduler | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | inference time(video/s) | gpu_mem(M) | config | ckpt | log | -| :---------------------: | :-------: | :------------: | :--: | :--------------: | :------: | :------: | :------: | :----------------: | :---------------------: | :--------: | :-------------------: | :-----------------: | :-----------------: | -| 1x1x3 | MultiStep | short-side 320 | 8 | ResNext101 | ImageNet | 72.79 | 90.40 | 25 clips x 10 crop | x | 31832 | [config](/configs/recognition/tsn/custom_backbones/tsn_imagenet-pretrained-rn101-32x4d_8xb32-1x1x3-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-rn101-32x4d_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-rn101-32x4d_8xb32-1x1x3-100e_kinetics400-rgb_20220906-23cff032.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-rn101-32x4d_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-rn101-32x4d_8xb32-1x1x3-100e_kinetics400-rgb.log) | -| 1x1x3 | MultiStep | short-side 320 | 8 | DenseNet161 | ImageNet | 71.83 | 90.02 | 25 clips x 10 crop | x | 22701 | [config](/configs/recognition/tsn/custom_backbones/tsn_imagenet-pretrained-dense161_8xb32-1x1x3-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-dense161_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-dense161_8xb32-1x1x3-100e_kinetics400-rgb_20220906-5f4c0daf.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-dense161_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-dense161_8xb32-1x1x3-100e_kinetics400-rgb.log) | -| 1x1x3 | MultiStep | short-side 320 | 8 | Swin Transformer | ImageNet | 76.90 | 92.55 | 25 clips x 10 crop | x | 24014 | [config](/configs/recognition/tsn/custom_backbones/tsn_imagenet-pretrained-swin-transformer_8xb32-1x1x3-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-swin-transformer_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-swin-transformer_8xb32-1x1x3-100e_kinetics400-rgb_20220906-65ed814e.pth) | 
[log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-swin-transformer_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-swin-transformer_8xb32-1x1x3-100e_kinetics400-rgb.log) | 1. Note that some backbones in TIMM are not supported due to multiple reasons. Please refer to [PR #880](https://github.com/open-mmlab/mmaction2/pull/880) for details. 
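For reference, a custom-backbone config is usually just a thin override of the ResNet50 baseline listed above. The snippet below is an illustrative sketch rather than a copy of the shipped `custom_backbones` configs: the `torchvision.` type prefix, the `_delete_=True` override and the `in_channels` value are assumptions that should be checked against the linked config files for the backbone you pick.

```python
# Illustrative sketch only; consult the linked custom_backbones configs for
# the exact fields. Assumes a `torchvision.`-prefixed backbone type is accepted
# and that DenseNet-161 exposes a 2208-dimensional final feature map.
_base_ = ['../tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb.py']

model = dict(
    backbone=dict(
        type='torchvision.densenet161',  # backbone taken from TorchVision
        pretrained=True,
        _delete_=True),  # discard the ResNet50 settings inherited from _base_
    cls_head=dict(in_channels=2208))  # match the backbone's output channels
```

TIMM and MMClassification backbones would follow the same pattern, changing only the type prefix and the `in_channels` of the head.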
diff --git a/configs/recognition/tsn/metafile.yml b/configs/recognition/tsn/metafile.yml index 0abe944863..5d43dce81a 100644 --- a/configs/recognition/tsn/metafile.yml +++ b/configs/recognition/tsn/metafile.yml @@ -190,3 +190,45 @@ Models: Top 5 Accuracy: 92.55 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-swin-transformer_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-swin-transformer_8xb32-1x1x3-100e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-swin-transformer_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-swin-transformer_8xb32-1x1x3-100e_kinetics400-rgb_20220906-65ed814e.pth + + - Name: tsn_imagenet-pretrained-r50_8xb32-1x1x8-50e_sthv2-rgb + Config: configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-50e_sthv2-rgb.py + In Collection: TSN + Metadata: + Architecture: ResNet50 + Batch Size: 32 + Epochs: 100 + Pretrained: ImageNet + Resolution: height 256 + Training Data: Kinetics-400 + Training Resources: 8 GPUs + Modality: RGB + Results: + - Dataset: Kinetics-400 + Task: Action Recognition + Metrics: + Top 1 Accuracy: 32.55 + Top 5 Accuracy: 63.27 + Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-50e_sthv2-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x8-50e_sthv2-rgb.log + Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-50e_sthv2-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x8-50e_sthv2-rgb_20221122-ad2dbb37.pth + + - Name: tsn_imagenet-pretrained-r50_8xb32-1x1x16-50e_sthv2-rgb + Config: configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x16-50e_sthv2-rgb.py + In Collection: TSN + Metadata: + Architecture: ResNet50 + Batch Size: 32 + Epochs: 100 + Pretrained: ImageNet + Resolution: height 256 + Training Data: Kinetics-400 + Training Resources: 8 GPUs + Modality: RGB + Results: + - Dataset: Kinetics-400 + Task: Action Recognition + Metrics: + Top 1 Accuracy: 35.22 + Top 5 Accuracy: 66.13 + Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x16-50e_sthv2-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x16-50e_sthv2-rgb.log + Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x16-50e_sthv2-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x16-50e_sthv2-rgb_20221122-ee13c8e2.pth diff --git a/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x16-50e_sthv2-rgb.py b/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x16-50e_sthv2-rgb.py new file mode 100644 index 0000000000..4fd06b2168 --- /dev/null +++ b/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x16-50e_sthv2-rgb.py @@ -0,0 +1,53 @@ +_base_ = ['tsn_imagenet-pretrained-r50_8xb32-1x1x8-50e_sthv2-rgb.py'] + +train_pipeline = [ + dict(type='DecordInit'), + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=16), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1, + num_fixed_crops=13), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='FormatShape', input_format='NCHW'), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + 
num_clips=16, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='FormatShape', input_format='NCHW'), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=16, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='TenCrop', crop_size=224), + dict(type='FormatShape', input_format='NCHW'), + dict(type='PackActionInputs') +] + +train_dataloader = dict(dataset=dict(pipeline=train_pipeline)) + +val_dataloader = dict(dataset=dict(pipeline=val_pipeline)) + +test_dataloader = dict(pipeline=test_pipeline, test_mode=True) diff --git a/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-50e_sthv2-rgb.py b/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-50e_sthv2-rgb.py new file mode 100644 index 0000000000..14189db4b3 --- /dev/null +++ b/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-50e_sthv2-rgb.py @@ -0,0 +1,99 @@ +_base_ = [ + '../../_base_/models/tsn_r50.py', '../../_base_/schedules/sgd_50e.py', + '../../_base_/default_runtime.py' +] + +# model settings +model = dict(cls_head=dict(num_classes=174, dropout_ratio=0.5)) + +# dataset settings +dataset_type = 'VideoDataset' +data_root = 'data/sthv2/videos' +ann_file_train = 'data/sthv2/sthv2_train_list_videos.txt' +ann_file_val = 'data/sthv2/sthv2_val_list_videos.txt' + +train_pipeline = [ + dict(type='DecordInit'), + dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict( + type='MultiScaleCrop', + input_size=224, + scales=(1, 0.875, 0.75, 0.66), + random_crop=False, + max_wh_scale_gap=1), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='FormatShape', input_format='NCHW'), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=8, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='FormatShape', input_format='NCHW'), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=1, + frame_interval=1, + num_clips=25, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='TenCrop', crop_size=224), + dict(type='FormatShape', input_format='NCHW'), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=32, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file=ann_file_train, + data_prefix=dict(video=data_root), + pipeline=train_pipeline)) +val_dataloader = dict( + batch_size=32, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=dict(video=data_root), + pipeline=val_pipeline, + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + data_prefix=dict(video=data_root), + pipeline=test_pipeline, + test_mode=True)) + +val_evaluator = 
dict(type='AccMetric')
+test_evaluator = val_evaluator
+
+default_hooks = dict(checkpoint=dict(interval=3, max_keep_ckpts=3))
+
+train_cfg = dict(
+    type='EpochBasedTrainLoop', max_epochs=50, val_begin=1, val_interval=5)

From 26d0f8ea8a7f16ed5f0485b6e05b826dc2f27a42 Mon Sep 17 00:00:00 2001
From: wxDai
Date: Fri, 2 Dec 2022 16:36:25 +0800
Subject: [PATCH 30/57] add pkl (#2106)

---
 demo/fuse/bone.pkl  | Bin 0 -> 82992 bytes
 demo/fuse/joint.pkl | Bin 0 -> 82815 bytes
 2 files changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 demo/fuse/bone.pkl
 create mode 100644 demo/fuse/joint.pkl

diff --git a/demo/fuse/bone.pkl b/demo/fuse/bone.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..a5cc72b3a1ba835d3b1d7af712c6a3973b862456
GIT binary patch
literal 82992
[base85-encoded binary pickle data omitted; the matching binary patch for demo/fuse/joint.pkl (82815 bytes) is omitted as well]
zt(PK$Zg#!W*z;wq!9BkL&2Mc)`m9Yz$w{o+NZ)O8`IP7P;xY_j2 zZgz{Z4<_Hr>a!}40c)GBZ$}2*tfe$eUdb9XxgX7M?Lhjhok$x~y9=e$)c?P`vwdkY z3ghsH%`u@|ButsJS0Z~GxIO1Qn;pJ=+`p_THm$S1mHrVr=bMF1_-+kTx7wo_e|AkQc*EVb3`^Pq`Ru0lf z?*~!GGRC>)ad~8do(3&Vh;0YhPb8@mxZe6q&DLQdArIc_7*tYl|j5)0X}^F@$l$MSxAXS~YYU8Z90*V3SX9P1U>emK`3( zYM(fJOQx@@su13SborAqKHL}Wr~MD0HRl*Hs;WEWY}&_VAB9O2vz5UREXwX&r#wL= zT$Ha}f)U{0%UeW^aYJQP^+ed-%XtB?{3)$@?Xj6tUA^jU$_|dUz;o~u$d-%n%kFE$ z#YkMOS;Vty!!It5=rZ2f_#W9+#4F*+Q`|sitfp2(M%O#Z7hTe?tbRS;AllKQ)+~1q z`c32vwvA}woGHLsf>4vTQH!6ct&H%=9+~9T_dp-igOI>-!7(H{QqT*w`n}?|Dqpem z_h6ej-(HZ)yzEe-rB<5AO7VOheve)~9+bOV!O5%!MkS)kxBZwt#zOoigCNh!>Wf!) zaD`u6F3t%EyUThxNlCk|`$XDJ(TQ3>5_<`YVt&X9sM3H*BQD^wG1?9QAcXW1cf*BN zRROD@KO6IpE+3+DezO6zX2Dn=-{^HTr=J`Lzhb+*Xnm>%=va*O(s$yCgWD&2$E;Kq r{oU;aoMW8N=C|kf7JI8qA=49wM;WH!- literal 0 HcmV?d00001 From aa310765acb1c697fd9dda83d13222b47fc72060 Mon Sep 17 00:00:00 2001 From: Kai Hu Date: Mon, 5 Dec 2022 03:57:26 -0500 Subject: [PATCH 31/57] [Feature] support ava-kinetics (#2080) --- tools/data/ava_kinetics/README.md | 173 ++++++++++++++++ tools/data/ava_kinetics/X-101-64x4d-FPN.py | 147 ++++++++++++++ tools/data/ava_kinetics/cut_kinetics.py | 185 ++++++++++++++++++ tools/data/ava_kinetics/extract_rgb_frames.py | 51 +++++ tools/data/ava_kinetics/fetch_proposal.py | 137 +++++++++++++ tools/data/ava_kinetics/merge_annotations.py | 54 +++++ tools/data/ava_kinetics/prepare_annotation.py | 90 +++++++++ tools/data/ava_kinetics/softlink_ava.py | 24 +++ 8 files changed, 861 insertions(+) create mode 100644 tools/data/ava_kinetics/README.md create mode 100644 tools/data/ava_kinetics/X-101-64x4d-FPN.py create mode 100644 tools/data/ava_kinetics/cut_kinetics.py create mode 100644 tools/data/ava_kinetics/extract_rgb_frames.py create mode 100644 tools/data/ava_kinetics/fetch_proposal.py create mode 100644 tools/data/ava_kinetics/merge_annotations.py create mode 100644 tools/data/ava_kinetics/prepare_annotation.py create mode 100644 tools/data/ava_kinetics/softlink_ava.py diff --git a/tools/data/ava_kinetics/README.md b/tools/data/ava_kinetics/README.md new file mode 100644 index 0000000000..2d28771320 --- /dev/null +++ b/tools/data/ava_kinetics/README.md @@ -0,0 +1,173 @@ +# Preparing AVA-Kinetics + +## Introduction + + + +```BibTeX +@article{li2020ava, + title={The ava-kinetics localized human actions video dataset}, + author={Li, Ang and Thotakuri, Meghana and Ross, David A and Carreira, Jo{\~a}o and Vostrikov, Alexander and Zisserman, Andrew}, + journal={arXiv preprint arXiv:2005.00214}, + year={2020} +} +``` + +For basic dataset information, please refer to the official [website](https://research.google.com/ava/index.html). +AVA-Kinetics dataset is a crossover between the AVA Actions and Kinetics datasets. You may want to first prepare the AVA datasets. In this file, we provide commands to prepare the Kinetics part and merge the two parts together. + +For model training, we will keep reading from raw frames for the AVA part, but read from videos using `decord` for the Kinetics part to accelerate training. + +Before we start, please make sure that the directory is located at `$MMACTION2/tools/data/ava_kinetics/`. + +## Step 1. Prepare the Kinetics700 dataset + +The Kinetics part of the AVA-Kinetics dataset are sampled from the Kinetics-700 dataset. + +It is best if you have prepared the Kinetics-700 dataset (only videos required) following +[Preparing Kinetics](https://github.com/open-mmlab/mmaction2/tree/master/tools/data/kinetics). 
We will also have alternative method to prepare these videos if you do not have enough storage (coming soon). + +We will need the videos of this dataset (`$MMACTION2/data/kinetics700/videos_train`) and the videos file list (`$MMACTION2/data/kinetics700/kinetics700_train_list_videos.txt`), which is generated by [Step 4 in Preparing Kinetics](https://github.com/open-mmlab/mmaction2/tree/master/tools/data/kinetics#step-4-generate-file-list) + +The format of the file list should be: + +``` +Path_to_video_1 label_1\n +Path_to_video_2 label_2\n +... +Path_to_video_n label_n\n +``` + +The timestamp (start and end of the video) must be contained. For example: + +``` +class602/o3lCwWyyc_s_000012_000022.mp4 602\n +``` + +It means that this video clip is the 12th to 22nd seconds of the original video. It is okay if some videos are missing, and we will ignore them in the next steps. + +## Step 2. Download Annotations + +Download the annotation tar file (recall that the directory should be located at `$MMACTION2/tools/data/ava_kinetics/`). + +```shell +wget https://storage.googleapis.com/deepmind-media/Datasets/ava_kinetics_v1_0.tar.gz +tar xf ava_kinetics_v1_0.tar.gz && rm ava_kinetics_v1_0.tar.gz +``` + +You should have the `ava_kinetics_v1_0` folder at `$MMACTION2/tools/data/ava_kinetics/`. + +## Step 3. Cut Videos + +Use `cut_kinetics.py` to find the desired videos from the Kinetics-700 dataset and trim them to contain only annotated clips. Currently we only use the train set of the Kinetics part to improve training. Validation on the Kinetics part will come soon. + +Here is the script: + +```shell +python3 cut_kinetics.py --avakinetics_anotation=$AVAKINETICS_ANOTATION \ + --kinetics_list=$KINETICS_LIST \ + --avakinetics_root=$AVAKINETICS_ROOT \ + [--num_workers=$NUM_WORKERS ] +``` + +Arguments: + +- `avakinetics_anotation`: the directory to ava-kinetics anotations. Defaults to `./ava_kinetics_v1_0`. +- `kinetics_list`: the path to the videos file list as mentioned in Step 1. If you have prepared the Kinetics700 dataset following `mmaction2`, it should be `$MMACTION2/data/kinetics700/kinetics700_train_list_videos.txt`. +- `avakinetics_root`: the directory to save the ava-kinetics dataset. Defaults to `$MMACTION2/data/ava_kinetics`. +- `num_workers`: number of workers used to cut videos. Defaults to -1 and use all available cpus. + +There should be about 100k videos. It is OK if some videos are missing and we will ignore them in the next steps. + +## Step 4. Extract RGB Frames + +This step is similar to Step 4 in [Preparing AVA](https://github.com/open-mmlab/mmaction2/tree/dev-1.x/tools/data/ava#step-4-extract-rgb-and-flow). + +Here we provide a script to extract RGB frames using ffmpeg: + +```shell +python3 extract_rgb_frames.py --avakinetics_root=$AVAKINETICS_ROOT \ + [--num_workers=$NUM_WORKERS ] +``` + +Arguments: + +- `avakinetics_root`: the directory to save the ava-kinetics dataset. Defaults to `$MMACTION2/data/ava_kinetics`. +- `num_workers`: number of workers used to extract frames. Defaults to -1 and use all available cpus. + +If you have installed denseflow, you can also use `build_rawframes.py` to extract RGB frames: + +```shell +python3 ../build_rawframes.py ../../../data/ava_kinetics/videos/ ../../../data/ava_kinetics/rawframes/ --task rgb --level 1 --mixed-ext +``` + +## Step 5. Prepare Annotations + +Use `prepare_annotation.py` to prepare the training annotations. 
It will generate a `kinetics_train.csv` file containing the spatial-temporal annotations for the Kinetics part, located at `$AVAKINETICS_ROOT`.
+
+Here is the script:
+
+```shell
+python3 prepare_annotation.py --avakinetics_anotation=$AVAKINETICS_ANOTATION \
+    --avakinetics_root=$AVAKINETICS_ROOT \
+    [--num_workers=$NUM_WORKERS]
+```
+
+Arguments:
+
+- `avakinetics_anotation`: the directory to the ava-kinetics annotations. Defaults to `./ava_kinetics_v1_0`.
+- `avakinetics_root`: the directory to save the ava-kinetics dataset. Defaults to `$MMACTION2/data/ava_kinetics`.
+- `num_workers`: number of workers used to prepare annotations. Defaults to -1, which uses all available CPUs.
+
+## Step 6. Fetch Proposal Files
+
+The pre-computed proposals for the AVA dataset are provided by FAIR's [Long-Term Feature Banks](https://github.com/facebookresearch/video-long-term-feature-banks). For the Kinetics part, we use `Cascade R-CNN X-101-64x4d-FPN` from [mmdetection](https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco/cascade_rcnn_x101_64x4d_fpn_1x_coco_20200515_075702-43ce6a30.pth) to fetch the proposals. Here is the script:
+
+```shell
+python3 fetch_proposal.py --avakinetics_root=$AVAKINETICS_ROOT \
+    --datalist=$DATALIST \
+    --picklepath=$PICKLEPATH \
+    [--config=$CONFIG ] \
+    [--checkpoint=$CHECKPOINT ]
+
+```
+
+It will generate a `kinetics_proposal.pkl` file at `$MMACTION2/data/ava_kinetics/`.
+
+Arguments:
+
+- `avakinetics_root`: the directory to save the ava-kinetics dataset. Defaults to `$MMACTION2/data/ava_kinetics`.
+- `datalist`: path to the `kinetics_train.csv` file generated at Step 5.
+- `picklepath`: path to save the extracted proposal pickle file.
+- `config`: the config file for the human detection model. Defaults to `X-101-64x4d-FPN.py`.
+- `checkpoint`: the checkpoint for the human detection model. Defaults to the `mmdetection` pretrained checkpoint.
+
+## Step 7. Merge AVA to AVA-Kinetics
+
+Now we are done with the preparations for the Kinetics part. We need to merge the AVA part into the `ava_kinetics` folder (assuming you have the AVA dataset ready at `$MMACTION2/data/ava`). First we make a copy of the AVA annotations to the `ava_kinetics` folder (recall that you are at `$MMACTION2/tools/data/ava_kinetics/`):
+
+```shell
+cp -r ../../../data/ava/annotations/ ../../../data/ava_kinetics/
+```
+
+Next we merge the generated annotation files of the Kinetics part into AVA. Please check: you should have two files `kinetics_train.csv` and `kinetics_proposal.pkl` at `$MMACTION2/data/ava_kinetics/`, generated in Step 5 and Step 6. Run the following script to merge these two files into `$MMACTION2/data/ava_kinetics/annotations/ava_train_v2.2.csv` and `$MMACTION2/data/ava_kinetics/annotations/ava_dense_proposals_train.FAIR.recall_93.9.pkl` respectively.
+
+```shell
+python3 merge_annotations.py --avakinetics_root=$AVAKINETICS_ROOT
+```
+
+Arguments:
+
+- `avakinetics_root`: the directory to save the ava-kinetics dataset. Defaults to `$MMACTION2/data/ava_kinetics`.
+
+Finally, we need to merge the rawframes of the AVA part. You can either copy/move them or create soft links. The following script is an example using soft links:
+
+```shell
+python3 softlink_ava.py --avakinetics_root=$AVAKINETICS_ROOT \
+    --ava_root=$AVA_ROOT
+```
+
+Arguments:
+
+- `avakinetics_root`: the directory to save the ava-kinetics dataset. Defaults to `$MMACTION2/data/ava_kinetics`.
+- `ava_root`: the directory to save the ava dataset. Defaults to `$MMACTION2/data/ava`.
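The merged files produced in Step 7 can be sanity-checked before training. The following is a minimal sketch, not one of the scripts added above: it assumes the default `$MMACTION2/data/ava_kinetics` layout from the steps above, the 8-column AVA-style CSV rows written by `prepare_annotation.py`, and a proposal pickle that maps `"video_id,timestamp"` keys to `N x 5` arrays of `(x1, y1, x2, y2, score)`.

```python
# Minimal sanity check for the merged AVA-Kinetics training annotations.
# Assumptions: 8-column AVA-style CSV rows and a proposal pickle mapping
# '<video_id>,<timestamp>' keys to N x 5 numpy arrays, as described above.
import pickle

anno_root = 'data/ava_kinetics/annotations'
csv_path = f'{anno_root}/ava_train_v2.2.csv'
pkl_path = f'{anno_root}/ava_dense_proposals_train.FAIR.recall_93.9.pkl'

# Every row should follow the layout:
# video_id,timestamp,x1,y1,x2,y2,label,entity_id
with open(csv_path) as f:
    rows = [line.strip().split(',') for line in f if line.strip()]
bad = [row for row in rows if len(row) != 8]
print(f'{len(rows)} annotation rows, {len(bad)} with an unexpected column count')

# Each proposal entry holds the detected human boxes for one key frame.
with open(pkl_path, 'rb') as f:
    proposals = pickle.load(f)
key = next(iter(proposals))
print(f'{len(proposals)} proposal keys; e.g. {key!r} -> shape {proposals[key].shape}')
```

If the row check or the key lookup fails, re-run Step 5 to Step 7 before starting training.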
diff --git a/tools/data/ava_kinetics/X-101-64x4d-FPN.py b/tools/data/ava_kinetics/X-101-64x4d-FPN.py new file mode 100644 index 0000000000..114f80a5e2 --- /dev/null +++ b/tools/data/ava_kinetics/X-101-64x4d-FPN.py @@ -0,0 +1,147 @@ +# Copyright (c) OpenMMLab. All rights reserved. +model = dict( + type='CascadeRCNN', + data_preprocessor=dict( + type='DetDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32), + backbone=dict( + type='ResNeXt', + depth=101, + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=True), + norm_eval=True, + style='pytorch', + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d'), + groups=64, + base_width=4), + neck=dict( + type='FPN', + in_channels=[256, 512, 1024, 2048], + out_channels=256, + num_outs=5), + rpn_head=dict( + type='RPNHead', + in_channels=256, + feat_channels=256, + anchor_generator=dict( + type='AnchorGenerator', + scales=[8], + ratios=[0.5, 1.0, 2.0], + strides=[4, 8, 16, 32, 64]), + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0.0, 0.0, 0.0, 0.0], + target_stds=[1.0, 1.0, 1.0, 1.0]), + loss_cls=dict( + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), + loss_bbox=dict( + type='SmoothL1Loss', beta=0.1111111111111111, loss_weight=1.0)), + roi_head=dict( + type='CascadeRoIHead', + num_stages=3, + stage_loss_weights=[1, 0.5, 0.25], + bbox_roi_extractor=dict( + type='SingleRoIExtractor', + roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0), + out_channels=256, + featmap_strides=[4, 8, 16, 32]), + bbox_head=[ + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0.0, 0.0, 0.0, 0.0], + target_stds=[0.1, 0.1, 0.2, 0.2]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0.0, 0.0, 0.0, 0.0], + target_stds=[0.05, 0.05, 0.1, 0.1]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, + loss_weight=1.0)), + dict( + type='Shared2FCBBoxHead', + in_channels=256, + fc_out_channels=1024, + roi_feat_size=7, + num_classes=80, + bbox_coder=dict( + type='DeltaXYWHBBoxCoder', + target_means=[0.0, 0.0, 0.0, 0.0], + target_stds=[0.033, 0.033, 0.067, 0.067]), + reg_class_agnostic=True, + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=False, + loss_weight=1.0), + loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) + ]), + test_cfg=dict( + rpn=dict( + nms_pre=1000, + max_per_img=1000, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0), + rcnn=dict( + score_thr=0.05, + nms=dict(type='nms', iou_threshold=0.5), + max_per_img=100))) + +test_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type='CocoDataset', + data_root='data/coco/', + ann_file='annotations/instances_val2017.json', + data_prefix=dict(img='val2017/'), + test_mode=True, + pipeline=[ + dict( + type='LoadImageFromFile', + 
file_client_args=dict(backend='disk')), + dict(type='Resize', scale=(1333, 800), keep_ratio=True), + dict(type='LoadAnnotations', with_bbox=True), + dict( + type='PackDetInputs', + meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', + 'scale_factor')) + ])) + +test_evaluator = dict( + type='CocoMetric', + ann_file='data/coco/annotations/instances_val2017.json', + metric='bbox', + format_only=False) + +test_cfg = dict(type='TestLoop') diff --git a/tools/data/ava_kinetics/cut_kinetics.py b/tools/data/ava_kinetics/cut_kinetics.py new file mode 100644 index 0000000000..3582035b10 --- /dev/null +++ b/tools/data/ava_kinetics/cut_kinetics.py @@ -0,0 +1,185 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import multiprocessing +import os +from collections import defaultdict +from typing import List + +import decord + + +def get_kinetics_frames(kinetics_anotation_file: str) -> dict: + """Given the AVA-kinetics anotation file, return a lookup to map the video + id and the the set of timestamps involved of this video id. + + Args: + kinetics_anotation_file (str): Path to the AVA-like anotation file for + the kinetics subset. + Returns: + dict: the dict keys are the kinetics videos' video id. The values are + the set of timestamps involved. + """ + with open(kinetics_anotation_file) as f: + anotated_frames = [i.split(',') for i in f.readlines()] + anotated_frames = [i for i in anotated_frames if len(i) == 7] + anotated_frames = [(i[0], int(float(i[1]))) for i in anotated_frames] + + frame_lookup = defaultdict(set) + for video_id, timestamp in anotated_frames: + frame_lookup[video_id].add(timestamp) + return frame_lookup + + +def filter_missing_videos(kinetics_list: str, frame_lookup: dict) -> dict: + """Given the kinetics700 dataset list, remove the video ids from the lookup + that are missing videos or frames. + + Args: + kinetics_list (str): Path to the kinetics700 dataset list. + The content of the list should be: + ``` + Path_to_video1 label_1\n + Path_to_video2 label_2\n + ... + Path_to_videon label_n\n + ``` + The start and end of the video must be contained in the filename. + For example: + ``` + class602/o3lCwWyyc_s_000012_000022.mp4\n + ``` + frame_lookup (dict): the dict from `get_kinetics_frames`. + Returns: + dict: the dict keys are the kinetics videos' video id. The values are + the a list of tuples: + (start_of_the_video, end_of_the_video, video_path) + """ + video_lookup = defaultdict(set) + with open(kinetics_list) as f: + for line in f.readlines(): + video_path = line.split(' ')[0] # remove label information + video_name = video_path.split('/')[-1] # get the file name + video_name = video_name.split('.')[0] # remove file extensions + video_name = video_name.split('_') + video_id = '_'.join(video_name[:-2]) + if video_id not in frame_lookup: + continue + + start, end = int(video_name[-2]), int(video_name[-1]) + frames = frame_lookup[video_id] + frames = [frame for frame in frames if start < frame < end] + if len(frames) == 0: + continue + + start, end = max(start, min(frames) - 2), min(end, max(frames) + 2) + video_lookup[video_id].add((start, end, video_path)) + + # Some frame ids exist in multiple videos in the Kinetics dataset. + # The reason is the part of one video may fall into different categories. 
+ # Remove the duplicated records + for video in video_lookup: + if len(video_lookup[video]) == 1: + continue + info_list = list(video_lookup[video]) + removed_list = [] + for i, info_i in enumerate(info_list): + start_i, end_i, _ = info_i + for j in range(i + 1, len(info_list)): + start_j, end_j, _ = info_list[j] + if start_i <= start_j and end_j <= end_i: + removed_list.append(j) + elif start_j <= start_i and end_i <= end_j: + removed_list.append(i) + new_list = [] + for i, info in enumerate(info_list): + if i not in removed_list: + new_list.append(info) + video_lookup[video] = set(new_list) + return video_lookup + + +template = ('ffmpeg -ss %d -t %d -accurate_seek -i' + ' %s -r 30 -avoid_negative_ts 1 %s') + + +def generate_cut_cmds(video_lookup: dict, data_root: str) -> List[str]: + cmds = [] + for video_id in video_lookup: + for start, end, video_path in video_lookup[video_id]: + start0 = int(video_path.split('_')[-2]) + new_path = '%s/%s_%06d_%06d.mp4' % (data_root, video_id, start, + end) + cmd = template % (start - start0, end - start, video_path, + new_path) + cmds.append(cmd) + return cmds + + +def run_cmd(cmd): + os.system(cmd) + return + + +def remove_failed_video(video_path: str) -> None: + """Given the path to the video, delete the video if it cannot be read or if + the actual length of the video is 0.75 seconds shorter than expected.""" + try: + v = decord.VideoReader(video_path) + fps = v.get_avg_fps() + num_frames = len(v) + x = video_path.split('.')[0].split('_') + time = int(x[-1]) - int(x[-2]) + if num_frames < (time - 3 / 4) * fps: + os.remove(video_path) + except: # noqa: E722 + os.remove(video_path) + return + + +if __name__ == '__main__': + p = argparse.ArgumentParser() + p.add_argument( + '--avakinetics_anotation', + type=str, + default='./ava_kinetics_v1_0', + help='the directory to ava-kinetics anotations') + p.add_argument( + '--kinetics_list', + type=str, + help='the datalist of the kinetics700 training videos') + p.add_argument( + '--num_workers', + type=int, + default=-1, + help='number of workers used for multiprocessing') + p.add_argument( + '--avakinetics_root', + type=str, + default='../../../data/ava_kinetics', + help='the path to save ava-kinetics dataset') + args = p.parse_args() + + if args.num_workers > 0: + num_workers = args.num_workers + else: + num_workers = max(multiprocessing.cpu_count() - 1, 1) + + # Find videos from the Kinetics700 dataset required for AVA-Kinetics + kinetics_train = args.avakinetics_anotation + '/kinetics_train_v1.0.csv' + frame_lookup = get_kinetics_frames(kinetics_train) + video_lookup = filter_missing_videos(args.kinetics_list, frame_lookup) + + root = args.avakinetics_root + os.makedirs(root, exist_ok=True) + video_path = root + '/videos/' + os.makedirs(video_path, exist_ok=True) + all_cmds = generate_cut_cmds(video_lookup, video_path) + + # Cut and save the videos for AVA-Kinetics + pool = multiprocessing.Pool(num_workers) + _ = pool.map(run_cmd, all_cmds) + + # Remove failed videos + videos = os.listdir(video_path) + videos = ['%s/%s' % (video_path, video) for video in videos] + _ = pool.map(remove_failed_video, videos) diff --git a/tools/data/ava_kinetics/extract_rgb_frames.py b/tools/data/ava_kinetics/extract_rgb_frames.py new file mode 100644 index 0000000000..25a3251b56 --- /dev/null +++ b/tools/data/ava_kinetics/extract_rgb_frames.py @@ -0,0 +1,51 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import argparse +import multiprocessing +import os + + +def extract_rgb(video_name, frame_path, video_path): + video_id = video_name.split('.')[0] + os.makedirs('%s/%s' % (frame_path, video_id), exist_ok=True) + cmd = 'ffmpeg -i %s/%s -r 30 -q:v 1 %s/%s' % (video_path, video_name, + frame_path, video_id) + cmd += '/img_%05d.jpg' + return cmd + + +def run_cmd(cmd): + os.system(cmd) + return + + +if __name__ == '__main__': + p = argparse.ArgumentParser() + p.add_argument( + '--avakinetics_root', + type=str, + default='../../../data/ava_kinetics', + help='the path to save ava-kinetics dataset') + p.add_argument( + '--num_workers', + type=int, + default=-1, + help='number of workers used for multiprocessing') + args = p.parse_args() + + if args.num_workers > 0: + num_workers = args.num_workers + else: + num_workers = max(multiprocessing.cpu_count() - 1, 1) + + root = args.avakinetics_root + video_path = root + '/videos/' + frame_path = root + '/rawframes/' + os.makedirs(frame_path, exist_ok=True) + + all_cmds = [ + extract_rgb(video_name, frame_path, video_path) + for video_name in os.listdir(video_path) + ] + + pool = multiprocessing.Pool(num_workers) + out = pool.map(run_cmd, all_cmds) diff --git a/tools/data/ava_kinetics/fetch_proposal.py b/tools/data/ava_kinetics/fetch_proposal.py new file mode 100644 index 0000000000..6e5279d4b4 --- /dev/null +++ b/tools/data/ava_kinetics/fetch_proposal.py @@ -0,0 +1,137 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import multiprocessing as mp +import os +import pickle + +import numpy as np +from mmdet.apis import inference_detector, init_detector +from mmdet.utils import register_all_modules +from PIL import Image + + +def get_vid_from_path(path): + video_id = path.split('/')[-1] + video_id = video_id.split('_')[:-2] + return '_'.join(video_id) + + +def prepare_det_lookup(datalist, frame_root): + with open(datalist) as f: + records = f.readlines() + det_lookup = {} + for record in records: + record = record.split(',') + folder_path = record[0] + video_id = get_vid_from_path(folder_path) + frame_id = int(record[1]) + for idx in range(frame_id - 1, frame_id + 2): + proposal_id = '%s,%04d' % (video_id, idx) + det_lookup[proposal_id] = '%s/%s' % (frame_root, folder_path) + return det_lookup + + +def single_worker(rank, det_lookup, args): + detect_list = list(det_lookup) + detect_sublist = [ + detect_list[i] for i in range(len(detect_list)) + if i % args.num_gpus == rank + ] + + # register all modules in mmdet into the registries + register_all_modules() + model = init_detector( + args.config, args.checkpoint, device='cuda:%d' % rank) + + lookup = {} + for count, key in enumerate(detect_sublist): + try: + folder_path = det_lookup[key] + start = int(folder_path.split('/')[-1].split('_')[-2]) + time = int(key.split(',')[1]) + frame_id = (time - start) * 30 + 1 + frame_path = '%s/img_%05d.jpg' % (folder_path, frame_id) + img = Image.open(frame_path) + result = inference_detector(model, frame_path) + bboxes = result._pred_instances.bboxes.cpu() + scores = result._pred_instances.scores.cpu() + labels = result._pred_instances.labels.cpu() + + bboxes = bboxes[labels == 0] + scores = scores[labels == 0] + + bboxes = bboxes[scores > 0.7].numpy() + scores = scores[scores > 0.7] + if scores.numel() > 0: + result_ = [] + for idx, (h1, w1, h2, w2) in enumerate(bboxes): + h1 /= img.size[0] + h2 /= img.size[0] + w1 /= img.size[1] + w2 /= img.size[1] + score = scores[idx].item() + result_.append((h1, w1, h2, w2, score)) + lookup[key] = 
np.array(result_) + except: # noqa: E722 + pass + + with open('tmp_person_%d.pkl' % rank, 'wb') as f: + pickle.dump(lookup, f) + return + + +if __name__ == '__main__': + p = argparse.ArgumentParser() + p.add_argument( + '--avakinetics_root', + type=str, + default='../../../data/ava_kinetics', + help='the path to save ava-kinetics dataset') + p.add_argument( + '--datalist', + type=str, + default='../../../data/ava_kinetics/kinetics_train.csv', + help='the list for kinetics videos') + p.add_argument( + '--config', + type=str, + default='X-101-64x4d-FPN.py', + help='the human detector') + p.add_argument( + '--checkpoint', + type=str, + default='https://download.openmmlab.com/mmdetection/v2.0/' + 'cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco/' + 'cascade_rcnn_x101_64x4d_fpn_1x_coco_20200515_' + '075702-43ce6a30.pth', + help='the human detector checkpoint') + p.add_argument( + '--picklepath', + type=str, + default='../../../data/ava_kinetics/kinetics_proposal.pkl') + p.add_argument('--num_gpus', type=int, default=8) + + args = p.parse_args() + + frame_root = args.avakinetics_root + '/rawframes/' + det_lookup = prepare_det_lookup(args.datalist, frame_root) + + processes = [] + for rank in range(args.num_gpus): + ctx = mp.get_context('spawn') + p = ctx.Process(target=single_worker, args=(rank, det_lookup, args)) + p.start() + processes.append(p) + + for p in processes: + p.join() + + lookup = {} + for k in range(args.num_gpus): + one_lookup = pickle.load(open('tmp_person_%d.pkl' % k, 'rb')) + os.remove('tmp_person_%d.pkl' % k) + for key in one_lookup: + lookup[key] = one_lookup[key] + + with open(args.picklepath, 'wb') as f: + pickle.dump(lookup, f) diff --git a/tools/data/ava_kinetics/merge_annotations.py b/tools/data/ava_kinetics/merge_annotations.py new file mode 100644 index 0000000000..9b5060a8fb --- /dev/null +++ b/tools/data/ava_kinetics/merge_annotations.py @@ -0,0 +1,54 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import os +import pickle + + +def check_file(path): + if os.path.isfile(path): + return + else: + path = path.split('/') + folder = '/'.join(path[:-1]) + filename = path[-1] + info = '%s not found at %s' % (filename, folder) + raise FileNotFoundError(info) + + +if __name__ == '__main__': + p = argparse.ArgumentParser() + p.add_argument( + '--avakinetics_root', + type=str, + default='../../../data/ava_kinetics', + help='the path to save ava-kinetics dataset') + root = p.parse_args().avakinetics_root + + kinetics_annot = root + '/kinetics_train.csv' + ava_annot = root + '/annotations/ava_train_v2.2.csv' + + check_file(kinetics_annot) + check_file(ava_annot) + + with open(kinetics_annot) as f: + record = f.readlines() + + with open(ava_annot) as f: + record += f.readlines() + + with open(ava_annot, 'w') as f: + for line in record: + f.write(line) + + kinetics_proposal = root + '/kinetics_proposal.pkl' + ava_proposal = root + '/annotations/' \ + 'ava_dense_proposals_train.FAIR.recall_93.9.pkl' + + check_file(kinetics_proposal) + check_file(ava_proposal) + + lookup = pickle.load(open(kinetics_proposal, 'rb')) + lookup.update(pickle.load(open(ava_proposal, 'rb'))) + + with open(ava_proposal, 'wb') as f: + pickle.dump(lookup, f) diff --git a/tools/data/ava_kinetics/prepare_annotation.py b/tools/data/ava_kinetics/prepare_annotation.py new file mode 100644 index 0000000000..00b7669d49 --- /dev/null +++ b/tools/data/ava_kinetics/prepare_annotation.py @@ -0,0 +1,90 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import argparse +import multiprocessing +import os +from collections import defaultdict + +FPS = 30 + + +def get_video_info(frame_folder): + folder_name = frame_folder.split('/')[-1] + filename = folder_name.split('_') + video_id = '_'.join(filename[:-2]) + start = int(filename[-2]) + length = len(os.listdir(frame_folder)) // FPS + return (video_id, start, start + length, folder_name) + + +def get_avaialble_clips(frame_root, num_cpus): + folders = os.listdir(frame_root) + folders = ['%s/%s' % (frame_root, folder) for folder in folders] + pool = multiprocessing.Pool(num_cpus) + outputs = pool.map(get_video_info, folders) + lookup = defaultdict(list) + for record in outputs: + lookup[record[0]].append(record[1:]) + return lookup + + +def filter_train_list(kinetics_anotation_file, lookup): + with open(kinetics_anotation_file) as f: + anotated_frames = [i.split(',') for i in f.readlines()] + anotated_frames = [i for i in anotated_frames if len(i) == 7] + + filtered = [] + for line in anotated_frames: + if line[0] not in lookup: + continue + flag = False + for start, end, video_path in lookup[line[0]]: + if start < float(line[1]) < end: + flag = True + break + if flag is False: + continue + + frame_idx, x1, y1, x2, y2, label = list(map(float, line[1:7])) + frame_idx, label = int(frame_idx), int(label) + + string = (f'{video_path},{frame_idx},' + f'{x1:.3f},{y1:.3f},{x2:.3f},{y2:.3f},{label},-1\n') + + filtered.append(string) + return filtered + + +if __name__ == '__main__': + p = argparse.ArgumentParser() + p.add_argument( + '--avakinetics_anotation', + type=str, + default='./ava_kinetics_v1_0', + help='the directory to ava-kinetics anotations') + p.add_argument( + '--num_workers', + type=int, + default=-1, + help='number of workers used for multiprocessing') + p.add_argument( + '--avakinetics_root', + type=str, + default='../../../data/ava_kinetics', + help='the path to save ava-kinetics videos') + args = p.parse_args() + + if args.num_workers > 0: + num_workers = args.num_workers + else: + num_workers = max(multiprocessing.cpu_count() - 1, 1) + + frame_root = args.avakinetics_root + '/rawframes/' + frame_root = os.path.abspath(frame_root) + lookup = get_avaialble_clips(frame_root, num_workers) + + kinetics_train = args.avakinetics_anotation + '/kinetics_train_v1.0.csv' + filtered_list = filter_train_list(kinetics_train, lookup) + + with open('%s/kinetics_train.csv' % args.avakinetics_root, 'w') as f: + for line in filtered_list: + f.write(line) diff --git a/tools/data/ava_kinetics/softlink_ava.py b/tools/data/ava_kinetics/softlink_ava.py new file mode 100644 index 0000000000..18ca0688c3 --- /dev/null +++ b/tools/data/ava_kinetics/softlink_ava.py @@ -0,0 +1,24 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import argparse +import os + +p = argparse.ArgumentParser() +p.add_argument( + '--ava_root', + type=str, + default='../../../data/ava', + help='the path to save ava dataset') +p.add_argument( + '--avakinetics_root', + type=str, + default='../../../data/ava_kinetics', + help='the path to save ava-kinetics dataset') +args = p.parse_args() + +ava_frames = os.path.abspath(args.ava_root + '/rawframes/') +kinetics_frames = os.path.abspath(args.avakinetics_root + '/rawframes/') + +ava_folders = os.listdir(ava_frames) +for folder in ava_folders: + cmd = 'ln -s %s/%s %s/%s' % (ava_frames, folder, kinetics_frames, folder) + os.system(cmd) From c96236b698fc115d49a37c8a4f0072ab885da6a6 Mon Sep 17 00:00:00 2001 From: Kai Hu Date: Mon, 5 Dec 2022 04:02:40 -0500 Subject: [PATCH 32/57] [Enhance] Add configs and results of experiments on AVA-Kinetics (#2099) --- .../detection/_base_/models/slowonly_r50.py | 11 +- configs/detection/ava_kinetics/README.md | 103 ++++++ ...re-r50_8xb8-4x16x1-10e_ava-kinetics-rgb.py | 117 +++++++ ...pre-r50_8xb8-8x8x1-10e_ava-kinetics-rgb.py | 123 +++++++ ...nl-head_8xb8-8x8x1-10e_ava-kinetics-rgb.py | 6 + ...d_8xb8-8x8x1-focal-10e_ava-kinetics-rgb.py | 6 + ...ral-max_8xb8-8x8x1-10e_ava-kinetics-rgb.py | 6 + ...context_8xb8-8x8x1-10e_ava-kinetics-rgb.py | 6 + ...8xb8-16x4x1-10e-tricks_ava-kinetics-rgb.py | 128 +++++++ ...re-r50_8xb8-4x16x1-10e_ava-kinetics-rgb.py | 8 + ...pre-r50_8xb8-8x8x1-10e_ava-kinetics-rgb.py | 8 + mmaction/datasets/__init__.py | 3 +- mmaction/datasets/ava_dataset.py | 318 ++++++++++++++++++ .../models/roi_heads/bbox_heads/bbox_head.py | 18 +- tests/datasets/test_ava_dataset.py | 114 ++++++- 15 files changed, 965 insertions(+), 10 deletions(-) create mode 100644 configs/detection/ava_kinetics/README.md create mode 100644 configs/detection/ava_kinetics/slowonly_k400-pre-r50_8xb8-4x16x1-10e_ava-kinetics-rgb.py create mode 100644 configs/detection/ava_kinetics/slowonly_k400-pre-r50_8xb8-8x8x1-10e_ava-kinetics-rgb.py create mode 100644 configs/detection/ava_kinetics/slowonly_k700-pre-r50-context-temporal-max-nl-head_8xb8-8x8x1-10e_ava-kinetics-rgb.py create mode 100644 configs/detection/ava_kinetics/slowonly_k700-pre-r50-context-temporal-max-nl-head_8xb8-8x8x1-focal-10e_ava-kinetics-rgb.py create mode 100644 configs/detection/ava_kinetics/slowonly_k700-pre-r50-context-temporal-max_8xb8-8x8x1-10e_ava-kinetics-rgb.py create mode 100644 configs/detection/ava_kinetics/slowonly_k700-pre-r50-context_8xb8-8x8x1-10e_ava-kinetics-rgb.py create mode 100644 configs/detection/ava_kinetics/slowonly_k700-pre-r50_8xb8-16x4x1-10e-tricks_ava-kinetics-rgb.py create mode 100644 configs/detection/ava_kinetics/slowonly_k700-pre-r50_8xb8-4x16x1-10e_ava-kinetics-rgb.py create mode 100644 configs/detection/ava_kinetics/slowonly_k700-pre-r50_8xb8-8x8x1-10e_ava-kinetics-rgb.py diff --git a/configs/detection/_base_/models/slowonly_r50.py b/configs/detection/_base_/models/slowonly_r50.py index 9db201bdb9..4a06a4ab53 100644 --- a/configs/detection/_base_/models/slowonly_r50.py +++ b/configs/detection/_base_/models/slowonly_r50.py @@ -1,13 +1,16 @@ +url = ('https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/' + 'slowonly_imagenet-pretrained-r50_8xb16-4x16x1-steplr-150e_kinetics400-' + 'rgb/slowonly_imagenet-pretrained-r50_8xb16-4x16x1-steplr-150e_' + 'kinetics400-rgb_20220901-e7b65fad.pth') + model = dict( type='FastRCNN', _scope_='mmdet', + init_cfg=dict(type='Pretrained', checkpoint=url), backbone=dict( type='ResNet3dSlowOnly', depth=50, - pretrained=( - 
'https://download.openmmlab.com/mmaction/recognition/slowonly/' - 'slowonly_r50_4x16x1_256e_kinetics400_rgb/' - 'slowonly_r50_4x16x1_256e_kinetics400_rgb_20200704-a69556c6.pth'), + pretrained=None, pretrained2d=False, lateral=False, num_stages=4, diff --git a/configs/detection/ava_kinetics/README.md b/configs/detection/ava_kinetics/README.md new file mode 100644 index 0000000000..59ec345c43 --- /dev/null +++ b/configs/detection/ava_kinetics/README.md @@ -0,0 +1,103 @@ +# AVA + +[The AVA-Kinetics Localized Human Actions Video Dataset](https://arxiv.org/abs/2005.00214) + + + +

+ +## Abstract + + + +This paper describes the AVA-Kinetics localized human actions video dataset. The dataset is collected by annotating videos from the Kinetics-700 dataset using the AVA annotation protocol, and extending the original AVA dataset with these new AVA annotated Kinetics clips. The dataset contains over 230k clips annotated with the 80 AVA action classes for each of the humans in key-frames. We describe the annotation process and provide statistics about the new dataset. We also include a baseline evaluation using the Video Action Transformer Network on the AVA-Kinetics dataset, demonstrating improved performance for action classification on the AVA test set. + +```BibTeX +@article{li2020ava, + title={The ava-kinetics localized human actions video dataset}, + author={Li, Ang and Thotakuri, Meghana and Ross, David A and Carreira, Jo{\~a}o and Vostrikov, Alexander and Zisserman, Andrew}, + journal={arXiv preprint arXiv:2005.00214}, + year={2020} +} +``` + +## Results and Models + +### AVA2.2 + +Currently, we only use the training set of AVA-Kinetics and evaluate on the AVA2.2 validation dataset. The AVA-Kinetics validation dataset will be supported soon. + +| frame sampling strategy | resolution | gpus | backbone | pretrain | mAP | config | ckpt | log | +| :---------------------: | :--------: | :--: | :---------------: | :----------: | :---: | :------------------------------------------: | :-----------------------------------------: | :----------------------------------------: | +| 4x16x1 | raw | 8 | SlowOnly ResNet50 | Kinetics-400 | 24.53 | [config](/configs/detection/ava_kinetics/slowonly_k400-pre-r50_8xb8-4x16x1-10e_ava-kinetics-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/detection/ava_kinetics/slowonly_k400-pre-r50_8xb8-4x16x1-10e_ava-kinetics-rgb/slowonly_k400-pre-r50_8xb8-4x16x1-10e_ava-kinetics-rgb_20221205-33e3ca7c.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/detection/ava_kinetics/slowonly_k400-pre-r50_8xb8-4x16x1-10e_ava-kinetics-rgb/slowonly_k400-pre-r50_8xb8-4x16x1-10e_ava-kinetics-rgb.log) | +| 4x16x1 | raw | 8 | SlowOnly ResNet50 | Kinetics-700 | 25.87 | [config](/configs/detection/ava_kinetics/slowonly_k700-pre-r50_8xb8-4x16x1-10e_ava-kinetics-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/detection/ava_kinetics/slowonly_k700-pre-r50_8xb8-4x16x1-10e_ava-kinetics-rgb/slowonly_k700-pre-r50_8xb8-4x16x1-10e_ava-kinetics-rgb_20221205-a07e8c15.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/detection/ava_kinetics/slowonly_k700-pre-r50_8xb8-4x16x1-10e_ava-kinetics-rgb/slowonly_k700-pre-r50_8xb8-4x16x1-10e_ava-kinetics-rgb.log) | +| 8x8x1 | raw | 8 | SlowOnly ResNet50 | Kinetics-400 | 26.10 | [config](/configs/detection/ava_kinetics/slowonly_k400-pre-r50_8xb8-8x8x1-10e_ava-kinetics-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/detection/ava_kinetics/slowonly_k400-pre-r50_8xb8-8x8x1-10e_ava-kinetics-rgb/slowonly_k400-pre-r50_8xb8-8x8x1-10e_ava-kinetics-rgb_20221205-8f8dff3b.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/detection/ava_kinetics/slowonly_k400-pre-r50_8xb8-8x8x1-10e_ava-kinetics-rgb/slowonly_k400-pre-r50_8xb8-8x8x1-10e_ava-kinetics-rgb.log) | +| 8x8x1 | raw | 8 | SlowOnly ResNet50 | Kinetics-700 | 27.82 | [config](/configs/detection/ava_kinetics/slowonly_k700-pre-r50_8xb8-8x8x1-10e_ava-kinetics-rgb.py) | 
[ckpt](https://download.openmmlab.com/mmaction/v1.0/detection/ava_kinetics/slowonly_k700-pre-r50_8xb8-8x8x1-10e_ava-kinetics-rgb/slowonly_k700-pre-r50_8xb8-8x8x1-10e_ava-kinetics-rgb_20221205-16a01c37.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/detection/ava_kinetics/slowonly_k700-pre-r50_8xb8-8x8x1-10e_ava-kinetics-rgb/slowonly_k700-pre-r50_8xb8-8x8x1-10e_ava-kinetics-rgb.log) | + +### Training with tricks + +We conduct ablation studies to show the improvements of training tricks using SlowOnly8x8 pretrained on the Kinetics700 dataset. The baseline is the last raw in [AVA2.2](https://github.com/hukkai/mmaction2/tree/ava-kinetics-exp/configs/detection/ava_kinetics#ava22). + +| method | frame sampling strategy | resolution | gpus | backbone | pretrain | mAP | config | ckpt | log | +| :--------------------: | :---------------------: | :--------: | :--: | :---------------: | :----------: | :---: | :-----------------------------------: | :---------------------------------: | :---------------------------------: | +| baseline | 8x8x1 | raw | 8 | SlowOnly ResNet50 | Kinetics-700 | 27.82 | [config](/configs/detection/ava_kinetics/slowonly_k700-pre-r50_8xb8-8x8x1-10e_ava-kinetics-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/detection/ava_kinetics/slowonly_k700-pre-r50_8xb8-8x8x1-10e_ava-kinetics-rgb/slowonly_k700-pre-r50_8xb8-8x8x1-10e_ava-kinetics-rgb_20221205-16a01c37.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/detection/ava_kinetics/slowonly_k700-pre-r50_8xb8-8x8x1-10e_ava-kinetics-rgb/slowonly_k700-pre-r50_8xb8-8x8x1-10e_ava-kinetics-rgb.log) | +| + context | 8x8x1 | raw | 8 | SlowOnly ResNet50 | Kinetics-700 | 28.31 | [config](/configs/detection/ava_kinetics/slowonly_k700-pre-r50-context_8xb8-8x8x1-10e_ava-kinetics-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/detection/ava_kinetics/slowonly_k700-pre-r50-context_8xb8-8x8x1-10e_ava-kinetics-rgb/slowonly_k700-pre-r50-context_8xb8-8x8x1-10e_ava-kinetics-rgb_20221205-5d514f8c.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/detection/ava_kinetics/slowonly_k700-pre-r50-context_8xb8-8x8x1-10e_ava-kinetics-rgb/slowonly_k700-pre-r50-context_8xb8-8x8x1-10e_ava-kinetics-rgb.log) | +| + temporal max pooling | 8x8x1 | raw | 8 | SlowOnly ResNet50 | Kinetics-700 | 28.48 | [config](/configs/detection/ava_kinetics/slowonly_k700-pre-r50-context-temporal-max_8xb8-8x8x1-10e_ava-kinetics-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/detection/ava_kinetics/slowonly_k700-pre-r50-context-temporal-max_8xb8-8x8x1-10e_ava-kinetics-rgb/slowonly_k700-pre-r50-context-temporal-max_8xb8-8x8x1-10e_ava-kinetics-rgb_20221205-5b5e71eb.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/detection/ava_kinetics/slowonly_k700-pre-r50-context-temporal-max_8xb8-8x8x1-10e_ava-kinetics-rgb/slowonly_k700-pre-r50-context-temporal-max_8xb8-8x8x1-10e_ava-kinetics-rgb.log) | +| + nonlinear head | 8x8x1 | raw | 8 | SlowOnly ResNet50 | Kinetics-700 | 29.83 | [config](/configs/detection/ava_kinetics/slowonly_k700-pre-r50-context-temporal-max-nl-head_8xb8-8x8x1-10e_ava-kinetics-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/detection/ava_kinetics/slowonly_k700-pre-r50-context-temporal-max-nl-head_8xb8-8x8x1-10e_ava-kinetics-rgb/slowonly_k700-pre-r50-context-temporal-max-nl-head_8xb8-8x8x1-10e_ava-kinetics-rgb_20221205-87624265.pth) | 
[log](https://download.openmmlab.com/mmaction/v1.0/detection/ava_kinetics/slowonly_k700-pre-r50-context-temporal-max-nl-head_8xb8-8x8x1-10e_ava-kinetics-rgb/slowonly_k700-pre-r50-context-temporal-max-nl-head_8xb8-8x8x1-10e_ava-kinetics-rgb.log) | +| + focal loss | 8x8x1 | raw | 8 | SlowOnly ResNet50 | Kinetics-700 | 30.33 | [config](/configs/detection/ava_kinetics/slowonly_k700-pre-r50-context-temporal-max-nl-head_8xb8-8x8x1-focal-10e_ava-kinetics-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/detection/ava_kinetics/slowonly_k700-pre-r50-context-temporal-max-nl-head_8xb8-8x8x1-focal-10e_ava-kinetics-rgb/slowonly_k700-pre-r50-context-temporal-max-nl-head_8xb8-8x8x1-focal-10e_ava-kinetics-rgb_20221205-37aa8395.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/detection/ava_kinetics/slowonly_k700-pre-r50-context-temporal-max-nl-head_8xb8-8x8x1-focal-10e_ava-kinetics-rgb/slowonly_k700-pre-r50-context-temporal-max-nl-head_8xb8-8x8x1-focal-10e_ava-kinetics-rgb.log) | +| + more frames | 16x4x1 | raw | 8 | SlowOnly ResNet50 | Kinetics-700 | 31.29 | [config](/configs/detection/ava_kinetics/slowonly_k700-pre-r50_8xb8-16x4x1-10e-tricks_ava-kinetics-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/detection/ava_kinetics/slowonly_k700-pre-r50_8xb8-16x4x1-10e-tricks_ava-kinetics-rgb/slowonly_k700-pre-r50_8xb8-16x4x1-10e-tricks_ava-kinetics-rgb_20221205-dd652f81.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/detection/ava_kinetics/slowonly_k700-pre-r50_8xb8-16x4x1-10e-tricks_ava-kinetics-rgb/slowonly_k700-pre-r50_8xb8-16x4x1-10e-tricks_ava-kinetics-rgb.log) | + +Note: + +The **gpus** indicates the number of gpu we used to get the checkpoint; **+ context** indicates that using both RoI feature and global pooled feature for classification; **+ temporal max pooling** indicates that using max pooling in the temporal dimension for the feature; **nonlinear head** indicates that using a 2-layer mlp instead of a linear classifier. + +For more details on data preparation, you can refer to [AVA-Kinetics Data Preparation](/tools/data/ava_kinetics/README.md). + +## Train + +You can use the following command to train a model. + +```shell +python tools/train.py ${CONFIG_FILE} [optional arguments] +``` + +Example: train the SlowOnly model on AVA-Kinetics in a deterministic option. + +```shell +python tools/train.py configs/detection/ava_kinetics/slowonly_k400-pre-r50_8xb8-4x16x1-10e_ava-kinetics-rgb.py \ + --cfg-options randomness.seed=0 randomness.deterministic=True +``` + +For more details, you can refer to the **Training** part in the [Training and Test Tutorial](/docs/en/user_guides/4_train_test.md). + +## Test + +You can use the following command to test a model. + +```shell +python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments] +``` + +Example: test the SlowOnly model on AVA-Kinetics and dump the result to a pkl file. + +```shell +python tools/test.py configs/detection/ava_kinetics/slowonly_k400-pre-r50_8xb8-4x16x1-10e_ava-kinetics-rgb.py \ + checkpoints/SOME_CHECKPOINT.pth --dump result.pkl +``` + +For more details, you can refer to the **Test** part in the [Training and Test Tutorial](/docs/en/user_guides/4_train_test.md). 
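The `result.pkl` written by `--dump` in the test example above can be inspected offline. The snippet below is only a sketch: it assumes the dump is a pickled list with one entry per evaluated key frame, and since the exact fields of each entry depend on the mmaction2/mmengine version, it prints the available keys instead of relying on specific ones.

```python
# Peek at a dumped prediction file (a sketch; entry fields vary by version,
# so we only inspect the structure instead of assuming specific keys).
import pickle

with open('result.pkl', 'rb') as f:
    results = pickle.load(f)

print(f'{len(results)} dumped samples')
first = results[0]
print(type(first))
if isinstance(first, dict):
    # Typical entries carry the predicted boxes/scores for one key frame.
    print(sorted(first.keys()))
```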
+ +## Citation + + + +```BibTeX +@article{li2020ava, + title={The ava-kinetics localized human actions video dataset}, + author={Li, Ang and Thotakuri, Meghana and Ross, David A and Carreira, Jo{\~a}o and Vostrikov, Alexander and Zisserman, Andrew}, + journal={arXiv preprint arXiv:2005.00214}, + year={2020} +} +``` diff --git a/configs/detection/ava_kinetics/slowonly_k400-pre-r50_8xb8-4x16x1-10e_ava-kinetics-rgb.py b/configs/detection/ava_kinetics/slowonly_k400-pre-r50_8xb8-4x16x1-10e_ava-kinetics-rgb.py new file mode 100644 index 0000000000..7407ec6978 --- /dev/null +++ b/configs/detection/ava_kinetics/slowonly_k400-pre-r50_8xb8-4x16x1-10e_ava-kinetics-rgb.py @@ -0,0 +1,117 @@ +_base_ = [ + '../../_base_/default_runtime.py', '../_base_/models/slowonly_r50.py' +] + +dataset_type = 'AVAKineticsDataset' +data_root = 'data/ava_kinetics/rawframes' +anno_root = 'data/ava_kinetics/annotations' + +ann_file_train = f'{anno_root}/ava_train_v2.2.csv' +ann_file_val = f'{anno_root}/ava_val_v2.2.csv' + +exclude_file_train = f'{anno_root}/ava_train_excluded_timestamps_v2.2.csv' +exclude_file_val = f'{anno_root}/ava_val_excluded_timestamps_v2.2.csv' + +label_file = f'{anno_root}/ava_action_list_v2.2_for_activitynet_2019.pbtxt' + +proposal_file_train = (f'{anno_root}/ava_dense_proposals_train.FAIR.' + 'recall_93.9.pkl') +proposal_file_val = f'{anno_root}/ava_dense_proposals_val.FAIR.recall_93.9.pkl' + +# file_client_args = dict( +# io_backend='petrel', +# path_mapping=dict({ +# 'data/ava_kinetics/rawframes/': +# 's3://openmmlab/datasets/action/ava/rawframes/' +# })) +file_client_args = dict(io_backend='disk') + +train_pipeline = [ + dict(type='SampleAVAFrames', clip_len=4, frame_interval=16), + dict(type='RawFrameDecode', **file_client_args), + dict(type='RandomRescale', scale_range=(256, 320)), + dict(type='RandomCrop', size=256), + dict(type='Flip', flip_ratio=0.5), + dict(type='FormatShape', input_format='NCTHW', collapse=True), + dict(type='PackActionInputs') +] +# The testing is w/o. 
any cropping / flipping +val_pipeline = [ + dict( + type='SampleAVAFrames', clip_len=4, frame_interval=16, test_mode=True), + dict(type='RawFrameDecode', **file_client_args), + dict(type='Resize', scale=(-1, 256)), + dict(type='FormatShape', input_format='NCTHW', collapse=True), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=8, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file=ann_file_train, + exclude_file=exclude_file_train, + pipeline=train_pipeline, + label_file=label_file, + proposal_file=proposal_file_train, + data_prefix=dict(img=data_root))) +val_dataloader = dict( + batch_size=1, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + exclude_file=exclude_file_val, + pipeline=val_pipeline, + label_file=label_file, + proposal_file=proposal_file_val, + data_prefix=dict(img=data_root), + test_mode=True)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='AVAMetric', + ann_file=ann_file_val, + label_file=label_file, + exclude_file=exclude_file_val) +test_evaluator = val_evaluator + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=10, val_begin=1, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.1, + by_epoch=True, + begin=0, + end=2, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=8, + eta_min=0, + by_epoch=True, + begin=2, + end=10, + convert_to_iter_based=True) +] + +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00001), + clip_grad=dict(max_norm=40, norm_type=2)) + +default_hooks = dict(checkpoint=dict(max_keep_ckpts=2)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (8 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=64) diff --git a/configs/detection/ava_kinetics/slowonly_k400-pre-r50_8xb8-8x8x1-10e_ava-kinetics-rgb.py b/configs/detection/ava_kinetics/slowonly_k400-pre-r50_8xb8-8x8x1-10e_ava-kinetics-rgb.py new file mode 100644 index 0000000000..eb393d3a8c --- /dev/null +++ b/configs/detection/ava_kinetics/slowonly_k400-pre-r50_8xb8-8x8x1-10e_ava-kinetics-rgb.py @@ -0,0 +1,123 @@ +_base_ = [ + '../../_base_/default_runtime.py', '../_base_/models/slowonly_r50.py' +] + +url = ('https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/' + 'slowonly_imagenet-pretrained-r50_8xb16-8x8x1-steplr-150e_kinetics400-' + 'rgb/slowonly_imagenet-pretrained-r50_8xb16-8x8x1-steplr-150e_' + 'kinetics400-rgb_20220901-df42dc84.pth') + +model = dict(init_cfg=dict(type='Pretrained', checkpoint=url)) + +dataset_type = 'AVAKineticsDataset' +data_root = 'data/ava_kinetics/rawframes' +anno_root = 'data/ava_kinetics/annotations' + +ann_file_train = f'{anno_root}/ava_train_v2.2.csv' +ann_file_val = f'{anno_root}/ava_val_v2.2.csv' + +exclude_file_train = f'{anno_root}/ava_train_excluded_timestamps_v2.2.csv' +exclude_file_val = f'{anno_root}/ava_val_excluded_timestamps_v2.2.csv' + +label_file = f'{anno_root}/ava_action_list_v2.2_for_activitynet_2019.pbtxt' + +proposal_file_train = (f'{anno_root}/ava_dense_proposals_train.FAIR.' 
+ 'recall_93.9.pkl') +proposal_file_val = f'{anno_root}/ava_dense_proposals_val.FAIR.recall_93.9.pkl' + +# file_client_args = dict( +# io_backend='petrel', +# path_mapping=dict({ +# 'data/ava_kinetics/rawframes/': +# 's3://openmmlab/datasets/action/ava/rawframes/' +# })) +file_client_args = dict(io_backend='disk') + +train_pipeline = [ + dict(type='SampleAVAFrames', clip_len=8, frame_interval=8), + dict(type='RawFrameDecode', **file_client_args), + dict(type='RandomRescale', scale_range=(256, 320)), + dict(type='RandomCrop', size=256), + dict(type='Flip', flip_ratio=0.5), + dict(type='FormatShape', input_format='NCTHW', collapse=True), + dict(type='PackActionInputs') +] +# The testing is w/o. any cropping / flipping +val_pipeline = [ + dict(type='SampleAVAFrames', clip_len=8, frame_interval=8, test_mode=True), + dict(type='RawFrameDecode', **file_client_args), + dict(type='Resize', scale=(-1, 256)), + dict(type='FormatShape', input_format='NCTHW', collapse=True), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=8, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file=ann_file_train, + exclude_file=exclude_file_train, + pipeline=train_pipeline, + label_file=label_file, + proposal_file=proposal_file_train, + data_prefix=dict(img=data_root))) +val_dataloader = dict( + batch_size=1, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + exclude_file=exclude_file_val, + pipeline=val_pipeline, + label_file=label_file, + proposal_file=proposal_file_val, + data_prefix=dict(img=data_root), + test_mode=True)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='AVAMetric', + ann_file=ann_file_val, + label_file=label_file, + exclude_file=exclude_file_val) +test_evaluator = val_evaluator + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=10, val_begin=1, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.1, + by_epoch=True, + begin=0, + end=2, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=8, + eta_min=0, + by_epoch=True, + begin=2, + end=10, + convert_to_iter_based=True) +] + +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00001), + clip_grad=dict(max_norm=40, norm_type=2)) + +default_hooks = dict(checkpoint=dict(max_keep_ckpts=2)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (8 samples per GPU). 
+auto_scale_lr = dict(enable=False, base_batch_size=64) diff --git a/configs/detection/ava_kinetics/slowonly_k700-pre-r50-context-temporal-max-nl-head_8xb8-8x8x1-10e_ava-kinetics-rgb.py b/configs/detection/ava_kinetics/slowonly_k700-pre-r50-context-temporal-max-nl-head_8xb8-8x8x1-10e_ava-kinetics-rgb.py new file mode 100644 index 0000000000..525bbea3d4 --- /dev/null +++ b/configs/detection/ava_kinetics/slowonly_k700-pre-r50-context-temporal-max-nl-head_8xb8-8x8x1-10e_ava-kinetics-rgb.py @@ -0,0 +1,6 @@ +_base_ = ['slowonly_k700-pre-r50_8xb8-8x8x1-10e_ava-kinetics-rgb.py'] + +model = dict( + roi_head=dict( + bbox_roi_extractor=dict(with_global=True, temporal_pool_mode='max'), + bbox_head=dict(in_channels=4096, mlp_head=True))) diff --git a/configs/detection/ava_kinetics/slowonly_k700-pre-r50-context-temporal-max-nl-head_8xb8-8x8x1-focal-10e_ava-kinetics-rgb.py b/configs/detection/ava_kinetics/slowonly_k700-pre-r50-context-temporal-max-nl-head_8xb8-8x8x1-focal-10e_ava-kinetics-rgb.py new file mode 100644 index 0000000000..fa52d86dbb --- /dev/null +++ b/configs/detection/ava_kinetics/slowonly_k700-pre-r50-context-temporal-max-nl-head_8xb8-8x8x1-focal-10e_ava-kinetics-rgb.py @@ -0,0 +1,6 @@ +_base_ = ['slowonly_k700-pre-r50_8xb8-8x8x1-10e_ava-kinetics-rgb.py'] + +model = dict( + roi_head=dict( + bbox_roi_extractor=dict(with_global=True, temporal_pool_mode='max'), + bbox_head=dict(in_channels=4096, mlp_head=True, focal_gamma=1.0))) diff --git a/configs/detection/ava_kinetics/slowonly_k700-pre-r50-context-temporal-max_8xb8-8x8x1-10e_ava-kinetics-rgb.py b/configs/detection/ava_kinetics/slowonly_k700-pre-r50-context-temporal-max_8xb8-8x8x1-10e_ava-kinetics-rgb.py new file mode 100644 index 0000000000..c2a9401704 --- /dev/null +++ b/configs/detection/ava_kinetics/slowonly_k700-pre-r50-context-temporal-max_8xb8-8x8x1-10e_ava-kinetics-rgb.py @@ -0,0 +1,6 @@ +_base_ = ['slowonly_k700-pre-r50_8xb8-8x8x1-10e_ava-kinetics-rgb.py'] + +model = dict( + roi_head=dict( + bbox_roi_extractor=dict(with_global=True, temporal_pool_mode='max'), + bbox_head=dict(in_channels=4096))) diff --git a/configs/detection/ava_kinetics/slowonly_k700-pre-r50-context_8xb8-8x8x1-10e_ava-kinetics-rgb.py b/configs/detection/ava_kinetics/slowonly_k700-pre-r50-context_8xb8-8x8x1-10e_ava-kinetics-rgb.py new file mode 100644 index 0000000000..344668f852 --- /dev/null +++ b/configs/detection/ava_kinetics/slowonly_k700-pre-r50-context_8xb8-8x8x1-10e_ava-kinetics-rgb.py @@ -0,0 +1,6 @@ +_base_ = ['slowonly_k700-pre-r50_8xb8-8x8x1-10e_ava-kinetics-rgb.py'] + +model = dict( + roi_head=dict( + bbox_roi_extractor=dict(with_global=True), + bbox_head=dict(in_channels=4096))) diff --git a/configs/detection/ava_kinetics/slowonly_k700-pre-r50_8xb8-16x4x1-10e-tricks_ava-kinetics-rgb.py b/configs/detection/ava_kinetics/slowonly_k700-pre-r50_8xb8-16x4x1-10e-tricks_ava-kinetics-rgb.py new file mode 100644 index 0000000000..fa65298d8d --- /dev/null +++ b/configs/detection/ava_kinetics/slowonly_k700-pre-r50_8xb8-16x4x1-10e-tricks_ava-kinetics-rgb.py @@ -0,0 +1,128 @@ +_base_ = [ + '../../_base_/default_runtime.py', '../_base_/models/slowonly_r50.py' +] + +url = ('https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/' + 'slowonly_imagenet-pretrained-r50_16xb16-8x8x1-steplr-150e_kinetics700-' + 'rgb/slowonly_imagenet-pretrained-r50_16xb16-8x8x1-steplr-150e_' + 'kinetics700-rgb_20221013-15b93b10.pth') + +model = dict( + init_cfg=dict(type='Pretrained', checkpoint=url), + roi_head=dict( + bbox_roi_extractor=dict(with_global=True, 
temporal_pool_mode='max'), + bbox_head=dict(in_channels=4096, mlp_head=True, focal_gamma=1.0))) + +dataset_type = 'AVAKineticsDataset' +data_root = 'data/ava_kinetics/rawframes' +anno_root = 'data/ava_kinetics/annotations' + +ann_file_train = f'{anno_root}/ava_train_v2.2.csv' +ann_file_val = f'{anno_root}/ava_val_v2.2.csv' + +exclude_file_train = f'{anno_root}/ava_train_excluded_timestamps_v2.2.csv' +exclude_file_val = f'{anno_root}/ava_val_excluded_timestamps_v2.2.csv' + +label_file = f'{anno_root}/ava_action_list_v2.2_for_activitynet_2019.pbtxt' + +proposal_file_train = (f'{anno_root}/ava_dense_proposals_train.FAIR.' + 'recall_93.9.pkl') +proposal_file_val = f'{anno_root}/ava_dense_proposals_val.FAIR.recall_93.9.pkl' + +# file_client_args = dict( +# io_backend='petrel', +# path_mapping=dict({ +# 'data/ava_kinetics/rawframes/': +# 's3://openmmlab/datasets/action/ava/rawframes/' +# })) +file_client_args = dict(io_backend='disk') + +train_pipeline = [ + dict(type='SampleAVAFrames', clip_len=16, frame_interval=4), + dict(type='RawFrameDecode', **file_client_args), + dict(type='RandomRescale', scale_range=(256, 320)), + dict(type='RandomCrop', size=256), + dict(type='Flip', flip_ratio=0.5), + dict(type='FormatShape', input_format='NCTHW', collapse=True), + dict(type='PackActionInputs') +] +# The testing is w/o. any cropping / flipping +val_pipeline = [ + dict( + type='SampleAVAFrames', clip_len=16, frame_interval=4, test_mode=True), + dict(type='RawFrameDecode', **file_client_args), + dict(type='Resize', scale=(-1, 256)), + dict(type='FormatShape', input_format='NCTHW', collapse=True), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=8, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type=dataset_type, + ann_file=ann_file_train, + exclude_file=exclude_file_train, + pipeline=train_pipeline, + label_file=label_file, + proposal_file=proposal_file_train, + data_prefix=dict(img=data_root))) +val_dataloader = dict( + batch_size=1, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file_val, + exclude_file=exclude_file_val, + pipeline=val_pipeline, + label_file=label_file, + proposal_file=proposal_file_val, + data_prefix=dict(img=data_root), + test_mode=True)) +test_dataloader = val_dataloader + +val_evaluator = dict( + type='AVAMetric', + ann_file=ann_file_val, + label_file=label_file, + exclude_file=exclude_file_val) +test_evaluator = val_evaluator + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=10, val_begin=1, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.1, + by_epoch=True, + begin=0, + end=2, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=8, + eta_min=0, + by_epoch=True, + begin=2, + end=10, + convert_to_iter_based=True) +] + +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.00001), + clip_grad=dict(max_norm=40, norm_type=2)) + +default_hooks = dict(checkpoint=dict(max_keep_ckpts=2)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (8 samples per GPU). 
+auto_scale_lr = dict(enable=False, base_batch_size=64) diff --git a/configs/detection/ava_kinetics/slowonly_k700-pre-r50_8xb8-4x16x1-10e_ava-kinetics-rgb.py b/configs/detection/ava_kinetics/slowonly_k700-pre-r50_8xb8-4x16x1-10e_ava-kinetics-rgb.py new file mode 100644 index 0000000000..c91e4aee39 --- /dev/null +++ b/configs/detection/ava_kinetics/slowonly_k700-pre-r50_8xb8-4x16x1-10e_ava-kinetics-rgb.py @@ -0,0 +1,8 @@ +_base_ = ['slowonly_k400-pre-r50_8xb8-4x16x1-10e_ava-kinetics-rgb.py'] + +url = ('https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/' + 'slowonly_imagenet-pretrained-r50_16xb16-8x8x1-steplr-150e_kinetics700-' + 'rgb/slowonly_imagenet-pretrained-r50_16xb16-8x8x1-steplr-150e_' + 'kinetics700-rgb_20221013-15b93b10.pth') + +model = dict(init_cfg=dict(type='Pretrained', checkpoint=url)) diff --git a/configs/detection/ava_kinetics/slowonly_k700-pre-r50_8xb8-8x8x1-10e_ava-kinetics-rgb.py b/configs/detection/ava_kinetics/slowonly_k700-pre-r50_8xb8-8x8x1-10e_ava-kinetics-rgb.py new file mode 100644 index 0000000000..3ba5d71f70 --- /dev/null +++ b/configs/detection/ava_kinetics/slowonly_k700-pre-r50_8xb8-8x8x1-10e_ava-kinetics-rgb.py @@ -0,0 +1,8 @@ +_base_ = ['slowonly_k400-pre-r50_8xb8-8x8x1-10e_ava-kinetics-rgb.py'] + +url = ('https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/' + 'slowonly_imagenet-pretrained-r50_16xb16-8x8x1-steplr-150e_kinetics700-' + 'rgb/slowonly_imagenet-pretrained-r50_16xb16-8x8x1-steplr-150e_' + 'kinetics700-rgb_20221013-15b93b10.pth') + +model = dict(init_cfg=dict(type='Pretrained', checkpoint=url)) diff --git a/mmaction/datasets/__init__.py b/mmaction/datasets/__init__.py index a5331d4a2f..9b933f98cf 100644 --- a/mmaction/datasets/__init__.py +++ b/mmaction/datasets/__init__.py @@ -1,7 +1,7 @@ # Copyright (c) OpenMMLab. All rights reserved. from .activitynet_dataset import ActivityNetDataset from .audio_dataset import AudioDataset -from .ava_dataset import AVADataset +from .ava_dataset import AVADataset, AVAKineticsDataset from .base import BaseActionDataset from .pose_dataset import PoseDataset from .rawframe_dataset import RawframeDataset @@ -12,6 +12,7 @@ 'VideoDataset', 'RawframeDataset', 'AVADataset', + 'AVAKineticsDataset', 'PoseDataset', 'BaseActionDataset', 'ActivityNetDataset', diff --git a/mmaction/datasets/ava_dataset.py b/mmaction/datasets/ava_dataset.py index 5c3b095171..8089d0fb75 100644 --- a/mmaction/datasets/ava_dataset.py +++ b/mmaction/datasets/ava_dataset.py @@ -308,3 +308,321 @@ def get_data_info(self, idx: int) -> dict: data_info['entity_ids'] = ann['entity_ids'] return data_info + + +@DATASETS.register_module() +class AVAKineticsDataset(BaseActionDataset): + """AVA-Kinetics dataset for spatial temporal detection. + + Based on official AVA annotation files, the dataset loads raw frames, + bounding boxes, proposals and applies specified transformations to return + a dict containing the frame tensors and other information. + + This datasets can load information from the following files: + + .. code-block:: txt + + ann_file -> ava_{train, val}_{v2.1, v2.2}.csv + exclude_file -> ava_{train, val}_excluded_timestamps_{v2.1, v2.2}.csv + label_file -> ava_action_list_{v2.1, v2.2}.pbtxt / + ava_action_list_{v2.1, v2.2}_for_activitynet_2019.pbtxt + proposal_file -> ava_dense_proposals_{train, val}.FAIR.recall_93.9.pkl + + Particularly, the proposal_file is a pickle file which contains + ``img_key`` (in format of ``{video_id},{timestamp}``). Example of a pickle + file: + + .. code-block:: JSON + + { + ... 
+ '0f39OWEqJ24,0902': + array([[0.011 , 0.157 , 0.655 , 0.983 , 0.998163]]), + '0f39OWEqJ24,0912': + array([[0.054 , 0.088 , 0.91 , 0.998 , 0.068273], + [0.016 , 0.161 , 0.519 , 0.974 , 0.984025], + [0.493 , 0.283 , 0.981 , 0.984 , 0.983621]]), + ... + } + + Args: + ann_file (str): Path to the annotation file like + ``ava_{train, val}_{v2.1, v2.2}.csv``. + exclude_file (str): Path to the excluded timestamp file like + ``ava_{train, val}_excluded_timestamps_{v2.1, v2.2}.csv``. + pipeline (List[Union[dict, ConfigDict, Callable]]): A sequence of + data transforms. + label_file (str): Path to the label file like + ``ava_action_list_{v2.1, v2.2}.pbtxt`` or + ``ava_action_list_{v2.1, v2.2}_for_activitynet_2019.pbtxt``. + Defaults to None. + filename_tmpl (str): Template for each filename. + Defaults to 'img_{:05}.jpg'. + start_index (int): Specify a start index for frames in consideration of + different filename format. However, when taking frames as input, + it should be set to 0, since frames from 0. Defaults to 0. + proposal_file (str): Path to the proposal file like + ``ava_dense_proposals_{train, val}.FAIR.recall_93.9.pkl``. + Defaults to None. + person_det_score_thr (float): The threshold of person detection scores, + bboxes with scores above the threshold will be used. + Note that 0 <= person_det_score_thr <= 1. If no proposal has + detection score larger than the threshold, the one with the largest + detection score will be used. Default: 0.9. + num_classes (int): The number of classes of the dataset. Default: 81. + (AVA has 80 action classes, another 1-dim is added for potential + usage) + custom_classes (List[int], optional): A subset of class ids from origin + dataset. Please note that 0 should NOT be selected, and + ``num_classes`` should be equal to ``len(custom_classes) + 1``. + data_prefix (dict or ConfigDict): Path to a directory where video + frames are held. Defaults to ``dict(img='')``. + test_mode (bool): Store True when building test or validation dataset. + Defaults to False. + modality (str): Modality of data. Support ``RGB``, ``Flow``. + Defaults to ``RGB``. + num_max_proposals (int): Max proposals number to store. + Defaults to 1000. + timestamp_start (int): The start point of included timestamps. The + default value is referred from the official website. + Defaults to 902. + timestamp_end (int): The end point of included timestamps. The default + value is referred from the official website. Defaults to 1798. + fps (int): Overrides the default FPS for the dataset. Defaults to 30. 
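+
+    Examples:
+        A minimal construction sketch; every path below is a placeholder
+        that only mirrors the file layout described above:
+
+        >>> dataset = AVAKineticsDataset(
+        ...     ann_file='ava_train_v2.2.csv',
+        ...     exclude_file='ava_train_excluded_timestamps_v2.2.csv',
+        ...     pipeline=[dict(type='SampleAVAFrames', clip_len=8,
+        ...                    frame_interval=8)],
+        ...     label_file='ava_action_list_v2.2.pbtxt',
+        ...     proposal_file='ava_dense_proposals_train.FAIR.recall_93.9.pkl',
+        ...     data_prefix=dict(img='data/ava_kinetics/rawframes'))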
+ """ + + def __init__(self, + ann_file: str, + exclude_file: str, + pipeline: List[Union[ConfigType, Callable]], + label_file: str, + filename_tmpl: str = 'img_{:05}.jpg', + start_index: int = 0, + proposal_file: str = None, + person_det_score_thr: float = 0.9, + num_classes: int = 81, + custom_classes: Optional[List[int]] = None, + data_prefix: ConfigType = dict(img=''), + modality: str = 'RGB', + test_mode: bool = False, + num_max_proposals: int = 1000, + timestamp_start: int = 900, + timestamp_end: int = 1800, + fps: int = 30, + **kwargs) -> None: + self._FPS = fps # Keep this as standard + self.custom_classes = custom_classes + if custom_classes is not None: + assert num_classes == len(custom_classes) + 1 + assert 0 not in custom_classes + _, class_whitelist = read_labelmap(open(label_file)) + assert set(custom_classes).issubset(class_whitelist) + + self.custom_classes = list([0] + custom_classes) + self.exclude_file = exclude_file + self.label_file = label_file + self.proposal_file = proposal_file + assert 0 <= person_det_score_thr <= 1, ( + 'The value of ' + 'person_det_score_thr should in [0, 1]. ') + self.person_det_score_thr = person_det_score_thr + self.timestamp_start = timestamp_start + self.timestamp_end = timestamp_end + self.num_max_proposals = num_max_proposals + self.filename_tmpl = filename_tmpl + + super().__init__( + ann_file, + pipeline=pipeline, + data_prefix=data_prefix, + test_mode=test_mode, + num_classes=num_classes, + start_index=start_index, + modality=modality, + **kwargs) + + if self.proposal_file is not None: + self.proposals = load(self.proposal_file) + else: + self.proposals = None + + def parse_img_record(self, img_records: List[dict]) -> tuple: + """Merge image records of the same entity at the same time. + + Args: + img_records (List[dict]): List of img_records (lines in AVA + annotations). + + Returns: + Tuple(list): A tuple consists of lists of bboxes, action labels and + entity_ids. + """ + bboxes, labels, entity_ids = [], [], [] + while len(img_records) > 0: + img_record = img_records[0] + num_img_records = len(img_records) + + selected_records = [ + x for x in img_records + if np.array_equal(x['entity_box'], img_record['entity_box']) + ] + + num_selected_records = len(selected_records) + img_records = [ + x for x in img_records if + not np.array_equal(x['entity_box'], img_record['entity_box']) + ] + + assert len(img_records) + num_selected_records == num_img_records + + bboxes.append(img_record['entity_box']) + valid_labels = np.array([ + selected_record['label'] + for selected_record in selected_records + ]) + + # The format can be directly used by BCELossWithLogits + label = np.zeros(self.num_classes, dtype=np.float32) + label[valid_labels] = 1. 
+ + labels.append(label) + entity_ids.append(img_record['entity_id']) + + bboxes = np.stack(bboxes) + labels = np.stack(labels) + entity_ids = np.stack(entity_ids) + return bboxes, labels, entity_ids + + def filter_data(self) -> List[dict]: + """Filter out records in the exclude_file.""" + valid_indexes = [] + if self.exclude_file is None: + valid_indexes = list(range(len(self.data_list))) + else: + exclude_video_infos = [ + x.strip().split(',') for x in open(self.exclude_file) + ] + for i, data_info in enumerate(self.data_list): + valid_indexes.append(i) + for video_id, timestamp in exclude_video_infos: + if (data_info['video_id'] == video_id + and data_info['timestamp'] == int(timestamp)): + valid_indexes.pop() + break + + logger = MMLogger.get_current_instance() + logger.info(f'{len(valid_indexes)} out of {len(self.data_list)}' + f' frames are valid.') + data_list = [self.data_list[i] for i in valid_indexes] + + return data_list + + def get_timestamp(self, video_id): + if len(video_id) == 11: + return self.timestamp_start, self.timestamp_end + video_id = video_id.split('_') + if len(video_id) >= 3: + start = int(video_id[-2]) + end = int(video_id[-1]) + video_id = '_'.join(video_id[:-2]) + return start, end + return self.timestamp_start, self.timestamp_end + + def load_data_list(self) -> List[dict]: + """Load AVA annotations.""" + check_file_exist(self.ann_file) + data_list = [] + records_dict_by_img = defaultdict(list) + with open(self.ann_file, 'r') as fin: + for line in fin: + line_split = line.strip().split(',') + + label = int(line_split[6]) + if self.custom_classes is not None: + if label not in self.custom_classes: + continue + label = self.custom_classes.index(label) + + video_id = line_split[0] + timestamp = int(line_split[1]) + img_key = f'{video_id},{timestamp:04d}' + + entity_box = np.array(list(map(float, line_split[2:6]))) + entity_id = int(line_split[7]) + start, end = self.get_timestamp(video_id) + shot_info = (1, (end - start) * self._FPS + 1) + + video_info = dict( + video_id=video_id, + timestamp=timestamp, + entity_box=entity_box, + label=label, + entity_id=entity_id, + shot_info=shot_info) + records_dict_by_img[img_key].append(video_info) + + for img_key in records_dict_by_img: + video_id, timestamp = img_key.split(',') + start, end = self.get_timestamp(video_id) + bboxes, labels, entity_ids = self.parse_img_record( + records_dict_by_img[img_key]) + ann = dict( + gt_bboxes=bboxes, gt_labels=labels, entity_ids=entity_ids) + frame_dir = video_id + if self.data_prefix['img'] is not None: + frame_dir = osp.join(self.data_prefix['img'], frame_dir) + video_info = dict( + frame_dir=frame_dir, + video_id=video_id, + timestamp=int(timestamp), + timestamp_start=start, + timestamp_end=end, + img_key=img_key, + shot_info=shot_info, + fps=self._FPS, + ann=ann) + data_list.append(video_info) + + return data_list + + def get_data_info(self, idx: int) -> dict: + """Get annotation by index.""" + data_info = super().get_data_info(idx) + img_key = data_info['img_key'] + data_info['filename_tmpl'] = self.filename_tmpl + if 'timestamp_start' not in data_info: + data_info['timestamp_start'] = self.timestamp_start + data_info['timestamp_end'] = self.timestamp_end + + if self.proposals is not None: + if len(img_key) == 16: + proposal_key = img_key + else: + video_id, timestamp = img_key.split(',') + vid = '_'.join(video_id.split('_')[:-2]) + timestamp = int(timestamp) + proposal_key = f'{vid},{timestamp:04d}' + + if proposal_key not in self.proposals: + data_info['proposals'] = 
np.array([[0, 0, 1, 1]]) + data_info['scores'] = np.array([1]) + else: + proposals = self.proposals[proposal_key] + assert proposals.shape[-1] in [4, 5] + if proposals.shape[-1] == 5: + thr = min(self.person_det_score_thr, max(proposals[:, 4])) + positive_inds = (proposals[:, 4] >= thr) + proposals = proposals[positive_inds] + proposals = proposals[:self.num_max_proposals] + data_info['proposals'] = proposals[:, :4] + data_info['scores'] = proposals[:, 4] + else: + proposals = proposals[:self.num_max_proposals] + data_info['proposals'] = proposals + + ann = data_info.pop('ann') + data_info['gt_bboxes'] = ann['gt_bboxes'] + data_info['gt_labels'] = ann['gt_labels'] + data_info['entity_ids'] = ann['entity_ids'] + + return data_info diff --git a/mmaction/models/roi_heads/bbox_heads/bbox_head.py b/mmaction/models/roi_heads/bbox_heads/bbox_head.py index 57cc483058..3fad373cf2 100644 --- a/mmaction/models/roi_heads/bbox_heads/bbox_head.py +++ b/mmaction/models/roi_heads/bbox_heads/bbox_head.py @@ -64,6 +64,8 @@ class BBoxHeadAVA(nn.Module): Defaults to ``(3, 5)``. multilabel (bool): Whether used for a multilabel task. Defaults to True. + mlp_head (bool): Whether to use an MLP as the classification head. + Defaults to False, i.e., using a single linear head. """ def __init__( @@ -77,7 +79,8 @@ def __init__( dropout_ratio: float = 0, dropout_before_pool: bool = True, topk: Union[int, Tuple[int]] = (3, 5), - multilabel: bool = True) -> None: + multilabel: bool = True, + mlp_head: bool = False) -> None: super(BBoxHeadAVA, self).__init__() assert temporal_pool_type in ['max', 'avg'] assert spatial_pool_type in ['max', 'avg'] @@ -123,12 +126,19 @@ def __init__( if dropout_ratio > 0: self.dropout = nn.Dropout(dropout_ratio) - self.fc_cls = nn.Linear(in_channels, num_classes) + if mlp_head: + self.fc_cls = nn.Sequential( + nn.Linear(in_channels, in_channels), nn.ReLU(), + nn.Linear(in_channels, num_classes)) + else: + self.fc_cls = nn.Linear(in_channels, num_classes) def init_weights(self) -> None: """Initialize the classification head.""" - nn.init.normal_(self.fc_cls.weight, 0, 0.01) - nn.init.constant_(self.fc_cls.bias, 0) + for m in self.modules(): + if isinstance(m, nn.Linear): + nn.init.xavier_normal_(m.weight) + nn.init.constant_(m.bias, 0) def forward(self, x: Tensor) -> Tensor: """Computes the classification logits given ROI features.""" diff --git a/tests/datasets/test_ava_dataset.py b/tests/datasets/test_ava_dataset.py index a3b0591dd7..a75dd512b6 100644 --- a/tests/datasets/test_ava_dataset.py +++ b/tests/datasets/test_ava_dataset.py @@ -6,7 +6,7 @@ from mmengine.testing import assert_dict_has_keys from numpy.testing import assert_array_equal -from mmaction.datasets import AVADataset +from mmaction.datasets import AVADataset, AVAKineticsDataset from mmaction.utils import register_all_modules @@ -120,3 +120,115 @@ def test_ava_pipeline(self): assert result['start_index'] == 0 assert result['timestamp_start'] == 900 assert result['timestamp_end'] == 1800 + + +class TestAVAKineticsDataset: + + @classmethod + def setup_class(cls): + cls.data_prefix = osp.normpath( + osp.join(osp.dirname(__file__), './../data', 'ava_dataset')) + cls.label_file = osp.join(cls.data_prefix, 'action_list.txt') + cls.ann_file = osp.join(cls.data_prefix, 'ava_sample.csv') + cls.exclude_file = osp.join(cls.data_prefix, + 'ava_excluded_timestamps_sample.csv') + cls.proposal_file = osp.join(cls.data_prefix, + 'ava_proposals_sample.pkl') + cls.pipeline = [ + dict(dict(type='SampleAVAFrames', clip_len=32, frame_interval=2)) 
+ ] + cls.proposal = mmengine.load(cls.proposal_file) + + def test_ava_kinetics_dataset(self): + register_all_modules() + ava_dataset = AVAKineticsDataset( + self.ann_file, + self.exclude_file, + self.pipeline, + self.label_file, + data_prefix={'img': self.data_prefix}, + proposal_file=self.proposal_file) + + # custom classes + ava_dataset = AVAKineticsDataset( + self.ann_file, + self.exclude_file, + self.pipeline, + label_file=self.label_file, + custom_classes=[17, 79], + num_classes=3, + data_prefix={'img': self.data_prefix}, + proposal_file=self.proposal_file) + # ava_infos = ava_dataset.video_infos + target_labels = np.array([1, 2]) + labels = np.zeros([3]) + labels[target_labels] = 1. + target_labels = labels[None, ...] + + ava_dataset = AVAKineticsDataset( + self.ann_file, + None, + self.pipeline, + self.label_file, + data_prefix={'img': self.data_prefix}, + proposal_file=self.proposal_file) + + ava_dataset = AVAKineticsDataset( + self.ann_file, + None, + self.pipeline, + self.label_file, + test_mode=True, + data_prefix={'img': self.data_prefix}, + proposal_file=self.proposal_file) + + del ava_dataset + + def test_ava_kinetics_pipeline(self): + register_all_modules() + target_keys = [ + 'frame_dir', 'video_id', 'timestamp', 'img_key', 'shot_info', + 'fps', 'filename_tmpl', 'modality', 'start_index', + 'timestamp_start', 'timestamp_end', 'proposals', 'scores', + 'frame_inds', 'clip_len', 'frame_interval', 'gt_labels', + 'gt_bboxes', 'entity_ids' + ] + + ava_dataset = AVAKineticsDataset( + self.ann_file, + self.exclude_file, + self.pipeline, + self.label_file, + data_prefix={'img': self.data_prefix}, + proposal_file=self.proposal_file) + result = ava_dataset[0] + assert assert_dict_has_keys(result, target_keys) + + assert result['filename_tmpl'] == 'img_{:05}.jpg' + assert result['modality'] == 'RGB' + assert result['start_index'] == 0 + assert result['timestamp_start'] == 900 + assert result['timestamp_end'] == 1800 + assert_array_equal(result['proposals'], + np.array([[0.011, 0.157, 0.655, 0.983]])) + assert_array_equal(result['scores'], np.array([0.998163])) + + assert result['clip_len'] == 32 + assert result['frame_interval'] == 2 + assert len(result['frame_inds']) == 32 + + ava_dataset = AVAKineticsDataset( + self.ann_file, + None, + self.pipeline, + self.label_file, + test_mode=True, + data_prefix={'img': self.data_prefix}, + proposal_file=self.proposal_file) + # Try to get a sample + result = ava_dataset[0] + assert result['filename_tmpl'] == 'img_{:05}.jpg' + assert result['modality'] == 'RGB' + assert result['start_index'] >= 0 + assert result['timestamp_start'] > 0 + assert result['timestamp_end'] > result['timestamp_start'] From 0774a07480136778fa6cbd8b584463015b3338e5 Mon Sep 17 00:00:00 2001 From: cir7 <33249023+cir7@users.noreply.github.com> Date: Mon, 5 Dec 2022 17:05:31 +0800 Subject: [PATCH 33/57] Update lint.yml (#2110) --- .github/workflows/lint.yml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index a306b42760..68b58a2b21 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -17,10 +17,6 @@ jobs: python-version: 3.7 - name: Install pre-commit hook run: | - # markdownlint requires ruby >= 2.7 - sudo apt-add-repository ppa:brightbox/ruby-ng -y - sudo apt-get update - sudo apt-get install -y ruby2.7 pip install pre-commit pre-commit install - name: Linting From 83dab1a44143af4632464df7d7ea78a670336e97 Mon Sep 17 00:00:00 2001 From: cir7 <33249023+cir7@users.noreply.github.com> Date: 
Wed, 7 Dec 2022 15:37:22 +0800 Subject: [PATCH 34/57] [ci] update CI maximum torch version to 1.13.0 (#2118) * [ci] update CI maximum torch version to 1.13.0 * [fix] fix lint --- .circleci/test.yml | 6 +++--- ..._k700-pre-r50_8xb8-16x4x1-10e-tricks_ava-kinetics-rgb.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.circleci/test.yml b/.circleci/test.yml index 7b9abc002e..20575636c0 100644 --- a/.circleci/test.yml +++ b/.circleci/test.yml @@ -70,7 +70,7 @@ jobs: pip install -r requirements.txt - when: condition: - equal: [ "1.12.1", << parameters.torch >> ] + equal: [ "1.13.0", << parameters.torch >> ] steps: - run: pip install timm - when: @@ -182,8 +182,8 @@ workflows: - lint - build_cpu: name: maximum_version_cpu - torch: 1.12.1 - torchvision: 0.13.1 + torch: 1.13.0 + torchvision: 0.14.0 python: 3.9.0 requires: - minimum_version_cpu diff --git a/configs/detection/ava_kinetics/slowonly_k700-pre-r50_8xb8-16x4x1-10e-tricks_ava-kinetics-rgb.py b/configs/detection/ava_kinetics/slowonly_k700-pre-r50_8xb8-16x4x1-10e-tricks_ava-kinetics-rgb.py index fa65298d8d..4d4a3dea6b 100644 --- a/configs/detection/ava_kinetics/slowonly_k700-pre-r50_8xb8-16x4x1-10e-tricks_ava-kinetics-rgb.py +++ b/configs/detection/ava_kinetics/slowonly_k700-pre-r50_8xb8-16x4x1-10e-tricks_ava-kinetics-rgb.py @@ -49,7 +49,7 @@ # The testing is w/o. any cropping / flipping val_pipeline = [ dict( - type='SampleAVAFrames', clip_len=16, frame_interval=4, test_mode=True), + type='SampleAVAFrames', clip_len=16, frame_interval=4, test_mode=True), dict(type='RawFrameDecode', **file_client_args), dict(type='Resize', scale=(-1, 256)), dict(type='FormatShape', input_format='NCTHW', collapse=True), From b26842b5007bee5e81786150b129045dde60371a Mon Sep 17 00:00:00 2001 From: cir7 <33249023+cir7@users.noreply.github.com> Date: Wed, 7 Dec 2022 18:16:45 +0800 Subject: [PATCH 35/57] [doc] adjust doc structure (#2088) --- demo/README.md | 575 ++------------- docs/en/index.rst | 14 +- docs/en/user_guides/3_inference.md | 187 +---- docs/en/user_guides/index.rst | 19 - docs/zh_cn/Makefile | 20 + docs/zh_cn/_static/css/readthedocs.css | 6 + docs/zh_cn/_static/images/logo.png | Bin 0 -> 31100 bytes docs/zh_cn/conf.py | 135 ++++ docs/zh_cn/get_started.md | 3 + docs/zh_cn/index.rst | 64 ++ docs/zh_cn/make.bat | 35 + docs/zh_cn/merge_docs.sh | 8 + docs/zh_cn/migration.md | 1 + docs/zh_cn/notes/changelog.md | 850 +++++++++++++++++++++++ docs/zh_cn/notes/contribution_guide.md | 63 ++ docs/zh_cn/notes/faq.md | 1 + docs/zh_cn/notes/projects.md | 1 + docs/zh_cn/stat.py | 174 +++++ docs/zh_cn/switch_language.md | 3 + docs/zh_cn/user_guides/1_config.md | 1 + docs/zh_cn/user_guides/2_data_prepare.md | 1 + docs/zh_cn/user_guides/3_inference.md | 1 + docs/zh_cn/user_guides/4_train_test.md | 1 + docs/zh_cn/user_guides/useful_tools.md | 1 + docs/zh_cn/user_guides/visualization.md | 1 + 25 files changed, 1452 insertions(+), 713 deletions(-) delete mode 100644 docs/en/user_guides/index.rst create mode 100644 docs/zh_cn/Makefile create mode 100644 docs/zh_cn/_static/css/readthedocs.css create mode 100644 docs/zh_cn/_static/images/logo.png create mode 100644 docs/zh_cn/conf.py create mode 100644 docs/zh_cn/get_started.md create mode 100644 docs/zh_cn/index.rst create mode 100644 docs/zh_cn/make.bat create mode 100644 docs/zh_cn/merge_docs.sh create mode 100644 docs/zh_cn/migration.md create mode 100644 docs/zh_cn/notes/changelog.md create mode 100644 docs/zh_cn/notes/contribution_guide.md create mode 100644 docs/zh_cn/notes/faq.md create mode 
100644 docs/zh_cn/notes/projects.md create mode 100644 docs/zh_cn/stat.py create mode 100644 docs/zh_cn/switch_language.md create mode 100644 docs/zh_cn/user_guides/1_config.md create mode 100644 docs/zh_cn/user_guides/2_data_prepare.md create mode 100644 docs/zh_cn/user_guides/3_inference.md create mode 100644 docs/zh_cn/user_guides/4_train_test.md create mode 100644 docs/zh_cn/user_guides/useful_tools.md create mode 100644 docs/zh_cn/user_guides/visualization.md diff --git a/demo/README.md b/demo/README.md index b8a033cadf..220cd84eea 100644 --- a/demo/README.md +++ b/demo/README.md @@ -4,14 +4,8 @@ - [Modify configs through script arguments](#modify-config-through-script-arguments): Tricks to directly modify configs through script arguments. - [Video demo](#video-demo): A demo script to predict the recognition result using a single video. -- [SpatioTemporal Action Detection Video Demo](#spatiotemporal-action-detection-video-demo): A demo script to predict the SpatioTemporal Action Detection result using a single video. - [Video GradCAM Demo](#video-gradcam-demo): A demo script to visualize GradCAM results using a single video. -- [Webcam demo](#webcam-demo): A demo script to implement real-time action recognition from a web camera. -- [Long Video demo](#long-video-demo): a demo script to predict different labels using a single long video. -- [SpatioTemporal Action Detection Webcam Demo](#spatiotemporal-action-detection-webcam-demo): A demo script to implement real-time spatio-temporal action detection from a web camera. - [Skeleton-based Action Recognition Demo](#skeleton-based-action-recognition-demo): A demo script to predict the skeleton-based action recognition result using a single video. -- [Video Structuralize Demo](#video-structuralize-demo): A demo script to predict the skeleton-based and rgb-based action recognition and spatio-temporal action detection result using a single video. -- [Audio Demo](#audio-demo): A demo script to predict the recognition result using a single audio file. ## Modify configs through script arguments @@ -24,9 +18,9 @@ When running demos using our provided scripts, you may specify `--cfg-options` t - Update keys inside a list of configs. - Some config dicts are composed as a list in your config. For example, the training pipeline `data.train.pipeline` is normally a list + Some config dicts are composed as a list in your config. For example, the training pipeline `train_dataloader.dataset.pipeline` is normally a list e.g. `[dict(type='SampleFrames'), ...]`. If you want to change `'SampleFrames'` to `'DenseSampleFrames'` in the pipeline, - you may specify `--cfg-options data.train.pipeline.0.type=DenseSampleFrames`. + you may specify `--cfg-options train_dataloader.dataset.pipeline.0.type=DenseSampleFrames`. - Update values of list/tuples. @@ -36,36 +30,34 @@ When running demos using our provided scripts, you may specify `--cfg-options` t ## Video demo -We provide a demo script to predict the recognition result using a single video. In order to get predict results in range `[0, 1]`, make sure to set `model['test_cfg'] = dict(average_clips='prob')` in config file. +MMAction2 provides a demo script to predict the recognition result using a single video. In order to get predict results in range `[0, 1]`, make sure to set `model['test_cfg'] = dict(average_clips='prob')` in config file. 
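+For example, a minimal sketch of that setting (assuming your config follows the usual MMAction2 layout, where `test_cfg` sits under `model`):
+
+```python
+# average the per-clip scores into probabilities in [0, 1]
+model = dict(test_cfg=dict(average_clips='prob'))
+```
+
+If you would rather not edit the config file, the same value can usually be overridden on the command line with `--cfg-options model.test_cfg.average_clips=prob`, following the rules described above.
+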
 ```shell
-python demo/demo.py ${CONFIG_FILE} ${CHECKPOINT_FILE} ${VIDEO_FILE} {LABEL_FILE} [--use-frames] \
-    [--device ${DEVICE_TYPE}] [--fps {FPS}] [--font-scale {FONT_SCALE}] [--font-color {FONT_COLOR}] \
-    [--target-resolution ${TARGET_RESOLUTION}] [--resize-algorithm {RESIZE_ALGORITHM}] [--out-filename {OUT_FILE}]
+python demo/demo.py ${CONFIG_FILE} ${CHECKPOINT_FILE} ${VIDEO_FILE} ${LABEL_FILE} \
+    [--device ${DEVICE_TYPE}] [--fps ${FPS}] [--font-scale ${FONT_SCALE}] [--font-color ${FONT_COLOR}] \
+    [--target-resolution ${TARGET_RESOLUTION}] [--out-filename ${OUT_FILE}]
 ```

 Optional arguments:

-- `--use-frames`: If specified, the demo will take rawframes as input. Otherwise, it will take a video as input.
-- `DEVICE_TYPE`: Type of device to run the demo. Allowed values are cuda device like `cuda:0` or `cpu`. If not specified, it will be set to `cuda:0`.
-- `FPS`: FPS value of the output video when using rawframes as input. If not specified, it will be set to 30.
-- `FONT_SCALE`: Font scale of the label added in the video. If not specified, it will be 0.5.
-- `FONT_COLOR`: Font color of the label added in the video. If not specified, it will be `white`.
+- `DEVICE_TYPE`: Type of device to run the demo. Allowed values are cuda device like `'cuda:0'` or `'cpu'`. Defaults to `'cuda:0'`.
+- `FPS`: FPS value of the output video. Defaults to 30.
+- `FONT_SCALE`: Font scale of the label added in the video. Defaults to 0.5.
+- `FONT_COLOR`: Font color of the label added in the video. Defaults to `'white'`.
 - `TARGET_RESOLUTION`: Resolution(desired_width, desired_height) for resizing the frames before output when using a video as input. If not specified, it will be None and the frames are resized by keeping the existing aspect ratio.
-- `RESIZE_ALGORITHM`: Resize algorithm used for resizing. If not specified, it will be set to `bicubic`.
 - `OUT_FILE`: Path to the output file which can be a video format or gif format. If not specified, it will be set to `None` and does not generate the output file.

 Examples:

 Assume that you are located at `$MMACTION2` and have already downloaded the checkpoints to the directory `checkpoints/`,
-or use checkpoint url from `configs/` to directly load corresponding checkpoint, which will be automatically saved in `$HOME/.cache/torch/checkpoints`.
+or use the checkpoint URL from `configs/` to directly load the corresponding checkpoint, which will be automatically saved in `$HOME/.cache/torch/checkpoints`.

 1. Recognize a video file as input by using a TSN model on cuda by default.
```shell # The demo.mp4 and label_map_k400.txt are both from Kinetics-400 - python demo/demo.py configs/recognition/tsn/tsn_r50_video_inference_1x1x3_100e_kinetics400_rgb.py \ - checkpoints/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth \ + python demo/demo.py configs/recognition/tsn/tsn_r50_8xb32-1x1x8-100e_kinetics400-rgb.py \ + checkpoints/tsn_r50_8xb32-1x1x8-100e_kinetics400-rgb_20220818-2692d16c.pth \ demo/demo.mp4 tools/data/kinetics/label_map_k400.txt ``` @@ -73,132 +65,23 @@ or use checkpoint url from `configs/` to directly load corresponding checkpoint, ```shell # The demo.mp4 and label_map_k400.txt are both from Kinetics-400 - python demo/demo.py configs/recognition/tsn/tsn_r50_video_inference_1x1x3_100e_kinetics400_rgb.py \ - https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth \ + python demo/demo.py configs/recognition/tsn/tsn_r50_8xb32-1x1x8-100e_kinetics400-rgb.py \ + https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_r50_8xb32-1x1x8-100e_kinetics400-rgb/tsn_r50_8xb32-1x1x8-100e_kinetics400-rgb_20220818-2692d16c.pth \ demo/demo.mp4 tools/data/kinetics/label_map_k400.txt ``` -3. Recognize a list of rawframes as input by using a TSN model on cpu. - - ```shell - python demo/demo.py configs/recognition/tsn/tsn_r50_inference_1x1x3_100e_kinetics400_rgb.py \ - checkpoints/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth \ - PATH_TO_FRAMES/ LABEL_FILE --use-frames --device cpu - ``` - -4. Recognize a video file as input by using a TSN model and then generate an mp4 file. +3. Recognize a video file as input by using a TSN model and then generate an mp4 file. ```shell # The demo.mp4 and label_map_k400.txt are both from Kinetics-400 - python demo/demo.py configs/recognition/tsn/tsn_r50_video_inference_1x1x3_100e_kinetics400_rgb.py \ - checkpoints/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth \ + python demo/demo.py configs/recognition/tsn/tsn_r50_8xb32-1x1x8-100e_kinetics400-rgb.py \ + checkpoints/tsn_r50_8xb32-1x1x8-100e_kinetics400-rgb_20220818-2692d16c.pth \ demo/demo.mp4 tools/data/kinetics/label_map_k400.txt --out-filename demo/demo_out.mp4 ``` -5. Recognize a list of rawframes as input by using a TSN model and then generate a gif file. - - ```shell - python demo/demo.py configs/recognition/tsn/tsn_r50_inference_1x1x3_100e_kinetics400_rgb.py \ - checkpoints/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth \ - PATH_TO_FRAMES/ LABEL_FILE --use-frames --out-filename demo/demo_out.gif - ``` - -6. Recognize a video file as input by using a TSN model, then generate an mp4 file with a given resolution and resize algorithm. 
- - ```shell - # The demo.mp4 and label_map_k400.txt are both from Kinetics-400 - python demo/demo.py configs/recognition/tsn/tsn_r50_video_inference_1x1x3_100e_kinetics400_rgb.py \ - checkpoints/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth \ - demo/demo.mp4 tools/data/kinetics/label_map_k400.txt --target-resolution 340 256 --resize-algorithm bilinear \ - --out-filename demo/demo_out.mp4 - ``` - - ```shell - # The demo.mp4 and label_map_k400.txt are both from Kinetics-400 - # If either dimension is set to -1, the frames are resized by keeping the existing aspect ratio - # For --target-resolution 170 -1, original resolution (340, 256) -> target resolution (170, 128) - python demo/demo.py configs/recognition/tsn/tsn_r50_video_inference_1x1x3_100e_kinetics400_rgb.py \ - checkpoints/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth \ - demo/demo.mp4 tools/data/kinetics/label_map_k400.txt --target-resolution 170 -1 --resize-algorithm bilinear \ - --out-filename demo/demo_out.mp4 - ``` - -7. Recognize a video file as input by using a TSN model, then generate an mp4 file with a label in a red color and fontscale 1. - - ```shell - # The demo.mp4 and label_map_k400.txt are both from Kinetics-400 - python demo/demo.py configs/recognition/tsn/tsn_r50_video_inference_1x1x3_100e_kinetics400_rgb.py \ - checkpoints/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth \ - demo/demo.mp4 tools/data/kinetics/label_map_k400.txt --font-scale 1 --font-color red \ - --out-filename demo/demo_out.mp4 - ``` - -8. Recognize a list of rawframes as input by using a TSN model and then generate an mp4 file with 24 fps. - - ```shell - python demo/demo.py configs/recognition/tsn/tsn_r50_inference_1x1x3_100e_kinetics400_rgb.py \ - checkpoints/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth \ - PATH_TO_FRAMES/ LABEL_FILE --use-frames --fps 24 --out-filename demo/demo_out.gif - ``` - -## SpatioTemporal Action Detection Video Demo - -We provide a demo script to predict the SpatioTemporal Action Detection result using a single video. - -```shell -python demo/demo_spatiotemporal_det.py --video ${VIDEO_FILE} \ - [--config ${SPATIOTEMPORAL_ACTION_DETECTION_CONFIG_FILE}] \ - [--checkpoint ${SPATIOTEMPORAL_ACTION_DETECTION_CHECKPOINT}] \ - [--det-config ${HUMAN_DETECTION_CONFIG_FILE}] \ - [--det-checkpoint ${HUMAN_DETECTION_CHECKPOINT}] \ - [--det-score-thr ${HUMAN_DETECTION_SCORE_THRESHOLD}] \ - [--action-score-thr ${ACTION_DETECTION_SCORE_THRESHOLD}] \ - [--label-map ${LABEL_MAP}] \ - [--device ${DEVICE}] \ - [--out-filename ${OUTPUT_FILENAME}] \ - [--predict-stepsize ${PREDICT_STEPSIZE}] \ - [--output-stepsize ${OUTPUT_STEPSIZE}] \ - [--output-fps ${OUTPUT_FPS}] -``` - -Optional arguments: - -- `SPATIOTEMPORAL_ACTION_DETECTION_CONFIG_FILE`: The spatiotemporal action detection config file path. -- `SPATIOTEMPORAL_ACTION_DETECTION_CHECKPOINT`: The spatiotemporal action detection checkpoint URL. -- `HUMAN_DETECTION_CONFIG_FILE`: The human detection config file path. -- `HUMAN_DETECTION_CHECKPOINT`: The human detection checkpoint URL. -- `HUMAN_DETECTION_SCORE_THRE`: The score threshold for human detection. Default: 0.9. -- `ACTION_DETECTION_SCORE_THRESHOLD`: The score threshold for action detection. Default: 0.5. -- `LABEL_MAP`: The label map used. Default: `tools/data/ava/label_map.txt`. -- `DEVICE`: Type of device to run the demo. Allowed values are cuda device like `cuda:0` or `cpu`. Default: `cuda:0`. -- `OUTPUT_FILENAME`: Path to the output file which is a video format. 
Default: `demo/stdet_demo.mp4`. -- `PREDICT_STEPSIZE`: Make a prediction per N frames. Default: 8. -- `OUTPUT_STEPSIZE`: Output 1 frame per N frames in the input video. Note that `PREDICT_STEPSIZE % OUTPUT_STEPSIZE == 0`. Default: 4. -- `OUTPUT_FPS`: The FPS of demo video output. Default: 6. - -Examples: - -Assume that you are located at `$MMACTION2` . - -1. Use the Faster RCNN as the human detector, SlowOnly-8x8-R101 as the action detector. Making predictions per 8 frames, and output 1 frame per 4 frames to the output video. The FPS of the output video is 4. - -```shell -python demo/demo_spatiotemporal_det.py --video demo/demo.mp4 \ - --config configs/detection/ava/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb.py \ - --checkpoint https://download.openmmlab.com/mmaction/detection/ava/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb_20201217-16378594.pth \ - --det-config demo/faster_rcnn_r50_fpn_2x_coco.py \ - --det-checkpoint http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_2x_coco/faster_rcnn_r50_fpn_2x_coco_bbox_mAP-0.384_20200504_210434-a5d8aa15.pth \ - --det-score-thr 0.9 \ - --action-score-thr 0.5 \ - --label-map tools/data/ava/label_map.txt \ - --predict-stepsize 8 \ - --output-stepsize 4 \ - --output-fps 6 -``` - ## Video GradCAM Demo -We provide a demo script to visualize GradCAM results using a single video. +MMAction2 provides a demo script to visualize GradCAM results using a single video. ```shell python demo/demo_gradcam.py ${CONFIG_FILE} ${CHECKPOINT_FILE} ${VIDEO_FILE} [--use-frames] \ @@ -236,219 +119,9 @@ or use checkpoint url from `configs/` to directly load corresponding checkpoint, demo/demo.mp4 --target-layer-name backbone/layer4/1/relu --out-filename demo/demo_gradcam_tsn.gif ``` -## Webcam demo - -We provide a demo script to implement real-time action recognition from web camera. In order to get predict results in range `[0, 1]`, make sure to set `model.['test_cfg'] = dict(average_clips='prob')` in config file. - -```shell -python demo/webcam_demo.py ${CONFIG_FILE} ${CHECKPOINT_FILE} ${LABEL_FILE} \ - [--device ${DEVICE_TYPE}] [--camera-id ${CAMERA_ID}] [--threshold ${THRESHOLD}] \ - [--average-size ${AVERAGE_SIZE}] [--drawing-fps ${DRAWING_FPS}] [--inference-fps ${INFERENCE_FPS}] -``` - -Optional arguments: - -- `DEVICE_TYPE`: Type of device to run the demo. Allowed values are cuda device like `cuda:0` or `cpu`. If not specified, it will be set to `cuda:0`. -- `CAMERA_ID`: ID of camera device If not specified, it will be set to 0. -- `THRESHOLD`: Threshold of prediction score for action recognition. Only label with score higher than the threshold will be shown. If not specified, it will be set to 0. -- `AVERAGE_SIZE`: Number of latest clips to be averaged for prediction. If not specified, it will be set to 1. -- `DRAWING_FPS`: Upper bound FPS value of the output drawing. If not specified, it will be set to 20. -- `INFERENCE_FPS`: Upper bound FPS value of the output drawing. If not specified, it will be set to 4. - -:::{note} -If your hardware is good enough, increasing the value of `DRAWING_FPS` and `INFERENCE_FPS` will get a better experience. -::: - -Examples: - -Assume that you are located at `$MMACTION2` and have already downloaded the checkpoints to the directory `checkpoints/`, -or use checkpoint url from `configs/` to directly load corresponding checkpoint, which will be automatically saved in `$HOME/.cache/torch/checkpoints`. - -1. 
Recognize the action from web camera as input by using a TSN model on cpu, averaging the score per 5 times - and outputting result labels with score higher than 0.2. - - ```shell - python demo/webcam_demo.py configs/recognition/tsn/tsn_r50_video_inference_1x1x3_100e_kinetics400_rgb.py \ - checkpoints/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth tools/data/kinetics/label_map_k400.txt --average-size 5 \ - --threshold 0.2 --device cpu - ``` - -2. Recognize the action from web camera as input by using a TSN model on cpu, averaging the score per 5 times - and outputting result labels with score higher than 0.2, loading checkpoint from url. - - ```shell - python demo/webcam_demo.py configs/recognition/tsn/tsn_r50_video_inference_1x1x3_100e_kinetics400_rgb.py \ - https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth \ - tools/data/kinetics/label_map_k400.txt --average-size 5 --threshold 0.2 --device cpu - ``` - -3. Recognize the action from web camera as input by using a I3D model on gpu by default, averaging the score per 5 times - and outputting result labels with score higher than 0.2. - - ```shell - python demo/webcam_demo.py configs/recognition/i3d/i3d_r50_video_inference_32x2x1_100e_kinetics400_rgb.py \ - checkpoints/i3d_r50_32x2x1_100e_kinetics400_rgb_20200614-c25ef9a4.pth tools/data/kinetics/label_map_k400.txt \ - --average-size 5 --threshold 0.2 - ``` - -:::{note} -Considering the efficiency difference for users' hardware, Some modifications might be done to suit the case. -Users can change: - -1). `SampleFrames` step (especially the number of `clip_len` and `num_clips`) of `test_pipeline` in the config file, like `--cfg-options data.test.pipeline.0.num_clips=3`. -2). Change to the suitable Crop methods like `TenCrop`, `ThreeCrop`, `CenterCrop`, etc. in `test_pipeline` of the config file, like `--cfg-options data.test.pipeline.4.type=CenterCrop`. -3). Change the number of `--average-size`. The smaller, the faster. -::: - -## Long video demo - -We provide a demo script to predict different labels using a single long video. In order to get predict results in range `[0, 1]`, make sure to set `test_cfg = dict(average_clips='prob')` in config file. - -```shell -python demo/long_video_demo.py ${CONFIG_FILE} ${CHECKPOINT_FILE} ${VIDEO_FILE} ${LABEL_FILE} \ - ${OUT_FILE} [--input-step ${INPUT_STEP}] [--device ${DEVICE_TYPE}] [--threshold ${THRESHOLD}] -``` - -Optional arguments: - -- `OUT_FILE`: Path to the output, either video or json file -- `INPUT_STEP`: Input step for sampling frames, which can help to get more spare input. If not specified , it will be set to 1. -- `DEVICE_TYPE`: Type of device to run the demo. Allowed values are cuda device like `cuda:0` or `cpu`. If not specified, it will be set to `cuda:0`. -- `THRESHOLD`: Threshold of prediction score for action recognition. Only label with score higher than the threshold will be shown. If not specified, it will be set to 0.01. -- `STRIDE`: By default, the demo generates a prediction for each single frame, which might cost lots of time. To speed up, you can set the argument `STRIDE` and then the demo will generate a prediction every `STRIDE x sample_length` frames (`sample_length` indicates the size of temporal window from which you sample frames, which equals to `clip_len x frame_interval`). For example, if the sample_length is 64 frames and you set `STRIDE` to 0.5, predictions will be generated every 32 frames. 
If set as 0, predictions will be generated for each frame. The desired value of `STRIDE` is (0, 1\], while it also works for `STRIDE > 1` (the generated predictions will be too sparse). Default: 0. -- `LABEL_COLOR`: Font Color of the labels in (B, G, R). Default is white, that is (256, 256, 256). -- `MSG_COLOR`: Font Color of the messages in (B, G, R). Default is gray, that is (128, 128, 128). - -Examples: - -Assume that you are located at `$MMACTION2` and have already downloaded the checkpoints to the directory `checkpoints/`, -or use checkpoint url from `configs/` to directly load corresponding checkpoint, which will be automatically saved in `$HOME/.cache/torch/checkpoints`. - -1. Predict different labels in a long video by using a TSN model on cpu, with 3 frames for input steps (that is, random sample one from each 3 frames) - and outputting result labels with score higher than 0.2. - - ```shell - python demo/long_video_demo.py configs/recognition/tsn/tsn_r50_video_inference_1x1x3_100e_kinetics400_rgb.py \ - checkpoints/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth PATH_TO_LONG_VIDEO tools/data/kinetics/label_map_k400.txt PATH_TO_SAVED_VIDEO \ - --input-step 3 --device cpu --threshold 0.2 - ``` - -2. Predict different labels in a long video by using a TSN model on cpu, with 3 frames for input steps (that is, random sample one from each 3 frames) - and outputting result labels with score higher than 0.2, loading checkpoint from url. - - ```shell - python demo/long_video_demo.py configs/recognition/tsn/tsn_r50_video_inference_1x1x3_100e_kinetics400_rgb.py \ - https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth \ - PATH_TO_LONG_VIDEO tools/data/kinetics/label_map_k400.txt PATH_TO_SAVED_VIDEO --input-step 3 --device cpu --threshold 0.2 - ``` - -3. Predict different labels in a long video from web by using a TSN model on cpu, with 3 frames for input steps (that is, random sample one from each 3 frames) - and outputting result labels with score higher than 0.2, loading checkpoint from url. - - ```shell - python demo/long_video_demo.py configs/recognition/tsn/tsn_r50_video_inference_1x1x3_100e_kinetics400_rgb.py \ - https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb/tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth \ - https://www.learningcontainer.com/wp-content/uploads/2020/05/sample-mp4-file.mp4 \ - tools/data/kinetics/label_map_k400.txt PATH_TO_SAVED_VIDEO --input-step 3 --device cpu --threshold 0.2 - ``` - -4. Predict different labels in a long video by using a I3D model on gpu, with input_step=1, threshold=0.01 as default and print the labels in cyan. - - ```shell - python demo/long_video_demo.py configs/recognition/i3d/i3d_r50_video_inference_32x2x1_100e_kinetics400_rgb.py \ - checkpoints/i3d_r50_256p_32x2x1_100e_kinetics400_rgb_20200801-7d9f44de.pth PATH_TO_LONG_VIDEO tools/data/kinetics/label_map_k400.txt PATH_TO_SAVED_VIDEO \ - --label-color 255 255 0 - ``` - -5. 
Predict different labels in a long video by using a I3D model on gpu and save the results as a `json` file - - ```shell - python demo/long_video_demo.py configs/recognition/i3d/i3d_r50_video_inference_32x2x1_100e_kinetics400_rgb.py \ - checkpoints/i3d_r50_256p_32x2x1_100e_kinetics400_rgb_20200801-7d9f44de.pth PATH_TO_LONG_VIDEO tools/data/kinetics/label_map_k400.txt ./results.json - ``` - -## SpatioTemporal Action Detection Webcam Demo - -We provide a demo script to implement real-time spatio-temporal action detection from a web camera. - -```shell -python demo/webcam_demo_spatiotemporal_det.py \ - [--config ${SPATIOTEMPORAL_ACTION_DETECTION_CONFIG_FILE}] \ - [--checkpoint ${SPATIOTEMPORAL_ACTION_DETECTION_CHECKPOINT}] \ - [--action-score-thr ${ACTION_DETECTION_SCORE_THRESHOLD}] \ - [--det-config ${HUMAN_DETECTION_CONFIG_FILE}] \ - [--det-checkpoint ${HUMAN_DETECTION_CHECKPOINT}] \ - [--det-score-thr ${HUMAN_DETECTION_SCORE_THRESHOLD}] \ - [--input-video] ${INPUT_VIDEO} \ - [--label-map ${LABEL_MAP}] \ - [--device ${DEVICE}] \ - [--output-fps ${OUTPUT_FPS}] \ - [--out-filename ${OUTPUT_FILENAME}] \ - [--show] \ - [--display-height] ${DISPLAY_HEIGHT} \ - [--display-width] ${DISPLAY_WIDTH} \ - [--predict-stepsize ${PREDICT_STEPSIZE}] \ - [--clip-vis-length] ${CLIP_VIS_LENGTH} -``` - -Optional arguments: - -- `SPATIOTEMPORAL_ACTION_DETECTION_CONFIG_FILE`: The spatiotemporal action detection config file path. -- `SPATIOTEMPORAL_ACTION_DETECTION_CHECKPOINT`: The spatiotemporal action detection checkpoint path or URL. -- `ACTION_DETECTION_SCORE_THRESHOLD`: The score threshold for action detection. Default: 0.4. -- `HUMAN_DETECTION_CONFIG_FILE`: The human detection config file path. -- `HUMAN_DETECTION_CHECKPOINT`: The human detection checkpoint URL. -- `HUMAN_DETECTION_SCORE_THRE`: The score threshold for human detection. Default: 0.9. -- `INPUT_VIDEO`: The webcam id or video path of the source. Default: `0`. -- `LABEL_MAP`: The label map used. Default: `tools/data/ava/label_map.txt`. -- `DEVICE`: Type of device to run the demo. Allowed values are cuda device like `cuda:0` or `cpu`. Default: `cuda:0`. -- `OUTPUT_FPS`: The FPS of demo video output. Default: 15. -- `OUTPUT_FILENAME`: Path to the output file which is a video format. Default: None. -- `--show`: Whether to show predictions with `cv2.imshow`. -- `DISPLAY_HEIGHT`: The height of the display frame. Default: 0. -- `DISPLAY_WIDTH`: The width of the display frame. Default: 0. If `DISPLAY_HEIGHT <= 0 and DISPLAY_WIDTH <= 0`, the display frame and input video share the same shape. -- `PREDICT_STEPSIZE`: Make a prediction per N frames. Default: 8. -- `CLIP_VIS_LENGTH`: The number of the draw frames for each clip. In other words, for each clip, there are at most `CLIP_VIS_LENGTH` frames to be draw around the keyframe. DEFAULT: 8. - -Tips to get a better experience for webcam demo: - -- How to choose `--output-fps`? - - - `--output-fps` should be almost equal to read thread fps. - - Read thread fps is printed by logger in format `DEBUG:__main__:Read Thread: {duration} ms, {fps} fps` - -- How to choose `--predict-stepsize`? - - - It's related to how to choose human detector and spatio-temporval model. - - Overall, the duration of read thread for each task should be greater equal to that of model inference. - - The durations for read/inference are both printed by logger. - - Larger `--predict-stepsize` leads to larger duration for read thread. 
- - In order to fully take the advantage of computation resources, decrease the value of `--predict-stepsize`. - -Examples: - -Assume that you are located at `$MMACTION2` . - -1. Use the Faster RCNN as the human detector, SlowOnly-8x8-R101 as the action detector. Making predictions per 40 frames, and FPS of the output is 20. Show predictions with `cv2.imshow`. - -```shell -python demo/webcam_demo_spatiotemporal_det.py \ - --input-video 0 \ - --config configs/detection/ava/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb.py \ - --checkpoint https://download.openmmlab.com/mmaction/detection/ava/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb_20201217-16378594.pth \ - --det-config demo/faster_rcnn_r50_fpn_2x_coco.py \ - --det-checkpoint http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_2x_coco/faster_rcnn_r50_fpn_2x_coco_bbox_mAP-0.384_20200504_210434-a5d8aa15.pth \ - --det-score-thr 0.9 \ - --action-score-thr 0.5 \ - --label-map tools/data/ava/label_map.txt \ - --predict-stepsize 40 \ - --output-fps 20 \ - --show -``` - ## Skeleton-based Action Recognition Demo -We provide a demo script to predict the skeleton-based action recognition result using a single video. +MMAction2 provides an demo script to predict the skeleton-based action recognition result using a single video. ```shell python demo/demo_skeleton.py ${VIDEO_FILE} ${OUT_FILENAME} \ @@ -457,6 +130,7 @@ python demo/demo_skeleton.py ${VIDEO_FILE} ${OUT_FILENAME} \ [--det-config ${HUMAN_DETECTION_CONFIG_FILE}] \ [--det-checkpoint ${HUMAN_DETECTION_CHECKPOINT}] \ [--det-score-thr ${HUMAN_DETECTION_SCORE_THRESHOLD}] \ + [--det-cat-id ${HUMAN_DETECTION_CATEGORY_ID}] \ [--pose-config ${HUMAN_POSE_ESTIMATION_CONFIG_FILE}] \ [--pose-checkpoint ${HUMAN_POSE_ESTIMATION_CHECKPOINT}] \ [--label-map ${LABEL_MAP}] \ @@ -467,208 +141,47 @@ python demo/demo_skeleton.py ${VIDEO_FILE} ${OUT_FILENAME} \ Optional arguments: - `SKELETON_BASED_ACTION_RECOGNITION_CONFIG_FILE`: The skeleton-based action recognition config file path. -- `SKELETON_BASED_ACTION_RECOGNITION_CHECKPOINT`: The skeleton-based action recognition checkpoint path or URL. +- `SKELETON_BASED_ACTION_RECOGNITION_CHECKPOINT`: The skeleton-based action recognition checkpoint path or url. - `HUMAN_DETECTION_CONFIG_FILE`: The human detection config file path. -- `HUMAN_DETECTION_CHECKPOINT`: The human detection checkpoint URL. -- `HUMAN_DETECTION_SCORE_THRE`: The score threshold for human detection. Default: 0.9. +- `HUMAN_DETECTION_CHECKPOINT`: The human detection checkpoint path or url. +- `HUMAN_DETECTION_SCORE_THRE`: The score threshold for human detection. Defaults to 0.9. +- `HUMAN_DETECTION_CATEGORY_ID`: The category id for human detection. Defaults to 0. - `HUMAN_POSE_ESTIMATION_CONFIG_FILE`: The human pose estimation config file path (trained on COCO-Keypoint). -- `HUMAN_POSE_ESTIMATION_CHECKPOINT`: The human pose estimation checkpoint URL (trained on COCO-Keypoint). -- `LABEL_MAP`: The label map used. Default: `tools/data/ava/label_map.txt`. -- `DEVICE`: Type of device to run the demo. Allowed values are cuda device like `cuda:0` or `cpu`. Default: `cuda:0`. -- `SHORT_SIDE`: The short side used for frame extraction. Default: 480. +- `HUMAN_POSE_ESTIMATION_CHECKPOINT`: The human pose estimation checkpoint path or url (trained on COCO-Keypoint). +- `LABEL_MAP`: The label map used. Defaults to `'tools/data/skeleton/label_map_ntu60.txt'`. +- `DEVICE`: Type of device to run the demo. 
Allowed values are cuda device like `'cuda:0'` or `'cpu'`. Defaults to `'cuda:0'`. +- `SHORT_SIDE`: The short side used for frame extraction. Defaults to 480. Examples: Assume that you are located at `$MMACTION2` . -1. Use the Faster RCNN as the human detector, HRNetw32 as the pose estimator, PoseC3D-NTURGB+D-120-Xsub-keypoint as the skeleton-based action recognizer. +1. Use the Faster-RCNN as the human detector, HRNetw32 as the pose estimator, PoseC3D-NTURGB+D-60-XSub-Keypoint as the skeleton-based action recognizer. ```shell -python demo/demo_skeleton.py demo/ntu_sample.avi demo/skeleton_demo.mp4 \ - --config configs/skeleton/posec3d/slowonly_r50_u48_240e_ntu120_xsub_keypoint.py \ - --checkpoint https://download.openmmlab.com/mmaction/skeleton/posec3d/slowonly_r50_u48_240e_ntu120_xsub_keypoint/slowonly_r50_u48_240e_ntu120_xsub_keypoint-6736b03f.pth \ - --det-config demo/faster_rcnn_r50_fpn_2x_coco.py \ +python demo/demo_skeleton.py demo/demo_skeleton.mp4 demo/demo_skeleton_out.mp4 \ + --config configs/skeleton/posec3d/slowonly_r50_8xb16-u48-240e_ntu60-xsub-keypoint.py \ + --checkpoint https://download.openmmlab.com/mmaction/skeleton/posec3d/slowonly_r50_u48_240e_ntu60_xsub_keypoint/slowonly_r50_u48_240e_ntu60_xsub_keypoint-f3adabf1.pth \ + --det-config demo/demo_configs/faster-rcnn_r50_fpn_2x_coco_infer.py \ --det-checkpoint http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_2x_coco/faster_rcnn_r50_fpn_2x_coco_bbox_mAP-0.384_20200504_210434-a5d8aa15.pth \ --det-score-thr 0.9 \ - --pose-config demo/hrnet_w32_coco_256x192.py \ + --det-cat-id 0 \ + --pose-config demo/demo_configs/td-hm_hrnet-w32_8xb64-210e_coco-256x192_infer.py \ --pose-checkpoint https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_256x192-c78dce93_20200708.pth \ - --label-map tools/data/skeleton/label_map_ntu120.txt + --label-map tools/data/skeleton/label_map_ntu60.txt ``` -2. Use the Faster RCNN as the human detector, HRNetw32 as the pose estimator, STGCN-NTURGB+D-60-Xsub-keypoint as the skeleton-based action recognizer. +2. Use the Faster-RCNN as the human detector, HRNetw32 as the pose estimator, STGCN-NTURGB+D-60-XSub-Keypoint as the skeleton-based action recognizer. 
```shell -python demo/demo_skeleton.py demo/ntu_sample.avi demo/skeleton_demo.mp4 \ - --config configs/skeleton/stgcn/stgcn_80e_ntu60_xsub_keypoint.py \ +python demo/demo_skeleton.py demo/demo_skeleton.mp4 demo/demo_skeleton_out.mp4 \ + --config configs/skeleton/stgcn/stgcn_1xb16-80e_ntu60-xsub-keypoint.py \ --checkpoint https://download.openmmlab.com/mmaction/skeleton/stgcn/stgcn_80e_ntu60_xsub_keypoint/stgcn_80e_ntu60_xsub_keypoint-e7bb9653.pth \ - --det-config demo/faster_rcnn_r50_fpn_2x_coco.py \ + --det-config demo/demo_configs/faster-rcnn_r50_fpn_2x_coco_infer.py \ --det-checkpoint http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_2x_coco/faster_rcnn_r50_fpn_2x_coco_bbox_mAP-0.384_20200504_210434-a5d8aa15.pth \ --det-score-thr 0.9 \ - --pose-config demo/hrnet_w32_coco_256x192.py \ + --det-cat-id 0 \ + --pose-config demo/demo_configs/td-hm_hrnet-w32_8xb64-210e_coco-256x192_infer.py \ --pose-checkpoint https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_256x192-c78dce93_20200708.pth \ - --label-map tools/data/skeleton/label_map_ntu120.txt + --label-map tools/data/skeleton/label_map_ntu60.txt ``` - -## Video Structuralize Demo - -We provide a demo script to to predict the skeleton-based and rgb-based action recognition and spatio-temporal action detection result using a single video. - -```shell -python demo/demo_video_structuralize.py - [--rgb-stdet-config ${RGB_BASED_SPATIO_TEMPORAL_ACTION_DETECTION_CONFIG_FILE}] \ - [--rgb-stdet-checkpoint ${RGB_BASED_SPATIO_TEMPORAL_ACTION_DETECTION_CHECKPOINT}] \ - [--skeleton-stdet-checkpoint ${SKELETON_BASED_SPATIO_TEMPORAL_ACTION_DETECTION_CHECKPOINT}] \ - [--det-config ${HUMAN_DETECTION_CONFIG_FILE}] \ - [--det-checkpoint ${HUMAN_DETECTION_CHECKPOINT}] \ - [--pose-config ${HUMAN_POSE_ESTIMATION_CONFIG_FILE}] \ - [--pose-checkpoint ${HUMAN_POSE_ESTIMATION_CHECKPOINT}] \ - [--skeleton-config ${SKELETON_BASED_ACTION_RECOGNITION_CONFIG_FILE}] \ - [--skeleton-checkpoint ${SKELETON_BASED_ACTION_RECOGNITION_CHECKPOINT}] \ - [--rgb-config ${RGB_BASED_ACTION_RECOGNITION_CONFIG_FILE}] \ - [--rgb-checkpoint ${RGB_BASED_ACTION_RECOGNITION_CHECKPOINT}] \ - [--use-skeleton-stdet ${USE_SKELETON_BASED_SPATIO_TEMPORAL_DETECTION_METHOD}] \ - [--use-skeleton-recog ${USE_SKELETON_BASED_ACTION_RECOGNITION_METHOD}] \ - [--det-score-thr ${HUMAN_DETECTION_SCORE_THRE}] \ - [--action-score-thr ${ACTION_DETECTION_SCORE_THRE}] \ - [--video ${VIDEO_FILE}] \ - [--label-map-stdet ${LABEL_MAP_FOR_SPATIO_TEMPORAL_ACTION_DETECTION}] \ - [--device ${DEVICE}] \ - [--out-filename ${OUTPUT_FILENAME}] \ - [--predict-stepsize ${PREDICT_STEPSIZE}] \ - [--output-stepsize ${OUTPU_STEPSIZE}] \ - [--output-fps ${OUTPUT_FPS}] \ - [--cfg-options] -``` - -Optional arguments: - -- `RGB_BASED_SPATIO_TEMPORAL_ACTION_DETECTION_CONFIG_FILE`: The rgb-based spatio temoral action detection config file path. -- `RGB_BASED_SPATIO_TEMPORAL_ACTION_DETECTION_CHECKPOINT`: The rgb-based spatio temoral action detection checkpoint path or URL. -- `SKELETON_BASED_SPATIO_TEMPORAL_ACTION_DETECTION_CHECKPOINT`: The skeleton-based spatio temoral action detection checkpoint path or URL. -- `HUMAN_DETECTION_CONFIG_FILE`: The human detection config file path. -- `HUMAN_DETECTION_CHECKPOINT`: The human detection checkpoint URL. -- `HUMAN_POSE_ESTIMATION_CONFIG_FILE`: The human pose estimation config file path (trained on COCO-Keypoint). -- `HUMAN_POSE_ESTIMATION_CHECKPOINT`: The human pose estimation checkpoint URL (trained on COCO-Keypoint). 
-- `SKELETON_BASED_ACTION_RECOGNITION_CONFIG_FILE`: The skeleton-based action recognition config file path. -- `SKELETON_BASED_ACTION_RECOGNITION_CHECKPOINT`: The skeleton-based action recognition checkpoint path or URL. -- `RGB_BASED_ACTION_RECOGNITION_CONFIG_FILE`: The rgb-based action recognition config file path. -- `RGB_BASED_ACTION_RECOGNITION_CHECKPOINT`: The rgb-based action recognition checkpoint path or URL. -- `USE_SKELETON_BASED_SPATIO_TEMPORAL_DETECTION_METHOD`: Use skeleton-based spatio temporal action detection method. -- `USE_SKELETON_BASED_ACTION_RECOGNITION_METHOD`: Use skeleton-based action recognition method. -- `HUMAN_DETECTION_SCORE_THRE`: The score threshold for human detection. Default: 0.9. -- `ACTION_DETECTION_SCORE_THRE`: The score threshold for action detection. Default: 0.4. -- `LABEL_MAP_FOR_SPATIO_TEMPORAL_ACTION_DETECTION`: The label map for spatio temporal action detection used. Default: `tools/data/ava/label_map.txt`. -- `LABEL_MAP`: The label map for action recognition. Default: `tools/data/kinetics/label_map_k400.txt`. -- `DEVICE`: Type of device to run the demo. Allowed values are cuda device like `cuda:0` or `cpu`. Default: `cuda:0`. -- `OUTPUT_FILENAME`: Path to the output file which is a video format. Default: `demo/test_stdet_recognition_output.mp4`. -- `PREDICT_STEPSIZE`: Make a prediction per N frames. Default: 8. -- `OUTPUT_STEPSIZE`: Output 1 frame per N frames in the input video. Note that `PREDICT_STEPSIZE % OUTPUT_STEPSIZE == 0`. Default: 1. -- `OUTPUT_FPS`: The FPS of demo video output. Default: 24. - -Examples: - -Assume that you are located at `$MMACTION2` . - -1. Use the Faster RCNN as the human detector, HRNetw32 as the pose estimator, PoseC3D as the skeleton-based action recognizer and the skeleton-based spatio temporal action detector. Making action detection predictions per 8 frames, and output 1 frame per 1 frame to the output video. The FPS of the output video is 24. - -```shell -python demo/demo_video_structuralize.py - --skeleton-stdet-checkpoint https://download.openmmlab.com/mmaction/skeleton/posec3d/posec3d_ava.pth \ - --det-config demo/faster_rcnn_r50_fpn_2x_coco.py \ - --det-checkpoint http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_2x_coco/faster_rcnn_r50_fpn_2x_coco_bbox_mAP-0.384_20200504_210434-a5d8aa15.pth \ - --pose-config demo/hrnet_w32_coco_256x192.py - --pose-checkpoint https://download.openmmlab.com/mmpose/top_down/hrnet/ - hrnet_w32_coco_256x192-c78dce93_20200708.pth \ - --skeleton-config configs/skeleton/posec3d/slowonly_r50_u48_240e_ntu120_xsub_keypoint.py \ - --skeleton-checkpoint https://download.openmmlab.com/mmaction/skeleton/posec3d/ - posec3d_k400.pth \ - --use-skeleton-stdet \ - --use-skeleton-recog \ - --label-map-stdet tools/data/ava/label_map.txt \ - --label-map tools/data/kinetics/label_map_k400.txt -``` - -2. Use the Faster RCNN as the human detector, TSN-R50-1x1x3 as the rgb-based action recognizer, SlowOnly-8x8-R101 as the rgb-based spatio temporal action detector. Making action detection predictions per 8 frames, and output 1 frame per 1 frame to the output video. The FPS of the output video is 24. 
- -```shell -python demo/demo_video_structuralize.py - --rgb-stdet-config configs/detection/ava/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb.py \ - --rgb-stdet-checkpoint https://download.openmmlab.com/mmaction/detection/ava/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb_20201217-16378594.pth \ - --det-config demo/faster_rcnn_r50_fpn_2x_coco.py \ - --det-checkpoint http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_2x_coco/faster_rcnn_r50_fpn_2x_coco_bbox_mAP-0.384_20200504_210434-a5d8aa15.pth \ - --rgb-config configs/recognition/tsn/ - tsn_r50_video_inference_1x1x3_100e_kinetics400_rgb.py \ - --rgb-checkpoint https://download.openmmlab.com/mmaction/recognition/ - tsn/tsn_r50_1x1x3_100e_kinetics400_rgb/ - tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth \ - --label-map-stdet tools/data/ava/label_map.txt \ - --label-map tools/data/kinetics/label_map_k400.txt -``` - -3. Use the Faster RCNN as the human detector, HRNetw32 as the pose estimator, PoseC3D as the skeleton-based action recognizer, SlowOnly-8x8-R101 as the rgb-based spatio temporal action detector. Making action detection predictions per 8 frames, and output 1 frame per 1 frame to the output video. The FPS of the output video is 24. - -```shell -python demo/demo_video_structuralize.py - --rgb-stdet-config configs/detection/ava/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb.py \ - --rgb-stdet-checkpoint https://download.openmmlab.com/mmaction/detection/ava/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb_20201217-16378594.pth \ - --det-config demo/faster_rcnn_r50_fpn_2x_coco.py \ - --det-checkpoint http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_2x_coco/faster_rcnn_r50_fpn_2x_coco_bbox_mAP-0.384_20200504_210434-a5d8aa15.pth \ - --pose-config demo/hrnet_w32_coco_256x192.py - --pose-checkpoint https://download.openmmlab.com/mmpose/top_down/hrnet/ - hrnet_w32_coco_256x192-c78dce93_20200708.pth \ - --skeleton-config configs/skeleton/posec3d/slowonly_r50_u48_240e_ntu120_xsub_keypoint.py \ - --skeleton-checkpoint https://download.openmmlab.com/mmaction/skeleton/posec3d/ - posec3d_k400.pth \ - --use-skeleton-recog \ - --label-map-stdet tools/data/ava/label_map.txt \ - --label-map tools/data/kinetics/label_map_k400.txt -``` - -4. Use the Faster RCNN as the human detector, HRNetw32 as the pose estimator, TSN-R50-1x1x3 as the rgb-based action recognizer, PoseC3D as the skeleton-based spatio temporal action detector. Making action detection predictions per 8 frames, and output 1 frame per 1 frame to the output video. The FPS of the output video is 24. 
- -```shell -python demo/demo_video_structuralize.py - --skeleton-stdet-checkpoint https://download.openmmlab.com/mmaction/skeleton/posec3d/posec3d_ava.pth \ - --det-config demo/faster_rcnn_r50_fpn_2x_coco.py \ - --det-checkpoint http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_2x_coco/faster_rcnn_r50_fpn_2x_coco_bbox_mAP-0.384_20200504_210434-a5d8aa15.pth \ - --pose-config demo/hrnet_w32_coco_256x192.py - --pose-checkpoint https://download.openmmlab.com/mmpose/top_down/hrnet/ - hrnet_w32_coco_256x192-c78dce93_20200708.pth \ - --skeleton-config configs/skeleton/posec3d/slowonly_r50_u48_240e_ntu120_xsub_keypoint.py \ - --rgb-config configs/recognition/tsn/ - tsn_r50_video_inference_1x1x3_100e_kinetics400_rgb.py \ - --rgb-checkpoint https://download.openmmlab.com/mmaction/recognition/ - tsn/tsn_r50_1x1x3_100e_kinetics400_rgb/ - tsn_r50_1x1x3_100e_kinetics400_rgb_20200614-e508be42.pth \ - --use-skeleton-stdet \ - --label-map-stdet tools/data/ava/label_map.txt \ - --label-map tools/data/kinetics/label_map_k400.txt -``` - -## Audio Demo - -Demo script to predict the audio-based action recognition using a single audio feature. - -The script `extract_audio.py` can be used to extract audios from videos and the script `build_audio_features.py` can be used to extract the audio features. - -```shell -python demo/demo_audio.py ${CONFIG_FILE} ${CHECKPOINT_FILE} ${AUDIO_FILE} {LABEL_FILE} [--device ${DEVICE}] -``` - -Optional arguments: - -- `DEVICE`: Type of device to run the demo. Allowed values are cuda devices like `cuda:0` or `cpu`. If not specified, it will be set to `cuda:0`. - -Examples: - -Assume that you are located at `$MMACTION2` and have already downloaded the checkpoints to the directory `checkpoints/`, -or use checkpoint url from `configs/` to directly load the corresponding checkpoint, which will be automatically saved in `$HOME/.cache/torch/checkpoints`. - -1. Recognize an audio file as input by using a tsn model on cuda by default. - - ```shell - python demo/demo_audio.py \ - configs/recognition_audio/resnet/tsn_r18_64x1x1_100e_kinetics400_audio_feature.py \ - https://download.openmmlab.com/mmaction/recognition/audio_recognition/tsn_r18_64x1x1_100e_kinetics400_audio_feature/tsn_r18_64x1x1_100e_kinetics400_audio_feature_20201012-bf34df6c.pth \ - audio_feature.npy label_map_k400.txt - ``` diff --git a/docs/en/index.rst b/docs/en/index.rst index d6b1ac7928..59e3e49b53 100644 --- a/docs/en/index.rst +++ b/docs/en/index.rst @@ -10,10 +10,20 @@ You can switch between Chinese and English documents in the lower-left corner of get_started.md .. toctree:: - :maxdepth: 2 + :maxdepth: 1 :caption: User Guides - user_guides/index.rst + user_guides/1_config.md + user_guides/2_data_prepare.md + user_guides/3_inference.md + user_guides/4_train_test.md + +.. toctree:: + :maxdepth: 1 + :caption: Useful Tools + + user_guides/useful_tools.md + user_guides/visualization.md .. toctree:: :maxdepth: 1 diff --git a/docs/en/user_guides/3_inference.md b/docs/en/user_guides/3_inference.md index f492892253..5bd459e51f 100644 --- a/docs/en/user_guides/3_inference.md +++ b/docs/en/user_guides/3_inference.md @@ -1,178 +1,43 @@ # Tutorial 3: Inference with existing models -## Inference with RGB-based Action Recognition Models +MMAction2 provides pre-trained models for video understanding in [Model Zoo](../modelzoo.md). +This note will show **how to use existing models to inference on given video**. 
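+
+In short, inference boils down to a few high-level calls. The snippet below is only a minimal sketch in which the config, checkpoint and video paths are placeholders to replace with your own files; the full runnable example later in this note gives concrete values:
+
+```python
+from mmaction.apis import inference_recognizer, init_recognizer
+from mmaction.utils import register_all_modules
+
+register_all_modules()  # register MMAction2 modules so the config can be resolved
+model = init_recognizer('path/to/config.py', 'path/to/checkpoint.pth', device='cpu')
+result = inference_recognizer(model, 'path/to/video.mp4')  # prediction for the whole video
+```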
-MMAction2 provides an inference script to predict the recognition result using a single video. In order to get predict results in range `[0, 1]`, make sure to set `model.cls_head.average_clips = 'prob'` in config file.
+As for how to test existing models on standard datasets, please see this [guide](./4_train_test.md#test).
 
-```shell
-python demo/demo.py ${CONFIG_FILE} ${CHECKPOINT_FILE} ${VIDEO_FILE} ${LABEL_FILE} \
-    [--device ${DEVICE_TYPE}] [--fps ${FPS}] [--font-scale ${FONT_SCALE}] [--font-color ${FONT_COLOR}] \
-    [--target-resolution ${TARGET_RESOLUTION}] [--out-filename ${OUT_FILE}]
-```
-
-Optional arguments:
-
-- `DEVICE_TYPE`: Type of device to run the demo. Allowed values are cuda device like `'cuda:0'` or `'cpu'`. Defaults to `'cuda:0'`.
-- `FPS`: FPS value of the output video. Defaults to 30.
-- `FONT_SCALE`: Font scale of the label added in the video. Defaults to 0.5.
-- `FONT_COLOR`: Font color of the label added in the video. Defaults to `'white'`.
-- `TARGET_RESOLUTION`: Resolution(desired_width, desired_height) for resizing the frames before output when using a video as input. If not specified, it will be None and the frames are resized by keeping the existing aspect ratio.
-- `OUT_FILE`: Path to the output file which can be a video format or gif format. If not specified, it will be set to `None` and does not generate the output file.
-
-Examples:
-
-Assume that you are located at `$MMACTION2` and have already downloaded the checkpoints to the directory `checkpoints/`,
-or use checkpoint url from to directly load corresponding checkpoint, which will be automatically saved in `$HOME/.cache/torch/checkpoints`.
-
-1. Recognize a video file as input by using a TSN model on cuda by default.
+## Inference on a given video
 
-   ```shell
-   # The demo.mp4 and label_map_k400.txt are both from Kinetics-400
-   python demo/demo.py configs/recognition/tsn/tsn_r50_8xb32-1x1x8-100e_kinetics400-rgb.py \
-       checkpoints/tsn_r50_8xb32-1x1x8-100e_kinetics400-rgb_20220818-2692d16c.pth \
-       demo/demo.mp4 tools/data/kinetics/label_map_k400.txt
-   ```
+MMAction2 provides high-level Python APIs for inference on a given video:
 
-2. Recognize a video file as input by using a TSN model on cuda by default, loading checkpoint from url.
+- [init_recognizer](mmaction.apis.init_recognizer): Initialize a recognizer with a config and checkpoint
+- [inference_recognizer](mmaction.apis.inference_recognizer): Inference on a given video
 
-   ```shell
-   # The demo.mp4 and label_map_k400.txt are both from Kinetics-400
-   python demo/demo.py configs/recognition/tsn/tsn_r50_8xb32-1x1x8-100e_kinetics400-rgb.py \
-       https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_r50_8xb32-1x1x8-100e_kinetics400-rgb/tsn_r50_8xb32-1x1x8-100e_kinetics400-rgb_20220818-2692d16c.pth \
-       demo/demo.mp4 tools/data/kinetics/label_map_k400.txt
-   ```
+Here is an example of building the model and running inference on a given video using a Kinetics-400 pre-trained checkpoint.
 
-3. Recognize a video file as input by using a TSN model and then generate an mp4 file.
+```{note}
+If you use mmaction2 as a 3rd-party package, you need to download the config and the demo video in the example. 
- ```shell - # The demo.mp4 and label_map_k400.txt are both from Kinetics-400 - python demo/demo.py configs/recognition/tsn/tsn_r50_8xb32-1x1x8-100e_kinetics400-rgb.py \ - checkpoints/tsn_r50_8xb32-1x1x8-100e_kinetics400-rgb_20220818-2692d16c.pth \ - demo/demo.mp4 tools/data/kinetics/label_map_k400.txt --out-filename demo/demo_out.mp4 - ``` +Run 'mim download mmaction2 --config tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb --dest .' to download the required config. -## Inference with Skeleton-based Action Recognition Models - -MMAction2 provides an inference script to predict the skeleton-based action recognition result using a single video. - -```shell -python demo/demo_skeleton.py ${VIDEO_FILE} ${OUT_FILENAME} \ - [--config ${SKELETON_BASED_ACTION_RECOGNITION_CONFIG_FILE}] \ - [--checkpoint ${SKELETON_BASED_ACTION_RECOGNITION_CHECKPOINT}] \ - [--det-config ${HUMAN_DETECTION_CONFIG_FILE}] \ - [--det-checkpoint ${HUMAN_DETECTION_CHECKPOINT}] \ - [--det-score-thr ${HUMAN_DETECTION_SCORE_THRESHOLD}] \ - [--det-cat-id ${HUMAN_DETECTION_CATEGORY_ID}] \ - [--pose-config ${HUMAN_POSE_ESTIMATION_CONFIG_FILE}] \ - [--pose-checkpoint ${HUMAN_POSE_ESTIMATION_CHECKPOINT}] \ - [--label-map ${LABEL_MAP}] \ - [--device ${DEVICE}] \ - [--short-side] ${SHORT_SIDE} +Run 'wget https://github.com/open-mmlab/mmaction2/blob/dev-1.x/demo/demo.mp4' to download the desired demo video. ``` -Optional arguments: - -- `SKELETON_BASED_ACTION_RECOGNITION_CONFIG_FILE`: The skeleton-based action recognition config file path. -- `SKELETON_BASED_ACTION_RECOGNITION_CHECKPOINT`: The skeleton-based action recognition checkpoint path or url. -- `HUMAN_DETECTION_CONFIG_FILE`: The human detection config file path. -- `HUMAN_DETECTION_CHECKPOINT`: The human detection checkpoint path or url. -- `HUMAN_DETECTION_SCORE_THRE`: The score threshold for human detection. Defaults to 0.9. -- `HUMAN_DETECTION_CATEGORY_ID`: The category id for human detection. Defaults to 0. -- `HUMAN_POSE_ESTIMATION_CONFIG_FILE`: The human pose estimation config file path (trained on COCO-Keypoint). -- `HUMAN_POSE_ESTIMATION_CHECKPOINT`: The human pose estimation checkpoint path or url (trained on COCO-Keypoint). -- `LABEL_MAP`: The label map used. Defaults to `'tools/data/skeleton/label_map_ntu60.txt'`. -- `DEVICE`: Type of device to run the demo. Allowed values are cuda device like `'cuda:0'` or `'cpu'`. Defaults to `'cuda:0'`. -- `SHORT_SIDE`: The short side used for frame extraction. Defaults to 480. +```python +from mmaction.apis import inference_recognizer, init_recognizer +from mmaction.utils import register_all_modules -Examples: +config_path = 'configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb.py' +checkpoint_path = 'https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb_20220906-2692d16c.pth' # can be a local path +img_path = 'demo/demo.mp4' # you can specify your own picture path -Assume that you are located at `$MMACTION2` . - -1. Use the Faster-RCNN as the human detector, HRNetw32 as the pose estimator, PoseC3D-NTURGB+D-60-XSub-Keypoint as the skeleton-based action recognizer. 
- -```shell -python demo/demo_skeleton.py demo/demo_skeleton.mp4 demo/demo_skeleton_out.mp4 \ - --config configs/skeleton/posec3d/slowonly_r50_8xb16-u48-240e_ntu60-xsub-keypoint.py \ - --checkpoint https://download.openmmlab.com/mmaction/skeleton/posec3d/slowonly_r50_u48_240e_ntu60_xsub_keypoint/slowonly_r50_u48_240e_ntu60_xsub_keypoint-f3adabf1.pth \ - --det-config demo/demo_configs/faster-rcnn_r50_fpn_2x_coco_infer.py \ - --det-checkpoint http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_2x_coco/faster_rcnn_r50_fpn_2x_coco_bbox_mAP-0.384_20200504_210434-a5d8aa15.pth \ - --det-score-thr 0.9 \ - --det-cat-id 0 \ - --pose-config demo/demo_configs/td-hm_hrnet-w32_8xb64-210e_coco-256x192_infer.py \ - --pose-checkpoint https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_256x192-c78dce93_20200708.pth \ - --label-map tools/data/skeleton/label_map_ntu60.txt +# register all modules and set mmcls as the default scope. +register_all_modules() +# build the model from a config file and a checkpoint file +model = init_recognizer(config_path, checkpoint_path, device="cpu") # device can be 'cuda:0' +# test a single image +result = inference_recognizer(model, img_path) ``` -2. Use the Faster-RCNN as the human detector, HRNetw32 as the pose estimator, STGCN-NTURGB+D-60-XSub-Keypoint as the skeleton-based action recognizer. +`result` is a dictionary containing `pred_scores`. -```shell -python demo/demo_skeleton.py demo/demo_skeleton.mp4 demo/demo_skeleton_out.mp4 \ - --config configs/skeleton/stgcn/stgcn_1xb16-80e_ntu60-xsub-keypoint.py \ - --checkpoint https://download.openmmlab.com/mmaction/skeleton/stgcn/stgcn_80e_ntu60_xsub_keypoint/stgcn_80e_ntu60_xsub_keypoint-e7bb9653.pth \ - --det-config demo/demo_configs/faster-rcnn_r50_fpn_2x_coco_infer.py \ - --det-checkpoint http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_2x_coco/faster_rcnn_r50_fpn_2x_coco_bbox_mAP-0.384_20200504_210434-a5d8aa15.pth \ - --det-score-thr 0.9 \ - --det-cat-id 0 \ - --pose-config demo/demo_configs/td-hm_hrnet-w32_8xb64-210e_coco-256x192_infer.py \ - --pose-checkpoint https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w32_coco_256x192-c78dce93_20200708.pth \ - --label-map tools/data/skeleton/label_map_ntu60.txt -``` - -## SpatioTemporal Action Detection Video Demo - -We provide a demo script to predict the SpatioTemporal Action Detection result using a single video. - -```shell -python demo/demo_spatiotemporal_det.py --video ${VIDEO_FILE} \ - [--out-filename ${OUTPUT_FILENAME}] \ - [--config ${SPATIOTEMPORAL_ACTION_DETECTION_CONFIG_FILE}] \ - [--checkpoint ${SPATIOTEMPORAL_ACTION_DETECTION_CHECKPOINT}] \ - [--det-config ${HUMAN_DETECTION_CONFIG_FILE}] \ - [--det-checkpoint ${HUMAN_DETECTION_CHECKPOINT}] \ - [--det-score-thr ${HUMAN_DETECTION_SCORE_THRESHOLD}] \ - [--det-cat-id ${HUMAN_DETECTION_CATEGORY_ID}] \ - [--action-score-thr ${ACTION_DETECTION_SCORE_THRESHOLD}] \ - [--label-map ${LABEL_MAP}] \ - [--device ${DEVICE}] \ - [--short-side] ${SHORT_SIDE} \ - [--predict-stepsize ${PREDICT_STEPSIZE}] \ - [--output-stepsize ${OUTPUT_STEPSIZE}] \ - [--output-fps ${OUTPUT_FPS}] -``` - -Optional arguments: - -- `OUTPUT_FILENAME`: Path to the output file which is a video format. Defaults to `demo/stdet_demo.mp4`. -- `SPATIOTEMPORAL_ACTION_DETECTION_CONFIG_FILE`: The spatiotemporal action detection config file path. -- `SPATIOTEMPORAL_ACTION_DETECTION_CHECKPOINT`: The spatiotemporal action detection checkpoint URL. 
-- `HUMAN_DETECTION_CONFIG_FILE`: The human detection config file path. -- `HUMAN_DETECTION_CHECKPOINT`: The human detection checkpoint URL. -- `HUMAN_DETECTION_SCORE_THRESHOLD`: The score threshold for human detection. Defaults to 0.9. -- `HUMAN_DETECTION_CATEGORY_ID`: The category id for human detection. Defaults to 0. -- `ACTION_DETECTION_SCORE_THRESHOLD`: The score threshold for action detection. Defaults to 0.5. -- `LABEL_MAP`: The label map used. Defaults to `tools/data/ava/label_map.txt`. -- `DEVICE`: Type of device to run the demo. Allowed values are cuda device like `cuda:0` or `cpu`. Defaults to `cuda:0`. -- `SHORT_SIDE`: The short side used for frame extraction. Defaults to 256. -- `PREDICT_STEPSIZE`: Make a prediction per N frames. Defaults to 8. -- `OUTPUT_STEPSIZE`: Output 1 frame per N frames in the input video. Note that `PREDICT_STEPSIZE % OUTPUT_STEPSIZE == 0`. Defaults to 4. -- `OUTPUT_FPS`: The FPS of demo video output. Defaults to 6. - -Examples: - -Assume that you are located at `$MMACTION2` . - -1. Use the Faster RCNN as the human detector, SlowOnly-8x8-R101 as the action detector. Making predictions per 8 frames, and output 1 frame per 4 frames to the output video. The FPS of the output video is 4. - -```shell -python demo/demo_spatiotemporal_det.py demo/demo.mp4 demo/demo_spatiotemporal_det.mp4 \ - --config configs/detection/ava/slowonly_kinetics400-pretrained-r101_8xb16-8x8x1-20e_ava21-rgb.py \ - --checkpoint https://download.openmmlab.com/mmaction/detection/ava/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb/slowonly_omnisource_pretrained_r101_8x8x1_20e_ava_rgb_20201217-16378594.pth \ - --det-config demo/skeleton_demo_cfg/faster-rcnn_r50_fpn_2x_coco_infer.py \ - --det-checkpoint http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_2x_coco/faster_rcnn_r50_fpn_2x_coco_bbox_mAP-0.384_20200504_210434-a5d8aa15.pth \ - --det-score-thr 0.9 \ - --action-score-thr 0.5 \ - --label-map tools/data/ava/label_map.txt \ - --predict-stepsize 8 \ - --output-stepsize 4 \ - --output-fps 6 -``` +An action recognition demo can be found in [demo/demo.py](https://github.com/open-mmlab/mmaction2/blob/dev-1.x/demo/demo.py). diff --git a/docs/en/user_guides/index.rst b/docs/en/user_guides/index.rst deleted file mode 100644 index 30e0650013..0000000000 --- a/docs/en/user_guides/index.rst +++ /dev/null @@ -1,19 +0,0 @@ -Train & Test -************** - -.. toctree:: - :maxdepth: 1 - - 1_config.md - 2_data_prepare.md - 3_inference.md - 4_train_test.md - -Useful Tools -************* - -.. toctree:: - :maxdepth: 1 - - visualization.md - useful_tools.md diff --git a/docs/zh_cn/Makefile b/docs/zh_cn/Makefile new file mode 100644 index 0000000000..d4bb2cbb9e --- /dev/null +++ b/docs/zh_cn/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = . +BUILDDIR = _build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
+%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/zh_cn/_static/css/readthedocs.css b/docs/zh_cn/_static/css/readthedocs.css new file mode 100644 index 0000000000..07611c2b06 --- /dev/null +++ b/docs/zh_cn/_static/css/readthedocs.css @@ -0,0 +1,6 @@ +.header-logo { + background-image: url("../images/logo.png"); + background-size: 130px 40px; + height: 40px; + width: 130px; +} diff --git a/docs/zh_cn/_static/images/logo.png b/docs/zh_cn/_static/images/logo.png new file mode 100644 index 0000000000000000000000000000000000000000..f0c759bb78c5424b4394d18a5ba833a8c9f43add GIT binary patch literal 31100 zcmYJZ1yoyG^FAEh-5r7!cXxuj6!+ln4#i4wcXziU!67&;?k)w2Tai+%{d(_xf8T%B zN>0|wI{WNB^UORmb0$VZO&$Z46cqpfU??hpv;Y8Dr@y~DkrDrXn^rXQ{k@>LDHwPH z0PM8?d|>=4Bzyn>R8@N!84V3PS1(sjJ6AVqMHv}tHxE}EdnaoEz;B~K+gCsBkx+d1 z(HcuvdhOK`tAbP);snb>} zHrg=my|LgG)mBw|aKQED<;&_z?{WU|>x};_RwxcGL&{O913*aYqp0v2Rg${G$d|rQ zSOSGTcxvpU$st>WQ2^iu&fhP@t6Aa|rpXlm3vgRzr^6lp_9voSgFz2KodVbc;vS>H zBx?aC*mWlG0TaZ4X6;rhR6sfa;N%}94FuF;050wo#Ss8&HECN!fVEQc8e~8M3?PR| zo(@*B2Vnj_Op6s3Is_=LmjtrILmOd24Itd|u%Uej0G~wtWGqQ@KxmFM>lA=m5EdXq zi8l>LtpZofxiGL-W9Ig;Qvn75oy?fdQeNP}@64DZ^}W5>+o#zda{4T7LG%vW2IUZ# zpOr_8r*I6XGJ_5PkUq^1{uY!-K0Z49c4TwxbbUWs`rGlw6(~;gWA}G&JS;o_TW$7w z!|eO_6*#wXm}c(*#~LHRhAzNv_sN836Q$S?buQp3&R_6VlCrQwV?vaI92E`ilHRhU zkMkf-{14AJ*S)xqpMU@Ty%}%=aL<*@X@HhRyt!Mr_%v!I9#6tcbGbYo)BYa+_~9+_ z-TXf9XJb}^*dO{aof<&ZasMu@6cEjqjD6hG58OYfGT%_rd@%s^QR+}N2vYl`6Ex%s z1~v{;f58VJ8FI%KPl}cTz_#V(_J0Hb(gvQABw1hp(^=&Q06^~vv2k@ek;)h{001fu zV{Vp0LLbDhhhpXrQuIQJU@Z_qlIV0$QEX|%!%%Y665>oL5*?`U8hm>*9BDC5?l!UB6nLo#WIPKR;bn+(;O+^|zR_r zfhqI_d z`T*Rg5kE`e&&m37^$b@ulr)9(ri_?)W@!p^m|s#4)lg~Zlerbq6de^d71+wP%f%P4 z?ekR_&Xb+L>>MJugw|7MrF0}?C99{td>KCMz!x5k-d8~>rPC@;$5rC9miP`|QN%5s zQ(UV#T~oM(TpKN1WT@y|?4!Y<{iz0z*E^SfI*(9hvYf4Kc`n!9*8b6sBS)nYVKFObLWgsen=HpR8!OxO zy*~3#<{*NUuece)8M+xE8OOCc?>*m3Ec@4@>D0Z)sx#FMu9SPLd6H+h0KQ|P(@qDv(~2GCdH=U$bA|HbL{ipjnI0&(hSWE#!Mg^9Ge8&=M{$r zn}(1EfsKHT{te4Zk4wVK^Nnk7w*tum)`ACNXW>V0ly3fR&F+ugo8Gf$;)~#O|5KEQ zuMe1~+w<}d&i7>xL}%A4*Yi{YoV>{V$s|r(g5aB}Wblb&wteUPP2{&~oJqUxlo7kK zdBrluPKK*bwg-HEMB+B}uH=^WygSxgj-nQp_S~E0Bjuxq{qLsJ9dm8E=i3&&rluxX zx9ZpGzg#2=E$2HHTJ*2?ZoVdbtdPwgRf^+}8_(Rx44Op9RLy*>&(;;!_1AsSU9DGK zF+OKKcV5wR(je2zL(db=n}k$1x+@8c)ETX^@N?Xtmj@p^%Jk$Zu7DfH9g zC;d<7pR*KGdv9$wy4K2C1E=jGpchigV^hEMfu$!tqJ3-0)cDwUHbakG4~ zh_s!nVyj$=0wy!QlU{W%v@brC<@MxkHLN=?ZBL1f2G_l)vgeU~F&P%Smb$-r)ZcfX zIw+{hs~WmXKISm>F*PuK>7!I{&1WcF;dGwZ95LGKn+!A;78kRAo;igR9}|Zy3Ml&2 zU)c}b73-%BSqaGrL3%BG5qX|`OZqkN&HtA0hZ=%8%plBnn7%O5uvR!nI4bxY_zEOG zgam{hL=JRu0%>#<39mE!h*8XUmo~cb?!Yash!M*)!)9g9_yePGZz7@-B9W zYvq&5=@bMxdQaXdx|7PDB!}oJ82=qJcP;ywW}CK`yVoP@O8ue1i)~-xkEELTE|EvZBWojKF$bQo$4hWnO@+= z?^~r^L`SDHlZC#b27Cs|L4qH%akE+^zDH0?Yj6;&f3I>;rNFz5%=@S=+h3Ycs`|1YrVg9@ZY*k)QVWBsE}a^*W=BoN&hnG(s*y!1tze_uzG*(khN^2`+=D# z!>rz|zPj;Ovs~xVzMh)da?pdn^Tx)GQpeZJbA|KrbC>!@gMlWcJ_o7gxu_eA2C^Rs z-^t9Jm3F*J0vsL^r`x8#PQ&q8@Ld{BLFzYeOZdxv1{8cSnbdu2sO#uoXV_R7*!B&) z!;GLH5ZSp-zvyje@zZnclMQ}wS-1+{P9n7=>mE1?+3uA0RUa)(3aki3`YC&T?%?I> zwAC~)(1PNu#h*Z_Anf)%FVuA-?=si7-OZ30b4cp@AmeUjP~d^aNy*Ic*6{Rh&WDZ% z)H+4Q{3!8 zqkqkv$8}vzlkhn0I92bAzu1q;*M^zR;Ooxo?%nY|svM?Q$=@1x^UvV%syX#X@#CQ1 zuN;q&cQ5n{MLd7AO=_@$mMQ=cKo0u%|_(_ z0GAp?kd%(!#zlc|ua#}^&cJm}&L+emaPep5dwE)T*ccm0>Ig}5Vcf)?1Hr_0wR{`I zaWzXLr3=9r3M6B<(KW7)3 zh@Xaj3N;E-+zZ*;g!+cQL3Pve`RO@)VM2_}1`MvJ&n6>whNY0yt_m7B#Q6ZYb zuy!D$azzGR46b@=(&d6{EA@L))(KW|v z#_~h;!}o*rBgHFncl%Ewr%OI{qWfXEp)u4#p#9>gqR6it=Cll7Av>vE6i~ySZ6-Hl zKjbwuQTv!3ah2z#|M#T)H9#*q-us?;+-7QCH`rg?8n8!Tt@55wk5FhsE4<66?JGey 
z>|f#x-rY!kgnm0$v;Xg2d{#JigdL1tm^B9!17g%0R`dZ#Q{JuuLl z<>GvG?YpkZ_NM7xVCKjrf+=8TV{{oRB>1lk9hq~5E86x`EK0f&aOSrpAmIZfZgLS# z+a?#G0-=1F_0I4R7(IaA#;Wi7cO=f51=`Fuu`qjczl|$L^u?4jhduKD#G;3~=?-zv z8*0SrZn!r5rf%u5{1fhR{lK|0hhf_WWz*;7PHhh(Iv;eXsJ!)4AGc#XwOd!Ugtt%6G3&QL zY(MhFf&g^w7oV9F0i=ILer<-4$(*v1WX~CzWC1j4{iwM|*#l@nOsT`}B23AmX~ED> zoWKRSNF5R?PxafP7!#B|{>0-lr(yBh^ zimrEHT@^Maj2Y-NaRWc_nRKp6vOIo57$k?S8)h6gafv+6soC+u2{G7}ACcQm4@CVx z&7Lb8qXa>5AL`HNkjv%rJ z{2`9Y_?g7~#`4*}$lu#T-mJG|8DltWAkGSxJsuQ7VXzJ$$CK?Lqs3}LvA~f zVgzPU*Ylm8CQvL z?f#@ka)3lXX+ng{{@XUOyXk)hK$2gpq`2ea*XS@W7iKTBUkE9Y2GfbpgauIg5vN?! z{8kU|hGlb=Jha96jUgAk`hPwFv6n*8|LD%>hjgP957Q$<9BVs&`+^MHLYBf79l`?q zfDn{mqvw#6R5!K%pI(qR5s8_fz;e#H;SMp=|c(X04|`bq#yA? zF147%(Hu2C+L7)5A%vY69j-1+07fGAv$qFLJ{=PR9^T%nWQ}I}Gf=4yfsI4{Fknxe z%cQE0(FyZkNo+RJZ-b+q{2?4)u=J3?lTF@S!Y zJ%gXfScQ35ARV{#yR?s@o~RD(Cx+Z9S(21eR(Qak`if=u032()urL_;wg36NNRm5_ zT;Ty>yv{i4Ba@fpk|6%7g*H`7gC<&79|g46o_&?z=Vf`Zm;Ty ze}vZQm>??5D69yk(Yr&8QL}bgg||UNK`A229r2suK>+n;Xhyb?t-@X2ig;3ZT(3=U?Wp>w4EfBP^2(>9Z7HywG{`6Fx9Ab-LdI$ezFJWYgE z0Yf9G8L`*nSBzL=uHu_f80bX=Ikhz*^u#$?k8rN-v};&FtErH2wy;1-iRhxSLPLGGHZWT*9Xu)THNFnz+Oq&90$UKRS{uM31qJmn zio=AcJnrs@VJxAKFiq`5j#SO~c+Q&+v-W?vt&_?R+3#y=M^_rk4^K`?ILK(wauS)(s>WSIVk-yD8l^9%v4A1)r9Z*fX&#m*!&+s}c$K=+~L<5s} zA4~MYZC6tf_j)~h1u8J&2lN!aXk%0l4ugeKSijmlWHVf)?5OaL%|E^fJ9PrYO9q$r zJr}KX7c9_#mmuf%P1D4hj4g{SJ@DD?Ps5={khyZ~z?KrlsqzeXP+|&N(gZ==QNH}_U`g=J77j5b*J`!zsUwvi zsg(q{-oR znf%@jO}0Rjo@RSynfZvvV~O?OZjC`N^?unZl?iw45UaT@?K! zt-QRn51}dT6vD9+viLm(Npy|uIb57}3uP89E0dmt>v0nXpv(=I`6jNnMwsF3la;_w zUyE)DBrS+PC2itn9rnSyJyR916jCoU8&lvE{CB)${SNrf#-0*B2KTfj25hGP`@;(W`aS&_u$<-*oPx`I+v-9n{GCPTBxRdF||UPP*}G4rk9Mxg)$EXIeO&xumyV z;CPM~2f}GO!pnXE<3B?AXl4m??g)S8ms1+~m#1+c22iib06X2#dY8EDc59)EhY!w* zO87BLnxMx1UGqq_=qB(r7$l6sZIxS60qZ|tf`e~R|I zrlg4v*23fOf_rLgOo_6I(fX>|^}t_Q1gQaYl$d3X=5?WC+m zE^1am{fHEM((2HJ*h*0%`Op}LonS~B&97O&$E;wM%AD4o0w8%pB+^FYw>U4 zK&RR({<*sV2t8D|jR|a*Tv5qNjavtsLX48qo{1PGm`QNeg5F4Yrc^D$*~TkyZ}Dc- zfS3mvng`!pv~qq35ukhV-wTAQQfE=O)O>7?fttT_rqY9Hg4qc_5h!!c)kbJ4;e!xi zr6ZbfHu+~49dO(oMCqu}wz>9rIBMqC6Z;{z{XeTj6*LE2;=HRz%*^Soz>d)ZA+gi# z6DdZ=NABurdOM-ev8(lVsOj6J^V%A_2B8zON6N;jj5O$c4P0uC7L8fqFjrX=fEW|a zD(~NraYmg>M6S4&LCwoF=@i1$pp9VUA-xM625@1N~e;Gp-=$H69q#t$12 z{~~dZa}x&=gTiA^7}p16mQ@B2cHlC8;iRv%;$DW35%`Hs{H*EAFUDrXKDfoy%Hr*Bc=CUZ2i#3jI z#8_!92CNIE#W}|?FLk>{dcpxgau8ufDCZfzzr)psZL;T@=bsg~jkcGrZmQZO#V#Do z?tjd#i&11*t!!C3`mY|nr?JS(<;1hES&EBsLTSc0AR__Vg9LCvI9&>`M3yF+80d<0 z?6G08sh#3QbL~;NyA49GBzq_z7wJjNWC|v4x^g&yNUdw>U&ugf5FrUHA%iV5XbnjC zp3xQOnnptdfCo*B6w&Po?q_7k1!3^@(QW-NCY{QD<>DM z!h4Pkbi`S=EOT51xWc+fcfzuvX>JVDGNj4{cwyK#7%YZG8%gCRAS3X>h+2NJ@}2!L zVn1tZoM-CE<&pZ!352HfZ37@FL`GC@`0q@+H zgjhZ42|rW8zKgT!RjdQfSeeLm;ippo3x&B0o~S8-Ew{vH#e8r61)*Cc+v?!}h9Aeo zduS0j2-{Q6xveJQ7S#ls&>+H+g|o@J1>MMO6o;PxXg;_hprGi^AV^j%*iUF5%=rfF zu0@Jebv)4e9NhEtH2>+&E1^Gff-)IJLl0&ka)kxs2c`*JL+%Kxf_Q|)I1VJ!(Y{CN z+@vgI+eqihuAON19N29`Vli#M^Kbohk7QwlxP{zH#^wr;3zgRhV(|4UwJ5&w^<>YJ zbcTuAAjx>&CBHAfM(u(7%~;6z%qaJb%D{KP0*uDueO$2)16p)$N7-6oU8!55>X2?N}dcVzC` zRX>)>>_}!++dz2};MB3ge;7Ps7#$npPs~!>0dNH!qKe=ZBIwfA(`8j8Wyf@x4rlrn zYM)s~daFj^vH#9uiXtSIQ#xxaYqMYS;abtD={X%7;jo-|U$TaudWis1Av%OyRx=iH zZ-{-KBc2z&c1P#hHY_F_>>$Z4oP z8j>ud1V6t(=ZKs9O@m+9kkQrD7%X+;yY0k@eAb8(mdi)hrHwFnKp*sn%UpETn+YS^ zW!(~GCVz^9>EhoSKRKeAAgo=Iq?5wWh<1O`kWhC>AJG}mpW?W%+P$ghvXW2GXS{8>%WI8WX_{HKwN*89l)&;)=g+)SbDjtAb^Vo_AYCv!JWvLt*V zL=Sxs3b3~CZc%{Y(jLAc4*>^cDp@0QsCb&CWHY~EY&LuDjKBDAh@pvUey6f@;jlL@ zxf92^Fh*raD?b?;cMx2xwWpr*r8V0~)R%0+AyS%qHip{G9mEbXpl)vJrDG2YigEBO z7D&aGO64garjqwBY!CgaqR$ZargW~&)UH1_tz*}>!n!6|oUT|N(1sgiHV^!=zsW?Z 
z`EDuHC2#!$M#axER6Gi^)i4-5lRAmkuMtmid-uvS)6jo5X0d^?siYy1cw=EM_RuJn zVIJE^+m9K~t6t8wGc(V3za_x|vB&wj5aJS%bfI0cBtT_xyYj6Xc;{G% zVswToUHeHMu*{^zp6Jjqtha9sKtGGUx@fjFiAEEEPRuG$y9NvGuonZ44-u7F)h;wn z;aVKUOCfDn^#`(TKcE=9o~Jd-x&OjkhR&jgB!vpepFl2Dw_PQ~olu>jrJb~scul5o zDb?=AlhB!^9l?r?S*capn$`^$sF$MI87&L}bz$`+#L~LE(+Pf@P?dmbEDI59-JX@E zQFnO3zA3-;UN>fMTBk95gvG#vL@mjqTWHswTJ0f3dXh2RBwT4ER3(-A=c*Z|%bHt> zqod2p;}Nljw^pgFR@d47R~m9zywHGt)4Q>;8S3K*+g$Q1dk;bIXvKxs=MVK*eOA4w zTS!Jr^<5U;TYR|L>Y;acx9s^wMHj4Z(B$lEu5OjB5`*J35N0pJio@S+0K_VJn&P-!)m7VDjVA?MYU~-WR2ThzvLf_ zTmPHj1xT(h5^c{blB-s+06+oq49QBcbl`zV}IFk)2PcqHmt!ntE+0Su@(tjS+W(z zG5M!w^684v`fqzuo%YrPFE$!|nfH%t9yyyPS_naF8HIF`wFq=Y`{L3`4(~8`c3c4)`31`>BeryOpINYRZ&pdQ2doR1(Z#h;T8B@HZxHrt-~FKHe!I(eXQuALaC3rgcMy3T8H4Pg6`+=x;4 zq(HQaQT-X(U?MN6?>5JmeUcAjba8_IoKqWV7!#@v=%63Rrg~}6t$vm@Sv6I?FhT2l zCay_$;PE2=$WspA_CneJT7I(5}&-f6iY9l>mIg`H}j#-v8pn{HjUDu-xt4a~N(e z%!mLMfp9PCg25BZdbCYl^Usbp!XX%oCa>hh?+^Q8Yl_f)qZ|sBznhb%z(teGf>KwP zS3b25BfIrInOpVKwD9sp z)$|PnRwrLGWZ7PwAU9SoQt|2t>Oi)b8z3z&8wPsG<9?I+NIM1w(|dk8r0-`g3y+uF z$WDB-%1X6H@4)Zdz3ZMH?#v=nzOjx6p!lIL<_On=oaVH#=cfb2t*>i-PN6YNI+3-z znugBR2m*aDM@yDi=wRU@ntl2bw!HnM$P$FQ18J=O)gfATv*NbdJ2e?rmBXQKC*~2= zL(BM+tDUL(A(l$t?RGG6iU|{<*25~0qE%WI$!6Pq4<~Y%*Q<-$FGlI)xlI-|O~H6C zz=qzPf(!Eeo9-I(4BnY>tcg#IO;|Mk`FBKh&-IMb3RGcG6uuYt0L2m3x!Pc6Cq^-e*&BALHHLNpmX@~pX#e$h@rFP_h0nM&!xn##U#VSJxIM8b;KWs5B~CYdB%??<9~eYIrd*0wqAL6eyQw zBlDgS4tG%jn{Xmyf?{!nEjO>}fv!r|mO0CY!3hs<;-~Z8s>bdsA-eSfn5=zPB`&H6 zSIS;0-C&Bo#y6QT*Q((o6yc&Cxp${mK8?v6kb)OOTdO*#GgvO;sQuJbe49Jlbx!F0 zPtH3bzXps}J3MUKKry}#lp!XGZn8Wi)p{}npT-wY$sB40_GV9*gIAJ{WWRY5@?m-; z(dSl7cmq-LW5M1q`_w3~{meFZl-@B%n9VS%la(x-u%jeQg;D-@=D&jk12LmMwIq+D zD6h?!pMB3IlI2xmxfyX-HpDSD6KJTkbld{Gi!{AcqrJrk&W;zgW8D!!&%7)9TO1WQ zx_v1AfhW8E&Iry7=-WNL^e_{${2+bwk&-Pi)m@lhiU4Ad3Zx= z^@>BSJl(@?jxm2LHb-z}dus3v4+v$4N0xwj<9D)}Bpl!a8X;(SAb!7wd9b{&V_=;2 zxg&&*_$_p1EwaK?Pq$+Mz5_!r-x8G`68^qAUbS>EM)Prb;=j>@nrXdT-TLT}?U;-? z043V=vZQrv&_T^Xu4lyOI&H0>R=16Xed=Fd(Kb*V3o;jxlcS4bI-L`$Np_NBs;wYK z@F!ogPqu(>DscKZV}OvDF>*#HJ*R&NPOn!7HrwrT>`Et~(av>L};g4>;DzUG1v zMKyL-)R^=v1z|gD=bO&?FyCC*aqN*e1cNraUEX-?T&cdsU3tut60(QWPsS%2aziX$ zb&GPOZZGyY$wp4G!Zt;=&Hx#=WBKDoSg+zYQPIA7FxPF917%uo)ET!CR}4ZbEQ8%? 
zH1X`Gbzv-*wto;O6{9X3Y`HSyILw1u29UPkf|M_9YV!_=@~osg@KUKQ=1RJ2Ii?;K=vGiwHRfm0LW$#As+?rZI;VH+&6I?dE-hDk4bmIW>MTXZ{EMCGhw@p@7 zb8?k-arqz!uLYB3wOs1Tt3E#a_rb4a#56A>c#V&X)NI~f4hQ48X>NX8%ng`AH*wYa ze8{?%Ker!?O|1U*p(62HjV-vSeFL9W-_96gOkUA^nv&tHGsosk@SBf9mMR@4CK0;O z7D^BTo~H6}1+=4DXIHM`mB9B0d4S4-=w3rM)Ago;-e9(C7GJhjsqf~h`eeZ?#*KS> zUZ4jqL^4uI(e_HoIi++=0*-n!48aOgqg7B2$^z)+f{{MjzrPKoKLpO@Pw`bBeW$tCI-LfWv$^yb0GTVZ=UcQZjHW3k9>P?0PDal*us@FV7 zEfuPzY5I9XTvNGVr;$cbj@3$01Eox3r_EOiNY7Gs{fw=~yF?2p%p~_6GR`JBr$ziZ zQR_>!#3P13DyYq2=f7a4`!^WFOH zc>-g=0`~~>VUD(iaLgO3*QxwQ?_2YI9mPl)KGvuG?U|04&fCKKw}SMD8feAJS9H#R z$hvLE${Q^wo)&DtAfEEKZ-x&c3ol$89XPpzD+e_Im@%hR0j6DU4a~m>m6>%W~ za^Fvnu2-`1N>C)5lpCR`%aaYx8?ed6Hw>Lxy-u{#6m^v?;);nKs|XiX{XVb^<=SUwXHvHa?TE8{JOLdb1CvPkp5f!_COnf``cPf7 zy#L-|D3rCs16j@%hB$Q@;%vIF;}i}p;g8)?E#oSp$*rW8I>Gjbz%*z(+;CTw2pGWT zg@nK1&=jEb5@>@rFid4Cx^X_C;WQx}d3Pn{ZCiUyNUf+CQiNnEVEwQkX5O3Js?ZuZ z32@eI>@+A#usg|)mWr5Q1*|7OjkKy;*5UB;kCow#N#iRz82Q4cyg`R^6y9@|;wg@m zD&!|7s@TL7KNzUtrycdD*4xhq`JoC*8>i&-xzw`3lK(P5`-K}jWG|Um_TcWo2h94c zsXsu%z5}nVZC~#qCdcE#x(HVo=Qb;J4-~HCMCivPqU3)xr^@P@Jei|0FaiW&`n|AX zl)%4o`PL*^(K-!AE7XPsM6X3MPIr^^PRN0D9|Y;U2~(We4r1vggyT})@PRMbZ&nWF zOdh+!3W%wS#vAR^O8de}JdZ&OW=^zD!;8Oe#KY#nan>qILy`LOMS*BJi)ESl<9Mv| z8cdQk**=%-Er-j>q_&S15TQsV?{_y5VjG*zE~H)5?wUmx!{3Hz9O0nHVXM=(8lrm# zTNOL`(rXI8ql);>7W<1ZGTR=wQ=7-Uij@LeLC=1rD-@%4cB($2V^hcUKN6606peSM z9x3Ho=mU|mkmD15Jc^QxhBC7qcp>i-bgB&6c2z#CsIHO?Ot${|&9mf?_8ABAt|$0e zgJaC=lQf|sRu?tU>6-BZJC$m)TF30PG7SxLJg_-Q`-)rGD>MmC;Qos~DzwNmYlB#Q z&5$8I8bu;D6trxBu*>tZ5!u}3i$kk~OccTD@YPGwt4|Nn(kfF1%2QCmxX56=P3a%k z8^uM}m=(#kAkxmbYtBK#)PsZ6VBv=C|8}l2ZpV*X%4O(UrLCAuXf#V$ZFJ{-=Q~g# zbB}F!J^JYQ0|O~U(vvf#%>_x<3zX3A)j!3ywQ~I8dGbMYAtG4x8+)0|*vW>vMY+f` z3K2}vA^Ue(6;VZd(NZ#W%QAqoMANX>`~&l72mU-!UQDiE4F;X1XbQXwdCEApKl9?@ z+JaDBE5Sp@^7^u(@ld63v)8s4C!0R1vFxxt@cXMW{oz-Z68#e^6{NpYD(}du0W@{K zNoS!KpD=wADW%Te?ASiHP~qXp3F>*JiYlPYi%6|l#Wo^bpFhPac@w!gCp|ukGcQXC zG#G1iX0c{QefbsM6OTzUvBAG5@a*L$0FWRhc!w$0#8Qt(UH~~z>VC3QkJxc zR*$sOPQ~M3m=vwm-sFYoN1hV#w>==($D67(YEYOS$&gfjcT&`QVyQBDw=i`q!oTt^ zYzygPaP2+)9G~a3PDC-zoKMPIf z<-%ikwDbX+ykfSlp4yWLkxJ`GzvYPne-UQfzFkQRTS5O+z9b%=jx>2>e>X^T5r#YmaKsddTi?3tzPAxp8BB%IB5~D{{-4rIGQvoQ7iE zKXGQkNbIVuZ6G6m%dJ(M3zRDCFv;LLq~$(?;HDIe4Zo|gplL6(MP@U?F<9lEQ^*SY z8wY*le2LQnlxZ|kKq^DvQ@8yDTIbKD-N7;&gy#6gaXB6OEhE`bTUsDE+a`I@pv+GR z4Mmk#UF!~ui%<4di>G}0G3`~Ipcy8;p=>FfFvBp8;kX@p6U#j4t`bf_v!>BadkTTT zIuGD72wH2kg!SXdxTU><{j1&&g|r8+Bv)F)FhiA#(I2T*O0B7K`zq_WK};J8hG_PN z3C(D15jhO)zpU4M^4S%N;5TPN<+hDHdo+{BB@3r{x#Z5~Dk_b^#@+6x1N7sk}2bMRg zdqr@1;MeJ_4eBv6uKgaQK)3ks@b%BJ4VtCVBy(OQ4twYm?(y9(`zOw@ty--j^ZAjS z@Z6LVn@TsKn)(i3U>e#@7ko}2p48zQLz;UubF+C5y+1LA*)g35l(+-!*0V-j>-PCC z&XFfY_fSDAu(LL#7fU)-_WfnqF7u8&^99{gpI`@D&6qF#^lwCH$>Vl3E%$)f+~dY9 zHAGaaJl--Z&{cI5@NR-~_3O~WeAVtWUEo3%Rr!d>?L}aEBa|NG_9CEUoC!`5#7{{t zp#w0#BbEFYOgKNq|2k#w*hPBH-k6q$WV-SqLmo=F@RO9@0@x=$o+7wm+xh@ z6o_cJNqjNm2!KWX#LJBFyOa7qb(>h}M|FF5;WDcR~6hbpV!|7n;n*WyN zLD>P{iyCfH`Xwz27PYc0m53*15_08>GenO}`y-#t9UTpOeZINNffmyM`Mq+%mVGj# zr0G{Rpa`}5XL$aDCkXD*`n7-^O(u*odqZ6GPE_+(?3eV_=wXcWo0~Hv^rg%n1}Gmj>XUp+JHg2>x3)H zv``vN$@5Ax?=fko&LzQ&*+<{_S9g83oKMK=TmY<5_2LaO!qgaLAwSwX}zE7 zt(bI%?#})=hu|*0`C(L+Iq}HhT9a^M>o1ObDNv1jNr|Hb5p^ahJ&_1p|J)9yXnA~i z5B(11H;1bfa%8%(519&q2}T7Edt=>DFn^!FyYDvsnDXXd5xldyh`&(OwhG3Q^SyT2 zE2>T(uLLuBFtS|k8dV9(`dZFN1;H{MuetQmI9a$(R!axo>B2N1X*fs#XK^Y#b}yJQ zQnI5gexV9K`fEo!QJ-W|fBT`hm3aw2%S%b_6t@AolG%SUYL1ZtPN;6`mv*c0BzImB zlc~levp+7Xt)Qm}IFD~iVwn$(*|XLzdI~#AY+-752%Sf_ooXVehY9Dcjd1aOw3CS0 z3ZP~g+zhK8tTb%O3K2iH%{r;`4EYr{KlK$X^q|JP{88s?)$vkBt&SODFIMLlwIc%! 
zfbPckmbu@UfDcHo^f%LmD_-ww3HHjwL|^_^8HxuYk`=vNmgKugjTbli^DnvmFT$bj z`+gE`o>3g>%x>yUh&zsER8?AU@_`b60=kivr-XXpeyJp~naNiK`A&%wffqh>fJL(o zI73gLO^|vCJK+RXE1uu&t=6PJ-v4rHn(5lEx~-I^O@2f>eg#-eZhh8=|GCDrZe0^8 z-R|`sa}(=VVDy7od_EIV_y+7$lBmwb9PN$2-K?|0^bAT54^=XxULsj^mDC|!mxQ9kX?~L&XpqTe5M)Ehhn|;z_ z|KQc5ZD^lP?H{sVV$9!L1*b;(OaQNRH=g%$oBAun$8A z;C>@CGaZ`}!b%{%@-Q!}0|pc-UbT|sI%$u6KCIR$m;1DuU0I2H)T+sgIqalQZL(3cF(BU_~j1qF(h;<`E3Dm_z_9V-NG3@p?1zzi;JRm z-ZG^W5D!YLiU(C0jQk-sER>7e^%9%FB~Uz-yiA9V@)KAIxiH3Eh18@GQnvq` zE=82LO5a|0A9RTuT=m9~kMu#MqCQMwy6X)CBGOL9*w6u!1otPQIyL2VR4ZQc7_3w4 z>UL3k{OE%wxBmMed!g`jEScU#U8APIsj19?Vd%v_Ue}@NGwLtyjI-H$bF*^~Cr`=C zBb}t#FO#3APZ=zfm$=a6X6tWjZpepftio}A08mA6bZt^Y?XGMR)+iIsMIb; z?t}407eW|McidIBzr|oux((vDi(ADQiJo~-{-9`-4eEoj5;Kfdon$e*Y)T~*YU-`I zmFo%zU8b3k|AC0-wGnF9{6@L?Q)$0&2A_8c|0rSH)?oYiC_{|Vs5dh-uLt`T$T zr#9~SqT6Y#Q}uO;m+A{#NS@LIQP0-wE&1j}QPiD5COFTQ%>qp}7`PZpgKNPb(wNZ9 z`%81~oTpl464l(f{>1LSt%oviz&YqpV642XOy6A_!GrXWWbQauGZh`@v%(YZ1ZxQ4 zR{s@F?ny)~uxsz)OF*Q8E9a06a*Xt(P2?%+uDZGK_rk9@av^IjR6kUZ@#9WbcaSS! z&gn_tebJPjlxuU6IxnU1g`ar!VhNhFlEr1PtDKahfdVR zzNbO)9_>G{zYsId9L766c8z2!o*ki7iv{f(95<%LN)_F$>Wwq!VJ|2GBPtxf(K61k zd_M;lRzJp=7KUyq`jkBBidyG58VWN2`56BWrUyqWkj=-&0ds;efh{@RpX{}CCRr!z zHBL3iV&Z`^iX)S+cho~tbk($3Gz1ljie}QxKj@XrRQKDr%jNMx=sr((os0xGNLvL$ zNI6tcJsgoTCP|B( z#Ma7sbc~CH=?&4kl|=sLW<^FUO$=9+#h53@#5a{JMo0wDzP>X!b0b6{054q{=N;wahwabkYtEg=E8*&*E!74TDt$=V^?e@C3hzxbGFf$c5tN3-;DtqH)n>o~hv9oLPsXht2egMLw1)OVAE0+x zNp%=%TJ$$aE#9gcXf2d9sO%vm!%?Sw_BQ%vp5|W?S@U^Ksk5OS2L%V#zd zwl=JK+)3!0_{99h=(9VYBlbEkwy4dLIgpkSSIqR(;*(v6DpD>~tR`)M8fPiA5bN2Y z%`{cGvgVzB`+|b~l7j)&n&;ozla*99{BvtSHgN+ns8~muzn-?3k66A#mp0)4tLZEl z+6!3iya(&EM4-5r9vyB3POOMxP#KyjDi?ryy~J?H*{?0%Wqot=5- z89TaMO1&acWjdlgfB7abJAC@i>Qn9VO)2XxG1rfuUKdnsS|a2Ne~8E_bJ(=B$4=;b z=j-cdeJTSb4>TIIhPf`ZS?6pxes|umvd(4iEf?&nAq9EtF5cjHj7ZBa(ubKgC1S?r zev6}VW9NAwGiY!OpuFo_*t3HQx1CeL-uElAg8S+TOtTc=2wsiG;q>2=#hG?dos=A3a zHWjVb^<+|YWaM7ETynKE@EqFr16h~`(&Q#>2pbN}$Ck+JJzd9lLikpBR7!3qNArpD z*sBoV@>5NnAj6#S)EV&GdY0$=-_UO)UXO)^_NeI%Jzj~AB|YnL>m6OI#lg-PiTg#- zmyyY{A7VTBs%4W6S9bUybySIecCcm%yfCka2>Sg{IGemhr(6RAe1@dTr>gU50) zoGhcy$653D1+tnt{A0`X5?Qx)V^iKH#axn3mrBh*n_+d;1q-77J2-jc@9zzGZ(ky& zyvYHTSyRAXmbyyL1TukSM(==$YHS$8c&~GpzEN$O1h1O!Je!RI{oKTtJe#eQOwrxF z4g&{}al$HC3As|8Hkk8yn+i?AU-jJQ(8y?G2&birG8qoQ8GNDu{2JBYf=3oM;$1mL zbiijT92ZfCW`rq2tG_D1N;cPUMgX`6IG0_!8V6f)Xt$_X@t`PR`cw}PrCLuk@=fPY zpQ$g$^k5a-?pkW+RYyvDC@aSqMRRI!Srt2R4u)48h2J$340s!27n-5>gNuckswsOj zv-$UYXTIx>*0SGy)$(CCPI~tqCLqzJ*CFc<$^esXi8aSIo#wanZ5&1$m7tCM5V~Gm zlS9`+X;_DRwvbd&FU&_WECaZ=`rJAO5Xlt68A87SaH1KHBvX+ny&> z_1P_49kPXCrTm154Xtb>%z4pdSItN>i?C4wx%S13o)w8#-qe3Sd(9LfnzQa>exg+h5}@VCqj^;ha9#)7)aCUsMb1%Slh)$T#f=W=V3R7d@sV z4l2hZoGp8AyXi^G1q;k>-v53NoS>(sIi4+xQaO`Dt&n6I# zC0r588_<)o;b<>8gHpRe219r2-mPbPw|M*&ec3Mde$=VXS^n#-r>{SG4_D2ZRu0Af zK4#d@8*Gx3P|r=OvoMMKGM%Q!`2_mdJno&3x&!xv?Bx z#AVRO7yO!+I`}9wG)lv{2b>989qkNS*jatK5AOB6T#K&2%|iv=(K$2v>laN!s7A<+ z*+f=l5VCH~82iKDMUW(}&EA-9ce_BujAf&9a}0f(=%Q)Z^?_Rr+U{<@J%5|cNDJ|k zy$ELj_?xY4>^vNhp7_J6bYu1W+7Btl!!BAF~Rzg08S zt@-o3|3Jws@xXIb{(@r+IM1Q?0#09%#9jrKzT^=!5cTA1X#NZ^qI=(~>b*yUDrt%O zYHNh%DVcf||0ALKqCJ4B?Ylu|*JqW|j*@)pckRhPc52CA(L6>5X#0^asd7r%a7NG} z6b^KfN`pgRVC&$Gn9FGGVT|x^x7N$Enn#8pPes8X_^-H+0!22bha5_gYNfM{ays5W zs@}&eu+CUMQK=fb%Pq=CDo_?Hokdx8<0rcIiYH3x@LkLL4*APgGx$1E43Xe6DRjSR zO8=3w>_)F976kVq{dc)ArS0ldV*T_2!va!d39vVido59@Bg!Ir-om?dCVzJ9oa zWK6;0kkxv#JI&YKdEMtE{ky%y!TkLr{yd1_vgQIEy7wjYrBbR>p@f4ZoWtu#nMEm; zXdCnu9mIwq0czl3J8lK(f_w{%C+;7cmP?wyp&5%|WUc-fk@!mUaWkg9#M);c7jk1d zK8ncc9L@J(%jY5RiB2_$YuKgN8huM+;MX2jQd!&*AQ>(JP0A@Oq}=l60-7f#?ukQA 
z;4(MBt>LaDg>zh=2wm9bR5}&Kee;wbfkW8m7F;#3kAT2bYp1N5zWC&g11i;yJE#?O z6P{M33_~}b0d4r@gh|(6`TH*QDmpH))YyY~1)9++G-{oH6@T@Kxfw8>5QkbvhYeB9 zpV|*B0gU^?0fZW3nZ!0vLLGO>J2U$54_L6m@r*_@N@ypjJe1>>!v;XLRwZwGd;uKo zxDj)pGOY}+Dbpy!)ZPK`To-@^c*wB-K8$Ovuc8;WD^a9}S=6RQU#Kb+9%bgA%l^%P z;M`+_R_E_hJdM6ICNI8KCqlKy;G$SFM{@GWi;?N@UX?wDNk+M!mV(6t zs|4K3SC0G3Y<=66bQbmY6InzpE>t=jdL7p ze;>PfOnuaM_74I`18PQ>Z=3!rMLsz|&lKkC^NuezNuO;io$Y|;;yqFPBg&itJ8^k-A zjfVIH1xWDTSG)s^F`M1 z$U*+$?^C{`mIfCPYlSo#GU*cG%W{Ot$=#hSM2rfFp=}`E@${i*|Cnb?>qrV94x6V$ za;JUQkY#s@D;hbi5Un~&CB5^uqiG)8?@R7Au2=>brn~Iu{bO>YI|GYrk2k&L{6$GM zSI7q2hz(H5TZ-X_Z*>fVpE3XFm}NyVW9!J3I5;1lE>;s}xn$HMb)dz9J!15ppQ*cI z9dbA{9`atD<-NO*dM?77;l%cuOle2?@c!SpoD~oZb=K5!{b9^jZtuhQoE5(~<;U|f zFI&m?TA%W?ss{N=e<9p?T;7xUyZ0u$96$QW&m(c=jb^)?8RI>)w+VI+%G_HP&E*V^ zUv=q=%;B>E8YmOPa4}JJaY-R|fr|Jw{KqyxELvwn_q7&+{%Y_B@`O;mr~!2Aw8 zf8*Mp=iP053@|~g7af)rZar>ASsCVDtM$4ZTc!2cLe)*;$5T>6j`fQHQ1JNYl8R93 zDy@z zr@4&#EOTVKjdrtg0zRiP;vPnpmqLG_4EI{+qyGESj%J1Z0R1zis!#eMSTm7OH@`p3 ze4f_y$4TZsAlM}(SyNZx@($bIzZ#(TxGL_eb*+y^-4r-W&=KfQQ(_iK`VCrCoHs(S zQi3g27(|k438`vI{$mW}kT#`}_?DK~7fHc&>xK#}qvKSMq=n2#Z$Ye-5BlMoj?F{7 zpLmBxy7QE6sXi6n5+&=X78{ann8<{ShlOh~^ObM++2P(W`6SX^w z=CG+p9adBevf?$_VgeDP%bnbJPHUUQZL;8*egibMUQ9N=uAKP?Pnpp^dz;@j>Cdj5rR4zodlLPq z1AUx}5Ri`8+@b6n{Rd)uiD)Y!Puf(q62nFYh%HwfIjvQ){MYh)I+@BndDQGn2H%ep z)xBz^UA|3p}hMT_*Ka27c zQv=$Q1>pkZyFkW7FO(OOjaAU|IT=+!2=CaEAR}d#w)6Mi{Za!bVLkj(TwT8>Z^Ot- zjhLn?<>wVDfZqout_JK!rZ}L=q|oFn#&Jtm%6Gx0%8IWFw;yiL6xplQ^UD=&i=)9X z+HhAqO}@FF7QuUdH+~wjS|fT2PdJPzJ&4Izep!RRI~uH3vxK!?J_{7gGP+FMQ8CM9 z=NV{e&Gm+#U6xq57nw-Ea9>mkx2!-|o?gAvhbN2GB+TblD4(duTx=WTm6!7WI7uYC zbk5C))!>V-Z!GHh1n#KSz`qeOyLkmT#82yu$uIVoRKNK)-oAd|6G;7H>dkD$TW z6jB+P@Yj_Y)#Zjs%O&UGGv8udT0b1(Tg)O*+FaF~)F$=Rpmk|82}K^e!+w0fdn;wt zt0thSX`MZ9p5m=coEF>#$4&={F%u5A)rl8zo}JAo{i@Kb(rA9lpc>`Mn)dMIKNH7V z&-T%-q@X2>6aF^6$!q=yK`n(lptd(p-g7_du`^Q5O?FX2y7Vbhf+lATujPkui}0rB z@0{<7adzg#6GxU4r)9clMGK*)s}yuLpNyxs2lP+OF+2r5MpU!~FTiw&N^l+D3Fs+! zrlhpxt3DpXPqVu8YVByjkai6QMAsGF;BUkGq8d_ML4eM6x;A6 zjj+q?fPqN7ls4Z?rcU-xS{t_`18#c4f1n$EN11)0pZ?5~JNC=Qt-e4hFvQKuCN?ZyfCMhjb_lfGZF=rix_o1ke@R zL^b0c*G!tIE6pv@AgN1l*O=fK)SV4y)?6xXbH$pcgwAG`e;6}4tygYTsi_d?k}au%x;_!5a$Vl-;pp>=)oc*dJ<$dxg1ppHWk43T;N)SE(`VM2P=5 z{#<0mjiBBF0l_0;8qhw!9{nY2(Y3GwXJK_ED!aK6XIT~tw`hkC=t<{U9vo?t>}Nsa z8IPyCVCBE}2~~dRZJ^T$;;74IGu9C{OeZ{Oo_CfzbABTJnpL`K~NFY({ zh(v-(1x5*5<=3Wbz;Z|vJ}FK~uO1fOaPC7+3eFO2m5c?>)Lig-9T^gGgZ%QH@$4nF zVxH;8t{mm#27Dv8?B;aFp-1Da+S^|Wr4z)SJAyS4WqPVipKXd$uwLWEe!P%dEPkfE z8jY7q3iNa+*dLLA=6%ylrVDgCR_A8=*sVR@rX!7Yz14mIKSL6pEk`I}%0$m`0Lc}J z&GRGi%7*E8bsvlIn|tEmgu=FPLy3)IhMS? 
zmQOIB7Cv{W%%a%6^?3=fY#_bJPuN}mWYI(S9ltE>QGkN`VsJJ6U6F>pPBA2V8zXu9 ziX`Z4qPa+O#-O{v-2_>C*sqAIEb1J2X*Ti6ooJT#H=Q8>BGj#wc)inMPw6@QUb!N9 zlaJIxhw*zU(U&@K7{Ug_MwK7@QWJXYVx)O++6=_p#FK7N{;s=YE}M!UofG0N#Z5_w zWE{S`jurN%+Yn6Cybd?AW<6}z2?di^7r4w+qnYJ#o{}d)dl;|ZMXHz#x)g_>%HHr1 zZxFY|DBqfC{`4ADxIU`rp2BUpPOCHOo&ALYwf^oL!y+|kNZg7r>XnbzYSF0jj?kQf$PDmpbUxzGxKS)cX44_+e zA{FGeSvw=Z?c{(ZO-|8t{Hjvx3mnk;C{4~PdE@;Ip3oex$d9mf3^p@mPl~v#9oY?FYHZR zklvgprP#I!BouDxbU+#^o7|6(Ju^ix;l01&@Zd*Ug&sMMoI|_K2?&%Ll__iRhx8-( zG$wY>Q~KH<2K?eZ5#$lD&-BMNqLTScH|Hvtm6&_BQpu_cGL^3_s$k%bHIQTIK04NZ zyhJXj>T~crlEWq&zMFc$0smOyO;#H$kmLJ3aqB}w%WA-y@^X`er1c`}Tzc|`Xye8u zEbH7v>ImB|6V0b5WBd#E;fN=yd-!DNN)wfDlSs+7C7}U^;M(bFR2AkI3Jiml-I}sM zkBT+l%1SkGwal2D~KRE-_ z_#*WSB&rLGi3I#6g<9-gZ}eQN$=@N{|7<^96K;JbWSs+Gy+R)iUD1lOoa24z<&_ETXBhOmriJjsK=e`FC%*2c~WiJjdsD2y&Xq`7R(!tr!KbpeWSIKW|37Kvd81&^-7zo{a9Fhlr+DWsw0Eqr8>YsQ0Gbr1c=6Q`|%yQ@SB9 z%dC3kyI6r@{6aLpQ9I2z>^=LCY@=o(lb^g&9$vn7VLrq|CF1yJLwf+5zuD^tAKo~d zvwynA&PC@b!tsKvF+Z*Fgk6*qPRHhZ0!@kKbgWstWuE!=O*ZFac7fPlr}9Z#fQajnQ_7n7$GgHv&g_ zZv+>}&`qLi{(Rq|yUgv6`7la#vdPyE#nrWUVVHuHK0Yt9}<~&Sevtp+nE9LhXkr}AiyHAdp z176%1%Tl9US;|KXZ*ZNBG67{2KsfD5^#qPM4KcNYvl@mb0^d*gYL+|=_w|m}@3NI^ zwra3Q&KVKxIkC?5BHE7%Z_#c}mTNk|z451BO;PEEEWq~4M&CQ5uVd=HFZC8py-Igd z?3Xm|g4F(Of3*pEuF#Z(YDsc)?p<&%vZY$5rvC2OTMSP%8zT7^B6mI^Z@M@8c<-up z&0DGsag6MsNMSCbJBw%xTV<_jwzyGC@6Z6=Vm|XW1Hbd=SkB?L2r0+%xVJ0DTQAaU z4$p%ZVRb`lTepq@J!^)048@GuD5{Uz_mvm7#z1#ko>_o{Z0Kt2gsB5z zW4ZqrWG_|(1Q#Hpl3$;n6L$S7m_hVvUwcc%I%hcf(MGEROQWJELiyHylMU6hI*z|j z5?t}dn0l0uon?aBe)FY&9nKdjCBUxGZ1r$_uK?ATV0jiHo1VI8{=DxXU&hsbmg`WF zTyWxatX$4jVJsU!(M?ef5b|ckGto9H#xbCMc{1Ak+)1gSx})?1p=B9hV`LE+E;p-D zVDQT^y5W5=D*-sU$t|+ww>5C9|L)_r*JZTR`ywVYkLhB+P=L9W)<>K<#4e=7F`V`Y zwO2B9D(_3NuH^lqVp{+sUaO;lyE8J{q;6Cih#74Us;K)YpYo_(!MFrAc)G?>0LYnx zztKIGNZvs;W^=-V|4H{oYG-|+E(Uk!`=*Ig7lB_(|qIMheWOe-^6*1Gy=W-5tn{S6w(5j4Z{-1Ip4L zcc%Po$?zC0|6I@#->9oQ<&E~fgO4B;MmQY>k0eHd-#*h`{GM?`7tR?~hf`+9b9lip z$bc(GP=qQ<4~<9c%gI-fPDiRDK^p7w#2PKQ(VUY0HU%V@J4_=!uc4-^p;0+oJd@k4gD}J$|M{M-2-jA|Ot3g-K zAw9Fd7R(}fYHwuTa7WiP?>|B6d8nV?J}nsxjlhZVf0%BAqbmBlF^SY=cb*{>V>wu- zLV#&mqh%sEUaf79Y*?I7{^~h)$Pt9UNmUOa>k-dSdhY|5RG5t9| z9;)Z}4#fCjUXU&-?SIB~^+p5*2p}u?OJ3x!Vg6E`mq2-0*Q4xBe}|#c;+<>xeO8=i zvK6H~rKDKHIYE7=MMHYEh52KQhRl2GMQ^G3;AUI@ z1OI2E+FzT;fBZfedW6Ng@1Q?N(hP)*AEXJyB7s?Tf>wV$xga(fgrrecQ+=JTDJEj9 zq11JqdJigcK+@V>y}P#%mob^H&!23+?JU>a-xWLa>pK$`Go&0MXcYx+Y)nqnA72A5 zs91NiI=wPSmRGp>>s;oK7i`cVa=EmR1bdiyK%wPt$Z0%uh><$kp$a&d?}wE_kpuBF zzqZB!Ea)Nk;Q-O^eqSj&jp*}f^2i_MYc6!+v)KEG?wfQp_zC`^Msgx!o{-|27+I`pV?WM5F0oe||iM(b_}O~!P6IY}5U z{H4~NpnB$|A0=>@vxMjFV@V9!Mh$WfDd)u!2i*;{s1;RlI@H!-s31i->PK)EL7O)_9wTeX9lS zk=+3JW=lX`%q2%z6w1!q=@>cVoQ`BZAr)h@oI&y@9qy^D9R+E_~^GkqdWDoI7bsBR1 zL2;y8)XOwLQmVA0WlZUH%_X!c7inFl^yQMLptMZWcj%~56poso78W3FggZJsckSMMF>Ngj+?W z{TojX`o)UXmBAth=@C!lJ}houm)0)Uihln~bcx?>ot7anbh2 zB0XavGNvHy^xP=N5r503dDv#5@nO&qcVgRK^jF+=>S_gExkzWI9>cc2{|)4gQk!UF??JMt zGB5DlHC0hQD&P#jGN>)L)E;~stXbJM6X^izf~) zM=UyLW^Voph6sOBPTDIRM@FBLs%(NU2rMe!EQ;aB*h>_)b2u7^XpH^at{XWvqW%HK;#k`Kr7<36`4Supdt~eDhmXuh_x)b7j#7SN$*6gvEYvJaf2D zy&CX)BE4e@%y(H?@fL_uenj0wis@O)({96pwr;?#l&47kDQn5YZ*_qA^yNjwf;H=j zbQ3BKj9yZUpxb|Xe7m*NUS>J{fw zZ?Pf#1ZY5=Pu_oPSMz5p^xoU$=#r(B3r!r%EHvUT*@}!!da&c4B!|_XZ$1BnYqwBr zj5XV*PH^w>E;O5s4_7d>={VI0ne}Mb{X_8O#%j{QQnXAZN$HSPXuvNgpRP=0m&S zP~CZwzZ#QA8LHoO2z(FS&OS$jEQV1sFLN`^dYf2ST`7Li_RuFy{ndU0%|Qezo1z*W zYWu`j<~y@1r<1I@3X~qRSB90ENT4C7T)Fo5p06dZNtIl%2h;FPc{L|Vwfsm+%p-yZ z<*YRW%}&u8N^^`c-)~z*diWnFZD1DPMWr%h6EKS^q6REf;|osq93mRrblZy8LQ52R5Y9@#ltT{@1`(I^YnJJ|Bo(P0@FCbah-yn_ zk{5gzG>-Y8-U%dsvWIpG++wjdTIBXQUSEevV_&$_c^_hZ_LG(U# 
z)z@1Sl8^@nFbj2lsn3(w)gyEcKzRL*Q!lB?>%-dkkpJsJ5|)<|?t|N=Sszj;h&{2+ zJXAU%bV(hLqj9V4wXS`gg0o+>CpP_%zQGypkJA^P>*|mCNqOb@ANA;2gI!r8*A-=m zYuR;J)+8`Nx~2Aagudmj36=|V9}{T_!-u^R4J1O`?D0)_`5$|n=A=R1tj@MF_2)E zB7*=V&qdW>QiqiN-RQcC&H4dTHh^In4G)+!RV#YTH;SHXNNt$flI#FU8FA3qj_(^4 zf09;zW=MP$to!(dOE1pix6Ps;-9=kg?pWTYqx#ji)gnt*0qt~yKany_xBM^ z5&w{5s|>cc4!rm98w0NT)yOq+JMyY10o=+U#)gOoj|t=cUK1P*&IOq*aP-h!(X1?H zW|(G!{JC`mID+Vl5v%IimH?U^ofGm2Tt}yVg?z3N*ED0$rqQPb50qqy&pFT82K%2w zmEdJ;>xiU#WD*Oywn?7Ee2%h}H8)nL=UtE!NJ(38N72qv2xW($Zr16a zArsL!b9hn3{IFVKuImm9tw1m)ptKW-GTs}6XNZ#uU&qK-AYa1ZBlf&&9_mPv;^?gQ zLeurxH5CG!-dc74P%gmu6&iIC=`Ig|TmO$YLmjnj^DMaEDN#BdAz zm3=?6!JewcMl}a;(2CraFM_J%QSx8f%&^BUR96ca-YHU5(5kkmQMizDxV9>Hc=<4QQDQgQC#|70zcQ$)uoMj@xu4kY z3hg1ten_j*kHh_a+V@nteXWaSt-<@Z3XH_a9-ol(REYpy)gEm$`7cqt5wr;2LhqY_ zh(#G|oQRRL<)A|!8LtH|q@?wVB9R)}N9h39{b@3il9{NxP?ro*3L^wh#1B7a-#xDtO*6X=9LtEe@|7WBkf0xEc2yXNn489K=2R0-v;--Xk;`{TKU z*N-=bp5BkzYA`4|k#rer@$g|(Nj(>teha^2`c5rXQh}WTt}Mp^W}6 zAPNd`e*^%~xhaDeU_ZgH1eHSGebpv`4#B|c5^^b#V^~-~6cyKv`^tkOX~mT0lV z%Ll2YTv3}MHp8laqodr02_Q`hq58O#C13LXPO6wf!@CM7yc9S<8j?ut3Wb2}X7~<@WfZ zwCwR)<`-B*7+)#pXpTlhO`6UchJ_&3;PZZYP>%X?zKk`quic1B@c*Ia zZPLCD+PD!F>_KKoUG{-_bBv});kP%5AvP^fsJY_H7S~%JmrgivA^h?y5^liaH4}2e zGES5XuU)lRD#Q8u=QXSPe^{|!P|){qs6JFcLWl;5ac@F4lLNU3Qo5QIrmAj%QT5^> zIYbv|A6R!W!>zuk0w)UnV(=v~C+D{Uxl6V>1+SxZvsW7;ONA}tDmikToIS387D;(}M{ zQX8}z6xM!z+Ov8B>6Cp5uYZm<0fjmb3O6t&7ct({;QV}3D!JF>GSxIR(lTCZlpjVnfn)FM_RC0HlU7&v}{bIN)45h$`ECyQ1D0+x9 z6B^Ya__Lyt$&e@<@Bj8jF~r^+tE06Iqm^AWC1D?U@J2YDR53QDyH}^&89EJQ>lt5VMNux1Dk3d?J^mvgB6IDHkzE<#4 zbwSLDoCMt?%O(OqVe03=J%LU*4k=KB;57X=Sf&gKCjdsDjnx&k6y1^~uvS&Gp6;rQ z%PnS%UHd)bPH9qwiAsE()h0sE-Q}y1uej}WA!e{Z=m<(rY*nN`YBlmjX&~)4vg)s@ zt~HL~SrTl@`Er%Tv$J_|4Fk%>H=)lr|Gz@rh&uYhPkfvla6}6k38RNJ9OBj+8r?YE zb3-c4LJ8{csxbLC<$!U8%|$d+I!GYRD7-Dq0#YC`^6w6Mo%8~&{ss8*Wg6>mxZL+w0C!jX&ssW2c|&Mhws79!-SH?FM(I_rUlFzF_Yg3`cw zv}*ny!-1J(&o^eB+`=JkVA%(2v{K)cD``;BH#Z_BR~GeuUZSvQ8Cp{^<~R7$j()b_ zhndO>N(Neqwu=z6sR$w_JQW5;;6!S8Hk?62=s0imVhVEz4THw@F6QS3UK#>qX0E^* zQhnZ2Y?|hzYCkqw@CP)`OENO`-SIv@`cWT(Xej73P{o55o#^}EXFODbl`bCC7%+35 zJfX5BX~AZ?s$`g@HGeEbzTycitImrs?)T+?gEZ#J`4~U0RZ=#)G_^D|)$s-qPtu{r zaDw!Eb*Ia?aa)3nW7uR_k8oQME!8dcxejGyCvaE_U!|jr>FmHVKC(3M2QsLJ*boS< zeGe6IL0QqR?;z(fgjpX5Y@RRvQPr~2G(y{r zC&krch)eh{9JyaW;{ZgaS$F~N6lOSkz`fIqtk8)3EEqK6=FBZNIqT)7wis%Txsm`P zB>D~Iw8|VOK}}nFkh&?f8gd{ldq4tQ8ObJ4vDBBrPQeL~@(mc7jhRDA)KOBZ4uvS? z3Sd>Y7;ye;CX4oh(18%&3gB}z6G5N$62-;G>~M9QXf1f~dCwL2Sw zW40w-VpF<>kJ~z$qU!rm-LE1A3qr;{iUgFnV%5Mq(2)U{T9s0iLoYI5-Gy*7>wD+!2kPswfH~ zz;9TjiU2AJ)e3AP6G}cZ?e)g<6V+DNzdL zoCA51+L*UL%&Bd41YKRB?W2{+UK0TLlc!Vgbg*f>abhFOti&N7m~0tBX)9+00wGHA z^9#ICjs)YBHwAl{Pg8*(FjvlpNiK>LvM2X3do?1@8-)jr5!>qnNni!b{mp-8y!5-n z{z17wj2S)z#|SL41(D;lBhj5$3u_TMt*Xhn;NPVaE(`Xzmm06Xs=^NA3R^TSG%*+~ zj$~piE<{_J84i#5Lok+5vSSCm(AXt(-UE1|w7y>M$^rAGT+j^jrN@MV<3BVG=h0LK z*j6AY4n%OGYXs>1Wo~=HMOsw;yY3U0Y&1zQNfCipW#TWnnaWLyr~75BiLy~qI03MuC{zH`LEU5VLb<3p=P zjLo>n`$oG?pu<>=lM_nx?{7u^&)gxz9{oIID7q7Q5Bkg)3u0prT0y8nOlu)dKx~T& zwV9`9R8EIi{{BFsswQIvQ^wf2X0c}26b?osK_*aUDK?o0XY4Cx{sY-UzmgxUozj?) zJf|)81)=QpB3#!mj_jaa$U9i&p)!`uM1%K8-)M#p6aCaHOm{f)#8JkyOfOeYLc4@z z{)>r+&TYz(&F!K@q=G*~)G`9tr3a^QKN(jBJ@yq8N~GP-{=J7wDcAL+%8{^ZTAbIz zFeyIc`0sfk)W3(*=cJmxJJ9zj$gWRIb#Q#PE7BI8$f!dNvpy4B3aj@Gy6c2g2qB?XJ)PcQ8tFYQP$q HFTwu@rHlVn literal 0 HcmV?d00001 diff --git a/docs/zh_cn/conf.py b/docs/zh_cn/conf.py new file mode 100644 index 0000000000..e0e68b6018 --- /dev/null +++ b/docs/zh_cn/conf.py @@ -0,0 +1,135 @@ +# Copyright (c) OpenMMLab. All rights reserved. +# Configuration file for the Sphinx documentation builder. 
+# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +import os +import subprocess +import sys + +import pytorch_sphinx_theme + +sys.path.insert(0, os.path.abspath('..')) + +# -- Project information ----------------------------------------------------- + +project = 'MMAction2' +copyright = '2020, OpenMMLab' +author = 'MMAction2 Authors' +version_file = '../.././mmaction/version.py' + + +def get_version(): + with open(version_file, 'r') as f: + exec(compile(f.read(), version_file, 'exec')) + return locals()['__version__'] + + +# The full version, including alpha/beta/rc tags +release = get_version() + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.viewcode', + 'sphinx_markdown_tables', 'sphinx_copybutton', 'myst_parser' +] + +# numpy and torch are required +autodoc_mock_imports = ['mmaction.version', 'PIL'] + +copybutton_prompt_text = r'>>> |\.\.\. ' +copybutton_prompt_is_regexp = True + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This pattern also affects html_static_path and html_extra_path. +exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] + +# -- Options for HTML output ------------------------------------------------- +source_suffix = {'.rst': 'restructuredtext', '.md': 'markdown'} + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'pytorch_sphinx_theme' + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". 
+ +html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] +html_theme_options = { + # 'logo_url': 'https://mmaction2.readthedocs.io/en/latest/', + 'menu': [ + { + 'name': + 'Tutorial', + 'url': + 'https://colab.research.google.com/github/' + 'open-mmlab/mmaction2/blob/master/demo/mmaction2_tutorial.ipynb' + }, + { + 'name': 'GitHub', + 'url': 'https://github.com/open-mmlab/mmaction2' + }, + { + 'name': + 'Upstream', + 'children': [ + { + 'name': 'MMCV', + 'url': 'https://github.com/open-mmlab/mmcv', + 'description': 'Foundational library for computer vision' + }, + { + 'name': + 'MMClassification', + 'url': + 'https://github.com/open-mmlab/mmclassification', + 'description': + 'Open source image classification toolbox based on PyTorch' + }, + { + 'name': 'MMDetection', + 'url': 'https://github.com/open-mmlab/mmdetection', + 'description': 'Object detection toolbox and benchmark' + }, + ] + }, + ], + # Specify the language of shared menu + 'menu_lang': + 'cn' +} + +language = 'zh_CN' +master_doc = 'index' + +html_static_path = ['_static'] +html_css_files = ['css/readthedocs.css'] + +myst_enable_extensions = ['colon_fence'] + + +def builder_inited_handler(app): + subprocess.run(['bash', './merge_docs.sh']) + subprocess.run(['python', './stat.py']) + + +def setup(app): + app.connect('builder-inited', builder_inited_handler) diff --git a/docs/zh_cn/get_started.md b/docs/zh_cn/get_started.md new file mode 100644 index 0000000000..658a611f8e --- /dev/null +++ b/docs/zh_cn/get_started.md @@ -0,0 +1,3 @@ +# 依赖环境(内容建设中) + +# 安装(内容建设中) diff --git a/docs/zh_cn/index.rst b/docs/zh_cn/index.rst new file mode 100644 index 0000000000..59e3e49b53 --- /dev/null +++ b/docs/zh_cn/index.rst @@ -0,0 +1,64 @@ +Welcome to MMAction2's documentation! +===================================== + +You can switch between Chinese and English documents in the lower-left corner of the layout. + +.. toctree:: + :maxdepth: 1 + :caption: Get Started + + get_started.md + +.. toctree:: + :maxdepth: 1 + :caption: User Guides + + user_guides/1_config.md + user_guides/2_data_prepare.md + user_guides/3_inference.md + user_guides/4_train_test.md + +.. toctree:: + :maxdepth: 1 + :caption: Useful Tools + + user_guides/useful_tools.md + user_guides/visualization.md + +.. toctree:: + :maxdepth: 1 + :caption: Migration + + migration.md + +.. toctree:: + :maxdepth: 1 + :caption: Model Zoo + + modelzoo.md + recognition_models.md + detection_models.md + skeleton_models.md + localization_models.md + +.. toctree:: + :maxdepth: 1 + :caption: Notes + + notes/contribution_guide.md + notes/projects.md + notes/changelog.md + notes/faq.md + +.. toctree:: + :caption: Switch Language + + switch_language.md + + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`search` diff --git a/docs/zh_cn/make.bat b/docs/zh_cn/make.bat new file mode 100644 index 0000000000..922152e96a --- /dev/null +++ b/docs/zh_cn/make.bat @@ -0,0 +1,35 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=. +set BUILDDIR=_build + +if "%1" == "" goto help + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. 
+ echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/docs/zh_cn/merge_docs.sh b/docs/zh_cn/merge_docs.sh new file mode 100644 index 0000000000..aa2a9bebfd --- /dev/null +++ b/docs/zh_cn/merge_docs.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +## gather models +cat ../../configs/localization/*/README.md | sed "s/md#t/html#t/g" | sed "s/#/#&/" | sed '1i\# Action Localization Models' | sed 's/](\/docs\//](/g' | sed 's=](/=](https://github.com/open-mmlab/mmaction2/tree/master/=g' | sed "s/getting_started.html##t/getting_started.html#t/g" > localization_models.md +cat ../../configs/recognition/*/README.md | sed "s/md#t/html#t/g" | sed "s/#/#&/" | sed '1i\# Action Recognition Models' | sed 's/](\/docs\//](/g' | sed 's=](/=](https://github.com/open-mmlab/mmaction2/tree/master/=g' | sed "s/getting_started.html##t/getting_started.html#t/g" > recognition_models.md +cat ../../configs/recognition_audio/*/README.md | sed "s/md#t/html#t/g" | sed "s/#/#&/" | sed 's/](\/docs\//](/g' | sed 's=](/=](https://github.com/open-mmlab/mmaction2/tree/master/=g' | sed "s/getting_started.html##t/getting_started.html#t/g" >> recognition_models.md +cat ../../configs/detection/*/README.md | sed "s/md#t/html#t/g" | sed "s/#/#&/" | sed '1i\# Spatio Temporal Action Detection Models' | sed 's/](\/docs\//](/g' | sed 's=](/=](https://github.com/open-mmlab/mmaction2/tree/master/=g' | sed "s/getting_started.html##t/getting_started.html#t/g" > detection_models.md +cat ../../configs/skeleton/*/README.md | sed "s/md#t/html#t/g" | sed "s/#/#&/" | sed '1i\# Skeleton-based Action Recognition Models' | sed 's/](\/docs\//](/g' | sed 's=](/=](https://github.com/open-mmlab/mmaction2/tree/master/=g' | sed "s/getting_started.html##t/getting_started.html#t/g" > skeleton_models.md diff --git a/docs/zh_cn/migration.md b/docs/zh_cn/migration.md new file mode 100644 index 0000000000..90ab61e459 --- /dev/null +++ b/docs/zh_cn/migration.md @@ -0,0 +1 @@ +# 从 MMAction2 0.x 迁移 (内容建设中) diff --git a/docs/zh_cn/notes/changelog.md b/docs/zh_cn/notes/changelog.md new file mode 100644 index 0000000000..1c96e5161d --- /dev/null +++ b/docs/zh_cn/notes/changelog.md @@ -0,0 +1,850 @@ +# Changelog + +## 1.0.0rc1 (14/10/2022) + +**Highlights** + +- Support Video Swin Transformer + +**New Features** + +- Support Video Swin Transformer ([#1939](https://github.com/open-mmlab/mmaction2/pull/1939)) + +**Improvements** + +- Add colab tutorial for 1.x ([#1956](https://github.com/open-mmlab/mmaction2/pull/1956)) +- Support skeleton-based action recognition demo ([#1920](https://github.com/open-mmlab/mmaction2/pull/1920)) + +**Bug Fixes** + +- Fix link in doc ([#1986](https://github.com/open-mmlab/mmaction2/pull/1986), [#1967](https://github.com/open-mmlab/mmaction2/pull/1967), [#1951](https://github.com/open-mmlab/mmaction2/pull/1951), [#1926](https://github.com/open-mmlab/mmaction2/pull/1926),[#1944](https://github.com/open-mmlab/mmaction2/pull/1944), [#1944](https://github.com/open-mmlab/mmaction2/pull/1944), [#1927](https://github.com/open-mmlab/mmaction2/pull/1927), [#1925](https://github.com/open-mmlab/mmaction2/pull/1925)) +- Fix CI ([#1987](https://github.com/open-mmlab/mmaction2/pull/1987), [#1930](https://github.com/open-mmlab/mmaction2/pull/1930), [#1923](https://github.com/open-mmlab/mmaction2/pull/1923)) +- Fix pre-commit hook config 
([#1971](https://github.com/open-mmlab/mmaction2/pull/1971)) +- Fix TIN config ([#1912](https://github.com/open-mmlab/mmaction2/pull/1912)) +- Fix UT for BMN and BSN ([#1966](https://github.com/open-mmlab/mmaction2/pull/1966)) +- Fix UT for Recognizer2D ([#1937](https://github.com/open-mmlab/mmaction2/pull/1937)) +- Fix BSN and BMN configs for localization ([#1913](https://github.com/open-mmlab/mmaction2/pull/1913)) +- Modify ST-GCN configs ([#1913](https://github.com/open-mmlab/mmaction2/pull/1914)) +- Fix typo in migration doc ([#1931](https://github.com/open-mmlab/mmaction2/pull/1931)) +- Remove ONNX related tools ([#1928](https://github.com/open-mmlab/mmaction2/pull/1928)) +- Update TANet readme ([#1916](https://github.com/open-mmlab/mmaction2/pull/1916), [#1890](https://github.com/open-mmlab/mmaction2/pull/1890)) +- Update 2S-AGCN readme ([#1915](https://github.com/open-mmlab/mmaction2/pull/1915)) +- Fix TSN configs ([#1905](https://github.com/open-mmlab/mmaction2/pull/1905)) +- Fix configs for detection ([#1903](https://github.com/open-mmlab/mmaction2/pull/1903)) +- Fix typo in TIN config ([#1904](https://github.com/open-mmlab/mmaction2/pull/1904)) +- Fix PoseC3D readme ([#1899](https://github.com/open-mmlab/mmaction2/pull/1899)) +- Fix ST-GCN configs ([#1891](https://github.com/open-mmlab/mmaction2/pull/1891)) +- Fix audio recognition readme ([#1898](https://github.com/open-mmlab/mmaction2/pull/1898)) +- Fix TSM readme ([#1887](https://github.com/open-mmlab/mmaction2/pull/1887)) +- Fix SlowOnly readme ([#1889](https://github.com/open-mmlab/mmaction2/pull/1889)) +- Fix TRN readme ([#1888](https://github.com/open-mmlab/mmaction2/pull/1888)) +- Fix typo in get_started doc ([#1895](https://github.com/open-mmlab/mmaction2/pull/1895)) + +## 1.0.0rc0 (09/01/2022) + +We are excited to announce the release of MMAction2 v1.0.0rc0. +MMAction2 1.0.0rc0 is the first version of MMAction2 1.x, a part of the OpenMMLab 2.0 projects. +It is built upon the new [training engine](https://github.com/open-mmlab/mmengine). + +**Highlights** + +- **New engines**. MMAction2 1.x is based on [MMEngine](https://github.com/open-mmlab/mmengine), which provides a general and powerful runner that allows more flexible customizations and significantly simplifies the entrypoints of high-level interfaces. + +- **Unified interfaces**. As a part of the OpenMMLab 2.0 projects, MMAction2 1.x unifies and refactors the interfaces and internal logic of training, testing, datasets, models, evaluation, and visualization. All the OpenMMLab 2.0 projects share the same design in these interfaces and logic to allow the emergence of multi-task/modality algorithms. + +- **More documentation and tutorials**. We add a bunch of documentation and tutorials to help users get started more smoothly. Read them [here](https://github.com/open-mmlab/mmaction2/blob/1.x/docs/en/migration.md). + +**Breaking Changes** + +In this release, we made many major refactorings and modifications. Please refer to the [migration guide](../migration.md) for details and migration instructions.
+ +## 0.24.0 (05/05/2022) + +**Highlights** + +- Support different seeds + +**New Features** + +- Add lateral norm in multigrid config ([#1567](https://github.com/open-mmlab/mmaction2/pull/1567)) +- Add openpose 25 joints in graph config ([#1578](https://github.com/open-mmlab/mmaction2/pull/1578)) +- Support MLU Backend ([#1608](https://github.com/open-mmlab/mmaction2/pull/1608)) + +**Bug and Typo Fixes** + +- Fix local_rank ([#1558](https://github.com/open-mmlab/mmaction2/pull/1558)) +- Fix install typo ([#1571](https://github.com/open-mmlab/mmaction2/pull/1571)) +- Fix the inference API doc ([#1580](https://github.com/open-mmlab/mmaction2/pull/1580)) +- Fix zh-CN demo.md and getting_started.md ([#1587](https://github.com/open-mmlab/mmaction2/pull/1587)) +- Remove Recommonmark ([#1595](https://github.com/open-mmlab/mmaction2/pull/1595)) +- Fix inference with ndarray ([#1603](https://github.com/open-mmlab/mmaction2/pull/1603)) +- Fix the log error when `IterBasedRunner` is used ([#1606](https://github.com/open-mmlab/mmaction2/pull/1606)) + +## 0.23.0 (04/01/2022) + +**Highlights** + +- Support different seeds +- Provide multi-node training & testing script +- Update error log + +**New Features** + +- Support different seeds([#1502](https://github.com/open-mmlab/mmaction2/pull/1502)) +- Provide multi-node training & testing script([#1521](https://github.com/open-mmlab/mmaction2/pull/1521)) +- Update error log([#1546](https://github.com/open-mmlab/mmaction2/pull/1546)) + +**Documentations** + +- Update gpus in Slowfast readme([#1497](https://github.com/open-mmlab/mmaction2/pull/1497)) +- Fix work_dir in multigrid config([#1498](https://github.com/open-mmlab/mmaction2/pull/1498)) +- Add sub bn docs([#1503](https://github.com/open-mmlab/mmaction2/pull/1503)) +- Add shortcycle sampler docs([#1513](https://github.com/open-mmlab/mmaction2/pull/1513)) +- Update Windows Declaration([#1520](https://github.com/open-mmlab/mmaction2/pull/1520)) +- Update the link for ST-GCN([#1544](https://github.com/open-mmlab/mmaction2/pull/1544)) +- Update install commands([#1549](https://github.com/open-mmlab/mmaction2/pull/1549)) + +**Bug and Typo Fixes** + +- Update colab tutorial install cmds([#1522](https://github.com/open-mmlab/mmaction2/pull/1522)) +- Fix num_iters_per_epoch in analyze_logs.py([#1530](https://github.com/open-mmlab/mmaction2/pull/1530)) +- Fix distributed_sampler([#1532](https://github.com/open-mmlab/mmaction2/pull/1532)) +- Fix cd dir error([#1545](https://github.com/open-mmlab/mmaction2/pull/1545)) +- Update arg names([#1548](https://github.com/open-mmlab/mmaction2/pull/1548)) + +**ModelZoo** + +## 0.22.0 (03/05/2022) + +**Highlights** + +- Support Multigrid training strategy +- Support CPU training +- Support audio demo +- Support topk customizing in models/heads/base.py + +**New Features** + +- Support Multigrid training strategy([#1378](https://github.com/open-mmlab/mmaction2/pull/1378)) +- Support STGCN in demo_skeleton.py([#1391](https://github.com/open-mmlab/mmaction2/pull/1391)) +- Support CPU training([#1407](https://github.com/open-mmlab/mmaction2/pull/1407)) +- Support audio demo([#1425](https://github.com/open-mmlab/mmaction2/pull/1425)) +- Support topk customizing in models/heads/base.py([#1452](https://github.com/open-mmlab/mmaction2/pull/1452)) + +**Documentations** + +- Add OpenMMLab platform([#1393](https://github.com/open-mmlab/mmaction2/pull/1393)) +- Update links([#1394](https://github.com/open-mmlab/mmaction2/pull/1394)) +- Update readme in 
configs([#1404](https://github.com/open-mmlab/mmaction2/pull/1404)) +- Update instructions to install mmcv-full([#1426](https://github.com/open-mmlab/mmaction2/pull/1426)) +- Add shortcut([#1433](https://github.com/open-mmlab/mmaction2/pull/1433)) +- Update modelzoo([#1439](https://github.com/open-mmlab/mmaction2/pull/1439)) +- add video_structuralize in readme([#1455](https://github.com/open-mmlab/mmaction2/pull/1455)) +- Update OpenMMLab repo information([#1482](https://github.com/open-mmlab/mmaction2/pull/1482)) + +**Bug and Typo Fixes** + +- Update train.py([#1375](https://github.com/open-mmlab/mmaction2/pull/1375)) +- Fix printout bug([#1382](<(https://github.com/open-mmlab/mmaction2/pull/1382)>)) +- Update multi processing setting([#1395](https://github.com/open-mmlab/mmaction2/pull/1395)) +- Setup multi processing both in train and test([#1405](https://github.com/open-mmlab/mmaction2/pull/1405)) +- Fix bug in nondistributed multi-gpu training([#1406](https://github.com/open-mmlab/mmaction2/pull/1406)) +- Add variable fps in ava_dataset.py([#1409](https://github.com/open-mmlab/mmaction2/pull/1409)) +- Only support distributed training([#1414](https://github.com/open-mmlab/mmaction2/pull/1414)) +- Set test_mode for AVA configs([#1432](https://github.com/open-mmlab/mmaction2/pull/1432)) +- Support single label([#1434](https://github.com/open-mmlab/mmaction2/pull/1434)) +- Add check copyright([#1447](https://github.com/open-mmlab/mmaction2/pull/1447)) +- Support Windows CI([#1448](https://github.com/open-mmlab/mmaction2/pull/1448)) +- Fix wrong device of class_weight in models/losses/cross_entropy_loss.py([#1457](https://github.com/open-mmlab/mmaction2/pull/1457)) +- Fix bug caused by distributed([#1459](https://github.com/open-mmlab/mmaction2/pull/1459)) +- Update readme([#1460](https://github.com/open-mmlab/mmaction2/pull/1460)) +- Fix lint caused by colab automatic upload([#1461](https://github.com/open-mmlab/mmaction2/pull/1461)) +- Refine CI([#1471](https://github.com/open-mmlab/mmaction2/pull/1471)) +- Update pre-commit([#1474](https://github.com/open-mmlab/mmaction2/pull/1474)) +- Add deprecation message for deploy tool([#1483](https://github.com/open-mmlab/mmaction2/pull/1483)) + +**ModelZoo** + +- Support slowfast_steplr([#1421](https://github.com/open-mmlab/mmaction2/pull/1421)) + +## 0.21.0 (31/12/2021) + +**Highlights** + +- Support 2s-AGCN +- Support publish models in Windows +- Improve some sthv1 related models +- Support BABEL + +**New Features** + +- Support 2s-AGCN([#1248](https://github.com/open-mmlab/mmaction2/pull/1248)) +- Support skip postproc in ntu_pose_extraction([#1295](https://github.com/open-mmlab/mmaction2/pull/1295)) +- Support publish models in Windows([#1325](https://github.com/open-mmlab/mmaction2/pull/1325)) +- Add copyright checkhook in pre-commit-config([#1344](https://github.com/open-mmlab/mmaction2/pull/1344)) + +**Documentations** + +- Add MMFlow ([#1273](https://github.com/open-mmlab/mmaction2/pull/1273)) +- Revise README.md and add projects.md ([#1286](https://github.com/open-mmlab/mmaction2/pull/1286)) +- Add 2s-AGCN in Updates([#1289](https://github.com/open-mmlab/mmaction2/pull/1289)) +- Add MMFewShot([#1300](https://github.com/open-mmlab/mmaction2/pull/1300)) +- Add MMHuman3d([#1304](https://github.com/open-mmlab/mmaction2/pull/1304)) +- Update pre-commit([#1313](https://github.com/open-mmlab/mmaction2/pull/1313)) +- Use share menu from the theme instead([#1328](https://github.com/open-mmlab/mmaction2/pull/1328)) +- Update installation 
command([#1340](https://github.com/open-mmlab/mmaction2/pull/1340)) + +**Bug and Typo Fixes** + +- Update the inference part in notebooks([#1256](https://github.com/open-mmlab/mmaction2/pull/1256)) +- Update the map_location([#1262](<(https://github.com/open-mmlab/mmaction2/pull/1262)>)) +- Fix bug that start_index is not used in RawFrameDecode([#1278](https://github.com/open-mmlab/mmaction2/pull/1278)) +- Fix bug in init_random_seed([#1282](https://github.com/open-mmlab/mmaction2/pull/1282)) +- Fix bug in setup.py([#1303](https://github.com/open-mmlab/mmaction2/pull/1303)) +- Fix interrogate error in workflows([#1305](https://github.com/open-mmlab/mmaction2/pull/1305)) +- Fix typo in slowfast config([#1309](https://github.com/open-mmlab/mmaction2/pull/1309)) +- Cancel previous runs that are not completed([#1327](https://github.com/open-mmlab/mmaction2/pull/1327)) +- Fix missing skip_postproc parameter([#1347](https://github.com/open-mmlab/mmaction2/pull/1347)) +- Update ssn.py([#1355](https://github.com/open-mmlab/mmaction2/pull/1355)) +- Use latest youtube-dl([#1357](https://github.com/open-mmlab/mmaction2/pull/1357)) +- Fix test-best([#1362](https://github.com/open-mmlab/mmaction2/pull/1362)) + +**ModelZoo** + +- Improve some sthv1 related models([#1306](https://github.com/open-mmlab/mmaction2/pull/1306)) +- Support BABEL([#1332](https://github.com/open-mmlab/mmaction2/pull/1332)) + +## 0.20.0 (07/10/2021) + +**Highlights** + +- Support TorchServe +- Add video structuralize demo +- Support using 3D skeletons for skeleton-based action recognition +- Benchmark PoseC3D on UCF and HMDB + +**New Features** + +- Support TorchServe ([#1212](https://github.com/open-mmlab/mmaction2/pull/1212)) +- Support 3D skeletons pre-processing ([#1218](https://github.com/open-mmlab/mmaction2/pull/1218)) +- Support video structuralize demo ([#1197](https://github.com/open-mmlab/mmaction2/pull/1197)) + +**Documentations** + +- Revise README.md and add projects.md ([#1214](https://github.com/open-mmlab/mmaction2/pull/1214)) +- Add CN docs for Skeleton dataset, PoseC3D and ST-GCN ([#1228](https://github.com/open-mmlab/mmaction2/pull/1228), [#1237](https://github.com/open-mmlab/mmaction2/pull/1237), [#1236](https://github.com/open-mmlab/mmaction2/pull/1236)) +- Add tutorial for custom dataset training for skeleton-based action recognition ([#1234](https://github.com/open-mmlab/mmaction2/pull/1234)) + +**Bug and Typo Fixes** + +- Fix tutorial link ([#1219](https://github.com/open-mmlab/mmaction2/pull/1219)) +- Fix GYM links ([#1224](https://github.com/open-mmlab/mmaction2/pull/1224)) + +**ModelZoo** + +- Benchmark PoseC3D on UCF and HMDB ([#1223](https://github.com/open-mmlab/mmaction2/pull/1223)) +- Add ST-GCN + 3D skeleton model for NTU60-XSub ([#1236](https://github.com/open-mmlab/mmaction2/pull/1236)) + +## 0.19.0 (07/10/2021) + +**Highlights** + +- Support ST-GCN +- Refactor the inference API +- Add code spell check hook + +**New Features** + +- Support ST-GCN ([#1123](https://github.com/open-mmlab/mmaction2/pull/1123)) + +**Improvement** + +- Add label maps for every dataset ([#1127](https://github.com/open-mmlab/mmaction2/pull/1127)) +- Remove useless code MultiGroupCrop ([#1180](https://github.com/open-mmlab/mmaction2/pull/1180)) +- Refactor Inference API ([#1191](https://github.com/open-mmlab/mmaction2/pull/1191)) +- Add code spell check hook ([#1208](https://github.com/open-mmlab/mmaction2/pull/1208)) +- Use docker in CI ([#1159](https://github.com/open-mmlab/mmaction2/pull/1159)) + +**Documentations** 
+ +- Update metafiles to new OpenMMLAB protocols ([#1134](https://github.com/open-mmlab/mmaction2/pull/1134)) +- Switch to new doc style ([#1160](https://github.com/open-mmlab/mmaction2/pull/1160)) +- Improve the ERROR message ([#1203](https://github.com/open-mmlab/mmaction2/pull/1203)) +- Fix invalid URL in getting_started ([#1169](https://github.com/open-mmlab/mmaction2/pull/1169)) + +**Bug and Typo Fixes** + +- Compatible with new MMClassification ([#1139](https://github.com/open-mmlab/mmaction2/pull/1139)) +- Add missing runtime dependencies ([#1144](https://github.com/open-mmlab/mmaction2/pull/1144)) +- Fix THUMOS tag proposals path ([#1156](https://github.com/open-mmlab/mmaction2/pull/1156)) +- Fix LoadHVULabel ([#1194](https://github.com/open-mmlab/mmaction2/pull/1194)) +- Switch the default value of `persistent_workers` to False ([#1202](https://github.com/open-mmlab/mmaction2/pull/1202)) +- Fix `_freeze_stages` for MobileNetV2 ([#1193](https://github.com/open-mmlab/mmaction2/pull/1193)) +- Fix resume when building rawframes ([#1150](https://github.com/open-mmlab/mmaction2/pull/1150)) +- Fix device bug for class weight ([#1188](https://github.com/open-mmlab/mmaction2/pull/1188)) +- Correct Arg names in extract_audio.py ([#1148](https://github.com/open-mmlab/mmaction2/pull/1148)) + +**ModelZoo** + +- Add TSM-MobileNetV2 ported from TSM ([#1163](https://github.com/open-mmlab/mmaction2/pull/1163)) +- Add ST-GCN for NTURGB+D-XSub-60 ([#1123](https://github.com/open-mmlab/mmaction2/pull/1123)) + +## 0.18.0 (02/09/2021) + +**Improvement** + +- Add CopyRight ([#1099](https://github.com/open-mmlab/mmaction2/pull/1099)) +- Support NTU Pose Extraction ([#1076](https://github.com/open-mmlab/mmaction2/pull/1076)) +- Support Caching in RawFrameDecode ([#1078](https://github.com/open-mmlab/mmaction2/pull/1078)) +- Add citations & Support python3.9 CI & Use fixed-version sphinx ([#1125](https://github.com/open-mmlab/mmaction2/pull/1125)) + +**Documentations** + +- Add Descriptions of PoseC3D dataset ([#1053](https://github.com/open-mmlab/mmaction2/pull/1053)) + +**Bug and Typo Fixes** + +- Fix SSV2 checkpoints ([#1101](https://github.com/open-mmlab/mmaction2/pull/1101)) +- Fix CSN normalization ([#1116](https://github.com/open-mmlab/mmaction2/pull/1116)) +- Fix typo ([#1121](https://github.com/open-mmlab/mmaction2/pull/1121)) +- Fix new_crop_quadruple bug ([#1108](https://github.com/open-mmlab/mmaction2/pull/1108)) + +## 0.17.0 (03/08/2021) + +**Highlights** + +- Support PyTorch 1.9 +- Support Pytorchvideo Transforms +- Support PreciseBN + +**New Features** + +- Support Pytorchvideo Transforms ([#1008](https://github.com/open-mmlab/mmaction2/pull/1008)) +- Support PreciseBN ([#1038](https://github.com/open-mmlab/mmaction2/pull/1038)) + +**Improvements** + +- Remove redundant augmentations in config files ([#996](https://github.com/open-mmlab/mmaction2/pull/996)) +- Make resource directory to hold common resource pictures ([#1011](https://github.com/open-mmlab/mmaction2/pull/1011)) +- Remove deprecated FrameSelector ([#1010](https://github.com/open-mmlab/mmaction2/pull/1010)) +- Support Concat Dataset ([#1000](https://github.com/open-mmlab/mmaction2/pull/1000)) +- Add `to-mp4` option to resize_videos.py ([#1021](https://github.com/open-mmlab/mmaction2/pull/1021)) +- Add option to keep tail frames ([#1050](https://github.com/open-mmlab/mmaction2/pull/1050)) +- Update MIM support ([#1061](https://github.com/open-mmlab/mmaction2/pull/1061)) +- Calculate Top-K accurate and inaccurate classes 
([#1047](https://github.com/open-mmlab/mmaction2/pull/1047)) + +**Bug and Typo Fixes** + +- Fix bug in PoseC3D demo ([#1009](https://github.com/open-mmlab/mmaction2/pull/1009)) +- Fix some problems in resize_videos.py ([#1012](https://github.com/open-mmlab/mmaction2/pull/1012)) +- Support torch1.9 ([#1015](https://github.com/open-mmlab/mmaction2/pull/1015)) +- Remove redundant code in CI ([#1046](https://github.com/open-mmlab/mmaction2/pull/1046)) +- Fix bug about persistent_workers ([#1044](https://github.com/open-mmlab/mmaction2/pull/1044)) +- Support TimeSformer feature extraction ([#1035](https://github.com/open-mmlab/mmaction2/pull/1035)) +- Fix ColorJitter ([#1025](https://github.com/open-mmlab/mmaction2/pull/1025)) + +**ModelZoo** + +- Add TSM-R50 sthv1 models trained by PytorchVideo RandAugment and AugMix ([#1008](https://github.com/open-mmlab/mmaction2/pull/1008)) +- Update SlowOnly SthV1 checkpoints ([#1034](https://github.com/open-mmlab/mmaction2/pull/1034)) +- Add SlowOnly Kinetics400 checkpoints trained with Precise-BN ([#1038](https://github.com/open-mmlab/mmaction2/pull/1038)) +- Add CSN-R50 from scratch checkpoints ([#1045](https://github.com/open-mmlab/mmaction2/pull/1045)) +- TPN Kinetics-400 Checkpoints trained with the new ColorJitter ([#1025](https://github.com/open-mmlab/mmaction2/pull/1025)) + +**Documentation** + +- Add Chinese translation of feature_extraction.md ([#1020](https://github.com/open-mmlab/mmaction2/pull/1020)) +- Fix the code snippet in getting_started.md ([#1023](https://github.com/open-mmlab/mmaction2/pull/1023)) +- Fix TANet config table ([#1028](https://github.com/open-mmlab/mmaction2/pull/1028)) +- Add description to PoseC3D dataset ([#1053](https://github.com/open-mmlab/mmaction2/pull/1053)) + +## 0.16.0 (01/07/2021) + +**Highlights** + +- Support using backbone from pytorch-image-models(timm) +- Support PIMS Decoder +- Demo for skeleton-based action recognition +- Support Timesformer + +**New Features** + +- Support using backbones from pytorch-image-models(timm) for TSN ([#880](https://github.com/open-mmlab/mmaction2/pull/880)) +- Support torchvision transformations in preprocessing pipelines ([#972](https://github.com/open-mmlab/mmaction2/pull/972)) +- Demo for skeleton-based action recognition ([#972](https://github.com/open-mmlab/mmaction2/pull/972)) +- Support Timesformer ([#839](https://github.com/open-mmlab/mmaction2/pull/839)) + +**Improvements** + +- Add a tool to find invalid videos ([#907](https://github.com/open-mmlab/mmaction2/pull/907), [#950](https://github.com/open-mmlab/mmaction2/pull/950)) +- Add an option to specify spectrogram_type ([#909](https://github.com/open-mmlab/mmaction2/pull/909)) +- Add json output to video demo ([#906](https://github.com/open-mmlab/mmaction2/pull/906)) +- Add MIM related docs ([#918](https://github.com/open-mmlab/mmaction2/pull/918)) +- Rename lr to scheduler ([#916](https://github.com/open-mmlab/mmaction2/pull/916)) +- Support `--cfg-options` for demos ([#911](https://github.com/open-mmlab/mmaction2/pull/911)) +- Support number counting for flow-wise filename template ([#922](https://github.com/open-mmlab/mmaction2/pull/922)) +- Add Chinese tutorial ([#941](https://github.com/open-mmlab/mmaction2/pull/941)) +- Change ResNet3D default values ([#939](https://github.com/open-mmlab/mmaction2/pull/939)) +- Adjust script structure ([#935](https://github.com/open-mmlab/mmaction2/pull/935)) +- Add font color to args in long_video_demo ([#947](https://github.com/open-mmlab/mmaction2/pull/947)) +- Polish 
code style with Pylint ([#908](https://github.com/open-mmlab/mmaction2/pull/908)) +- Support PIMS Decoder ([#946](https://github.com/open-mmlab/mmaction2/pull/946)) +- Improve Metafiles ([#956](https://github.com/open-mmlab/mmaction2/pull/956), [#979](https://github.com/open-mmlab/mmaction2/pull/979), [#966](https://github.com/open-mmlab/mmaction2/pull/966)) +- Add links to download Kinetics400 validation ([#920](https://github.com/open-mmlab/mmaction2/pull/920)) +- Audit the usage of shutil.rmtree ([#943](https://github.com/open-mmlab/mmaction2/pull/943)) +- Polish localizer related codes([#913](https://github.com/open-mmlab/mmaction2/pull/913)) + +**Bug and Typo Fixes** + +- Fix spatiotemporal detection demo ([#899](https://github.com/open-mmlab/mmaction2/pull/899)) +- Fix docstring for 3D inflate ([#925](https://github.com/open-mmlab/mmaction2/pull/925)) +- Fix bug of writing text to video with TextClip ([#952](https://github.com/open-mmlab/mmaction2/pull/952)) +- Fix mmcv install in CI ([#977](https://github.com/open-mmlab/mmaction2/pull/977)) + +**ModelZoo** + +- Add TSN with Swin Transformer backbone as an example for using pytorch-image-models(timm) backbones ([#880](https://github.com/open-mmlab/mmaction2/pull/880)) +- Port CSN checkpoints from VMZ ([#945](https://github.com/open-mmlab/mmaction2/pull/945)) +- Release various checkpoints for UCF101, HMDB51 and Sthv1 ([#938](https://github.com/open-mmlab/mmaction2/pull/938)) +- Support Timesformer ([#839](https://github.com/open-mmlab/mmaction2/pull/839)) +- Update TSM modelzoo ([#981](https://github.com/open-mmlab/mmaction2/pull/981)) + +## 0.15.0 (31/05/2021) + +**Highlights** + +- Support PoseC3D +- Support ACRN +- Support MIM + +**New Features** + +- Support PoseC3D ([#786](https://github.com/open-mmlab/mmaction2/pull/786), [#890](https://github.com/open-mmlab/mmaction2/pull/890)) +- Support MIM ([#870](https://github.com/open-mmlab/mmaction2/pull/870)) +- Support ACRN and Focal Loss ([#891](https://github.com/open-mmlab/mmaction2/pull/891)) +- Support Jester dataset ([#864](https://github.com/open-mmlab/mmaction2/pull/864)) + +**Improvements** + +- Add `metric_options` for evaluation to docs ([#873](https://github.com/open-mmlab/mmaction2/pull/873)) +- Support creating a new label map based on custom classes for demos about spatio temporal demo ([#879](https://github.com/open-mmlab/mmaction2/pull/879)) +- Improve document about AVA dataset preparation ([#878](https://github.com/open-mmlab/mmaction2/pull/878)) +- Provide a script to extract clip-level feature ([#856](https://github.com/open-mmlab/mmaction2/pull/856)) + +**Bug and Typo Fixes** + +- Fix issues about resume ([#877](https://github.com/open-mmlab/mmaction2/pull/877), [#878](https://github.com/open-mmlab/mmaction2/pull/878)) +- Correct the key name of `eval_results` dictionary for metric 'mmit_mean_average_precision' ([#885](https://github.com/open-mmlab/mmaction2/pull/885)) + +**ModelZoo** + +- Support Jester dataset ([#864](https://github.com/open-mmlab/mmaction2/pull/864)) +- Support ACRN and Focal Loss ([#891](https://github.com/open-mmlab/mmaction2/pull/891)) + +## 0.14.0 (30/04/2021) + +**Highlights** + +- Support TRN +- Support Diving48 + +**New Features** + +- Support TRN ([#755](https://github.com/open-mmlab/mmaction2/pull/755)) +- Support Diving48 ([#835](https://github.com/open-mmlab/mmaction2/pull/835)) +- Support Webcam Demo for Spatio-temporal Action Detection Models ([#795](https://github.com/open-mmlab/mmaction2/pull/795)) + +**Improvements** + +- Add 
softmax option for pytorch2onnx tool ([#781](https://github.com/open-mmlab/mmaction2/pull/781)) +- Support TRN ([#755](https://github.com/open-mmlab/mmaction2/pull/755)) +- Test with onnx models and TensorRT engines ([#758](https://github.com/open-mmlab/mmaction2/pull/758)) +- Speed up AVA Testing ([#784](https://github.com/open-mmlab/mmaction2/pull/784)) +- Add `self.with_neck` attribute ([#796](https://github.com/open-mmlab/mmaction2/pull/796)) +- Update installation document ([#798](https://github.com/open-mmlab/mmaction2/pull/798)) +- Use a random master port ([#809](https://github.com/open-mmlab/mmaction2/pull/8098)) +- Update AVA processing data document ([#801](https://github.com/open-mmlab/mmaction2/pull/801)) +- Refactor spatio-temporal augmentation ([#782](https://github.com/open-mmlab/mmaction2/pull/782)) +- Add QR code in CN README ([#812](https://github.com/open-mmlab/mmaction2/pull/812)) +- Add Alternative way to download Kinetics ([#817](https://github.com/open-mmlab/mmaction2/pull/817), [#822](https://github.com/open-mmlab/mmaction2/pull/822)) +- Refactor Sampler ([#790](https://github.com/open-mmlab/mmaction2/pull/790)) +- Use EvalHook in MMCV with backward compatibility ([#793](https://github.com/open-mmlab/mmaction2/pull/793)) +- Use MMCV Model Registry ([#843](https://github.com/open-mmlab/mmaction2/pull/843)) + +**Bug and Typo Fixes** + +- Fix a bug in pytorch2onnx.py when `num_classes <= 4` ([#800](https://github.com/open-mmlab/mmaction2/pull/800), [#824](https://github.com/open-mmlab/mmaction2/pull/824)) +- Fix `demo_spatiotemporal_det.py` error ([#803](https://github.com/open-mmlab/mmaction2/pull/803), [#805](https://github.com/open-mmlab/mmaction2/pull/805)) +- Fix loading config bugs when resume ([#820](https://github.com/open-mmlab/mmaction2/pull/820)) +- Make HMDB51 annotation generation more robust ([#811](https://github.com/open-mmlab/mmaction2/pull/811)) + +**ModelZoo** + +- Update checkpoint for 256 height in something-V2 ([#789](https://github.com/open-mmlab/mmaction2/pull/789)) +- Support Diving48 ([#835](https://github.com/open-mmlab/mmaction2/pull/835)) + +## 0.13.0 (31/03/2021) + +**Highlights** + +- Support LFB +- Support using backbone from MMCls/TorchVision +- Add Chinese documentation + +**New Features** + +- Support LFB ([#553](https://github.com/open-mmlab/mmaction2/pull/553)) +- Support using backbones from MMCls for TSN ([#679](https://github.com/open-mmlab/mmaction2/pull/679)) +- Support using backbones from TorchVision for TSN ([#720](https://github.com/open-mmlab/mmaction2/pull/720)) +- Support Mixup and Cutmix for recognizers ([#681](https://github.com/open-mmlab/mmaction2/pull/681)) +- Support Chinese documentation ([#665](https://github.com/open-mmlab/mmaction2/pull/665), [#680](https://github.com/open-mmlab/mmaction2/pull/680), [#689](https://github.com/open-mmlab/mmaction2/pull/689), [#701](https://github.com/open-mmlab/mmaction2/pull/701), [#702](https://github.com/open-mmlab/mmaction2/pull/702), [#703](https://github.com/open-mmlab/mmaction2/pull/703), [#706](https://github.com/open-mmlab/mmaction2/pull/706), [#716](https://github.com/open-mmlab/mmaction2/pull/716), [#717](https://github.com/open-mmlab/mmaction2/pull/717), [#731](https://github.com/open-mmlab/mmaction2/pull/731), [#733](https://github.com/open-mmlab/mmaction2/pull/733), [#735](https://github.com/open-mmlab/mmaction2/pull/735), [#736](https://github.com/open-mmlab/mmaction2/pull/736), [#737](https://github.com/open-mmlab/mmaction2/pull/737), 
[#738](https://github.com/open-mmlab/mmaction2/pull/738), [#739](https://github.com/open-mmlab/mmaction2/pull/739), [#740](https://github.com/open-mmlab/mmaction2/pull/740), [#742](https://github.com/open-mmlab/mmaction2/pull/742), [#752](https://github.com/open-mmlab/mmaction2/pull/752), [#759](https://github.com/open-mmlab/mmaction2/pull/759), [#761](https://github.com/open-mmlab/mmaction2/pull/761), [#772](https://github.com/open-mmlab/mmaction2/pull/772), [#775](https://github.com/open-mmlab/mmaction2/pull/775)) + +**Improvements** + +- Add slowfast config/json/log/ckpt for training custom classes of AVA ([#678](https://github.com/open-mmlab/mmaction2/pull/678)) +- Set RandAugment as Imgaug default transforms ([#585](https://github.com/open-mmlab/mmaction2/pull/585)) +- Add `--test-last` & `--test-best` for `tools/train.py` to test checkpoints after training ([#608](https://github.com/open-mmlab/mmaction2/pull/608)) +- Add fcn_testing in TPN ([#684](https://github.com/open-mmlab/mmaction2/pull/684)) +- Remove redundant recall functions ([#741](https://github.com/open-mmlab/mmaction2/pull/741)) +- Recursively remove pretrained step for testing ([#695](https://github.com/open-mmlab/mmaction2/pull/695)) +- Improve demo by limiting inference fps ([#668](https://github.com/open-mmlab/mmaction2/pull/668)) + +**Bug and Typo Fixes** + +- Fix a bug about multi-class in VideoDataset ([#723](https://github.com/open-mmlab/mmaction2/pull/678)) +- Reverse key-value in anet filelist generation ([#686](https://github.com/open-mmlab/mmaction2/pull/686)) +- Fix flow norm cfg typo ([#693](https://github.com/open-mmlab/mmaction2/pull/693)) + +**ModelZoo** + +- Add LFB for AVA2.1 ([#553](https://github.com/open-mmlab/mmaction2/pull/553)) +- Add TSN with ResNeXt-101-32x4d backbone as an example for using MMCls backbones ([#679](https://github.com/open-mmlab/mmaction2/pull/679)) +- Add TSN with Densenet161 backbone as an example for using TorchVision backbones ([#720](https://github.com/open-mmlab/mmaction2/pull/720)) +- Add slowonly_nl_embedded_gaussian_r50_4x16x1_150e_kinetics400_rgb ([#690](https://github.com/open-mmlab/mmaction2/pull/690)) +- Add slowonly_nl_embedded_gaussian_r50_8x8x1_150e_kinetics400_rgb ([#704](https://github.com/open-mmlab/mmaction2/pull/704)) +- Add slowonly_nl_kinetics_pretrained_r50_4x16x1(8x8x1)\_20e_ava_rgb ([#730](https://github.com/open-mmlab/mmaction2/pull/730)) + +## 0.12.0 (28/02/2021) + +**Highlights** + +- Support TSM-MobileNetV2 +- Support TANet +- Support GPU Normalize + +**New Features** + +- Support TSM-MobileNetV2 ([#415](https://github.com/open-mmlab/mmaction2/pull/415)) +- Support flip with label mapping ([#591](https://github.com/open-mmlab/mmaction2/pull/591)) +- Add seed option for sampler ([#642](https://github.com/open-mmlab/mmaction2/pull/642)) +- Support GPU Normalize ([#586](https://github.com/open-mmlab/mmaction2/pull/586)) +- Support TANet ([#595](https://github.com/open-mmlab/mmaction2/pull/595)) + +**Improvements** + +- Training custom classes of ava dataset ([#555](https://github.com/open-mmlab/mmaction2/pull/555)) +- Add CN README in homepage ([#592](https://github.com/open-mmlab/mmaction2/pull/592), [#594](https://github.com/open-mmlab/mmaction2/pull/594)) +- Support soft label for CrossEntropyLoss ([#625](https://github.com/open-mmlab/mmaction2/pull/625)) +- Refactor config: Specify `train_cfg` and `test_cfg` in `model` ([#629](https://github.com/open-mmlab/mmaction2/pull/629)) +- Provide an alternative way to download older kinetics annotations 
([#597](https://github.com/open-mmlab/mmaction2/pull/597)) +- Update FAQ for + - 1). data pipeline about video and frames ([#598](https://github.com/open-mmlab/mmaction2/pull/598)) + - 2). how to show results ([#598](https://github.com/open-mmlab/mmaction2/pull/598)) + - 3). batch size setting for batchnorm ([#657](https://github.com/open-mmlab/mmaction2/pull/657)) + - 4). how to fix stages of backbone when finetuning models ([#658](https://github.com/open-mmlab/mmaction2/pull/658)) +- Modify default value of `save_best` ([#600](https://github.com/open-mmlab/mmaction2/pull/600)) +- Use BibTex rather than latex in markdown ([#607](https://github.com/open-mmlab/mmaction2/pull/607)) +- Add warnings of uninstalling mmdet and supplementary documents ([#624](https://github.com/open-mmlab/mmaction2/pull/624)) +- Support soft label for CrossEntropyLoss ([#625](https://github.com/open-mmlab/mmaction2/pull/625)) + +**Bug and Typo Fixes** + +- Fix value of `pem_low_temporal_iou_threshold` in BSN ([#556](https://github.com/open-mmlab/mmaction2/pull/556)) +- Fix ActivityNet download script ([#601](https://github.com/open-mmlab/mmaction2/pull/601)) + +**ModelZoo** + +- Add TSM-MobileNetV2 for Kinetics400 ([#415](https://github.com/open-mmlab/mmaction2/pull/415)) +- Add deeper SlowFast models ([#605](https://github.com/open-mmlab/mmaction2/pull/605)) + +## 0.11.0 (31/01/2021) + +**Highlights** + +- Support imgaug +- Support spatial temporal demo +- Refactor EvalHook, config structure, unittest structure + +**New Features** + +- Support [imgaug](https://imgaug.readthedocs.io/en/latest/index.html) for augmentations in the data pipeline ([#492](https://github.com/open-mmlab/mmaction2/pull/492)) +- Support setting `max_testing_views` for extremely large models to save GPU memory used ([#511](https://github.com/open-mmlab/mmaction2/pull/511)) +- Add spatial temporal demo ([#547](https://github.com/open-mmlab/mmaction2/pull/547), [#566](https://github.com/open-mmlab/mmaction2/pull/566)) + +**Improvements** + +- Refactor EvalHook ([#395](https://github.com/open-mmlab/mmaction2/pull/395)) +- Refactor AVA hook ([#567](https://github.com/open-mmlab/mmaction2/pull/567)) +- Add repo citation ([#545](https://github.com/open-mmlab/mmaction2/pull/545)) +- Add dataset size of Kinetics400 ([#503](https://github.com/open-mmlab/mmaction2/pull/503)) +- Add lazy operation docs ([#504](https://github.com/open-mmlab/mmaction2/pull/504)) +- Add class_weight for CrossEntropyLoss and BCELossWithLogits ([#509](https://github.com/open-mmlab/mmaction2/pull/509)) +- add some explanation about the resampling in slowfast ([#502](https://github.com/open-mmlab/mmaction2/pull/502)) +- Modify paper title in README.md ([#512](https://github.com/open-mmlab/mmaction2/pull/512)) +- Add alternative ways to download Kinetics ([#521](https://github.com/open-mmlab/mmaction2/pull/521)) +- Add OpenMMLab projects link in README ([#530](https://github.com/open-mmlab/mmaction2/pull/530)) +- Change default preprocessing to shortedge to 256 ([#538](https://github.com/open-mmlab/mmaction2/pull/538)) +- Add config tag in dataset README ([#540](https://github.com/open-mmlab/mmaction2/pull/540)) +- Add solution for markdownlint installation issue ([#497](https://github.com/open-mmlab/mmaction2/pull/497)) +- Add dataset overview in readthedocs ([#548](https://github.com/open-mmlab/mmaction2/pull/548)) +- Modify the trigger mode of the warnings of missing mmdet ([#583](https://github.com/open-mmlab/mmaction2/pull/583)) +- Refactor config structure 
([#488](https://github.com/open-mmlab/mmaction2/pull/488), [#572](https://github.com/open-mmlab/mmaction2/pull/572)) +- Refactor unittest structure ([#433](https://github.com/open-mmlab/mmaction2/pull/433)) + +**Bug and Typo Fixes** + +- Fix a bug about ava dataset validation ([#527](https://github.com/open-mmlab/mmaction2/pull/527)) +- Fix a bug about ResNet pretrain weight initialization ([#582](https://github.com/open-mmlab/mmaction2/pull/582)) +- Fix a bug in CI due to MMCV index ([#495](https://github.com/open-mmlab/mmaction2/pull/495)) +- Remove invalid links of MiT and MMiT ([#516](https://github.com/open-mmlab/mmaction2/pull/516)) +- Fix frame rate bug for AVA preparation ([#576](https://github.com/open-mmlab/mmaction2/pull/576)) + +**ModelZoo** + +## 0.10.0 (31/12/2020) + +**Highlights** + +- Support Spatio-Temporal Action Detection (AVA) +- Support precise BN + +**New Features** + +- Support precise BN ([#501](https://github.com/open-mmlab/mmaction2/pull/501/)) +- Support Spatio-Temporal Action Detection (AVA) ([#351](https://github.com/open-mmlab/mmaction2/pull/351)) +- Support to return feature maps in `inference_recognizer` ([#458](https://github.com/open-mmlab/mmaction2/pull/458)) + +**Improvements** + +- Add arg `stride` to long_video_demo.py, to make inference faster ([#468](https://github.com/open-mmlab/mmaction2/pull/468)) +- Support training and testing for Spatio-Temporal Action Detection ([#351](https://github.com/open-mmlab/mmaction2/pull/351)) +- Fix CI due to pip upgrade ([#454](https://github.com/open-mmlab/mmaction2/pull/454)) +- Add markdown lint in pre-commit hook ([#255](https://github.com/open-mmlab/mmaction2/pull/225)) +- Speed up confusion matrix calculation ([#465](https://github.com/open-mmlab/mmaction2/pull/465)) +- Use title case in modelzoo statistics ([#456](https://github.com/open-mmlab/mmaction2/pull/456)) +- Add FAQ documents for easy troubleshooting. ([#413](https://github.com/open-mmlab/mmaction2/pull/413), [#420](https://github.com/open-mmlab/mmaction2/pull/420), [#439](https://github.com/open-mmlab/mmaction2/pull/439)) +- Support Spatio-Temporal Action Detection with context ([#471](https://github.com/open-mmlab/mmaction2/pull/471)) +- Add class weight for CrossEntropyLoss and BCELossWithLogits ([#509](https://github.com/open-mmlab/mmaction2/pull/509)) +- Add Lazy OPs docs ([#504](https://github.com/open-mmlab/mmaction2/pull/504)) + +**Bug and Typo Fixes** + +- Fix typo in default argument of BaseHead ([#446](https://github.com/open-mmlab/mmaction2/pull/446)) +- Fix potential bug about `output_config` overwrite ([#463](https://github.com/open-mmlab/mmaction2/pull/463)) + +**ModelZoo** + +- Add SlowOnly, SlowFast for AVA2.1 ([#351](https://github.com/open-mmlab/mmaction2/pull/351)) + +## 0.9.0 (30/11/2020) + +**Highlights** + +- Support GradCAM utils for recognizers +- Support ResNet Audio model + +**New Features** + +- Automatically add modelzoo statistics to readthedocs ([#327](https://github.com/open-mmlab/mmaction2/pull/327)) +- Support GYM99 ([#331](https://github.com/open-mmlab/mmaction2/pull/331), [#336](https://github.com/open-mmlab/mmaction2/pull/336)) +- Add AudioOnly Pathway from AVSlowFast. 
([#355](https://github.com/open-mmlab/mmaction2/pull/355)) +- Add GradCAM utils for recognizer ([#324](https://github.com/open-mmlab/mmaction2/pull/324)) +- Add print config script ([#345](https://github.com/open-mmlab/mmaction2/pull/345)) +- Add online motion vector decoder ([#291](https://github.com/open-mmlab/mmaction2/pull/291)) + +**Improvements** + +- Support PyTorch 1.7 in CI ([#312](https://github.com/open-mmlab/mmaction2/pull/312)) +- Support to predict different labels in a long video ([#274](https://github.com/open-mmlab/mmaction2/pull/274)) +- Update docs bout test crops ([#359](https://github.com/open-mmlab/mmaction2/pull/359)) +- Polish code format using pylint manually ([#338](https://github.com/open-mmlab/mmaction2/pull/338)) +- Update unittest coverage ([#358](https://github.com/open-mmlab/mmaction2/pull/358), [#322](https://github.com/open-mmlab/mmaction2/pull/322), [#325](https://github.com/open-mmlab/mmaction2/pull/325)) +- Add random seed for building filelists ([#323](https://github.com/open-mmlab/mmaction2/pull/323)) +- Update colab tutorial ([#367](https://github.com/open-mmlab/mmaction2/pull/367)) +- set default batch_size of evaluation and testing to 1 ([#250](https://github.com/open-mmlab/mmaction2/pull/250)) +- Rename the preparation docs to `README.md` ([#388](https://github.com/open-mmlab/mmaction2/pull/388)) +- Move docs about demo to `demo/README.md` ([#329](https://github.com/open-mmlab/mmaction2/pull/329)) +- Remove redundant code in `tools/test.py` ([#310](https://github.com/open-mmlab/mmaction2/pull/310)) +- Automatically calculate number of test clips for Recognizer2D ([#359](https://github.com/open-mmlab/mmaction2/pull/359)) + +**Bug and Typo Fixes** + +- Fix rename Kinetics classnames bug ([#384](https://github.com/open-mmlab/mmaction2/pull/384)) +- Fix a bug in BaseDataset when `data_prefix` is None ([#314](https://github.com/open-mmlab/mmaction2/pull/314)) +- Fix a bug about `tmp_folder` in `OpenCVInit` ([#357](https://github.com/open-mmlab/mmaction2/pull/357)) +- Fix `get_thread_id` when not using disk as backend ([#354](https://github.com/open-mmlab/mmaction2/pull/354), [#357](https://github.com/open-mmlab/mmaction2/pull/357)) +- Fix the bug of HVU object `num_classes` from 1679 to 1678 ([#307](https://github.com/open-mmlab/mmaction2/pull/307)) +- Fix typo in `export_model.md` ([#399](https://github.com/open-mmlab/mmaction2/pull/399)) +- Fix OmniSource training configs ([#321](https://github.com/open-mmlab/mmaction2/pull/321)) +- Fix Issue #306: Bug of SampleAVAFrames ([#317](https://github.com/open-mmlab/mmaction2/pull/317)) + +**ModelZoo** + +- Add SlowOnly model for GYM99, both RGB and Flow ([#336](https://github.com/open-mmlab/mmaction2/pull/336)) +- Add auto modelzoo statistics in readthedocs ([#327](https://github.com/open-mmlab/mmaction2/pull/327)) +- Add TSN for HMDB51 pretrained on Kinetics400, Moments in Time and ImageNet. 
([#372](https://github.com/open-mmlab/mmaction2/pull/372)) + +## v0.8.0 (31/10/2020) + +**Highlights** + +- Support [OmniSource](https://arxiv.org/abs/2003.13042) +- Support C3D +- Support video recognition with audio modality +- Support HVU +- Support X3D + +**New Features** + +- Support AVA dataset preparation ([#266](https://github.com/open-mmlab/mmaction2/pull/266)) +- Support the training of video recognition dataset with multiple tag categories ([#235](https://github.com/open-mmlab/mmaction2/pull/235)) +- Support joint training with multiple training datasets of multiple formats, including images, untrimmed videos, etc. ([#242](https://github.com/open-mmlab/mmaction2/pull/242)) +- Support to specify a start epoch to conduct evaluation ([#216](https://github.com/open-mmlab/mmaction2/pull/216)) +- Implement X3D models, support testing with model weights converted from SlowFast ([#288](https://github.com/open-mmlab/mmaction2/pull/288)) +- Support specify a start epoch to conduct evaluation ([#216](https://github.com/open-mmlab/mmaction2/pull/216)) + +**Improvements** + +- Set default values of 'average_clips' in each config file so that there is no need to set it explicitly during testing in most cases ([#232](https://github.com/open-mmlab/mmaction2/pull/232)) +- Extend HVU datatools to generate individual file list for each tag category ([#258](https://github.com/open-mmlab/mmaction2/pull/258)) +- Support data preparation for Kinetics-600 and Kinetics-700 ([#254](https://github.com/open-mmlab/mmaction2/pull/254)) +- Use `metric_dict` to replace hardcoded arguments in `evaluate` function ([#286](https://github.com/open-mmlab/mmaction2/pull/286)) +- Add `cfg-options` in arguments to override some settings in the used config for convenience ([#212](https://github.com/open-mmlab/mmaction2/pull/212)) +- Rename the old evaluating protocol `mean_average_precision` as `mmit_mean_average_precision` since it is only used on MMIT and is not the `mAP` we usually talk about. 
Add `mean_average_precision`, which is the real `mAP` ([#235](https://github.com/open-mmlab/mmaction2/pull/235)) +- Add accurate setting (Three crop * 2 clip) and report corresponding performance for TSM model ([#241](https://github.com/open-mmlab/mmaction2/pull/241)) +- Add citations in each preparing_dataset.md in `tools/data/dataset` ([#289](https://github.com/open-mmlab/mmaction2/pull/289)) +- Update the performance of audio-visual fusion on Kinetics-400 ([#281](https://github.com/open-mmlab/mmaction2/pull/281)) +- Support data preparation of OmniSource web datasets, including GoogleImage, InsImage, InsVideo and KineticsRawVideo ([#294](https://github.com/open-mmlab/mmaction2/pull/294)) +- Use `metric_options` dict to provide metric args in `evaluate` ([#286](https://github.com/open-mmlab/mmaction2/pull/286)) + +**Bug Fixes** + +- Register `FrameSelector` in `PIPELINES` ([#268](https://github.com/open-mmlab/mmaction2/pull/268)) +- Fix the potential bug for default value in dataset_setting ([#245](https://github.com/open-mmlab/mmaction2/pull/245)) +- Fix multi-node dist test ([#292](https://github.com/open-mmlab/mmaction2/pull/292)) +- Fix the data preparation bug for `something-something` dataset ([#278](https://github.com/open-mmlab/mmaction2/pull/278)) +- Fix the invalid config url in slowonly README data benchmark ([#249](https://github.com/open-mmlab/mmaction2/pull/249)) +- Validate that the performance of models trained with videos have no significant difference comparing to the performance of models trained with rawframes ([#256](https://github.com/open-mmlab/mmaction2/pull/256)) +- Correct the `img_norm_cfg` used by TSN-3seg-R50 UCF-101 model, improve the Top-1 accuracy by 3% ([#273](https://github.com/open-mmlab/mmaction2/pull/273)) + +**ModelZoo** + +- Add Baselines for Kinetics-600 and Kinetics-700, including TSN-R50-8seg and SlowOnly-R50-8x8 ([#259](https://github.com/open-mmlab/mmaction2/pull/259)) +- Add OmniSource benchmark on MiniKineitcs ([#296](https://github.com/open-mmlab/mmaction2/pull/296)) +- Add Baselines for HVU, including TSN-R18-8seg on 6 tag categories of HVU ([#287](https://github.com/open-mmlab/mmaction2/pull/287)) +- Add X3D models ported from [SlowFast](https://github.com/facebookresearch/SlowFast/) ([#288](https://github.com/open-mmlab/mmaction2/pull/288)) + +## v0.7.0 (30/9/2020) + +**Highlights** + +- Support TPN +- Support JHMDB, UCF101-24, HVU dataset preparation +- support onnx model conversion + +**New Features** + +- Support the data pre-processing pipeline for the HVU Dataset ([#277](https://github.com/open-mmlab/mmaction2/pull/227/)) +- Support real-time action recognition from web camera ([#171](https://github.com/open-mmlab/mmaction2/pull/171)) +- Support onnx ([#160](https://github.com/open-mmlab/mmaction2/pull/160)) +- Support UCF101-24 preparation ([#219](https://github.com/open-mmlab/mmaction2/pull/219)) +- Support evaluating mAP for ActivityNet with [CUHK17_activitynet_pred](http://activity-net.org/challenges/2017/evaluation.html) ([#176](https://github.com/open-mmlab/mmaction2/pull/176)) +- Add the data pipeline for ActivityNet, including downloading videos, extracting RGB and Flow frames, finetuning TSN and extracting feature ([#190](https://github.com/open-mmlab/mmaction2/pull/190)) +- Support JHMDB preparation ([#220](https://github.com/open-mmlab/mmaction2/pull/220)) + +**ModelZoo** + +- Add finetuning setting for SlowOnly ([#173](https://github.com/open-mmlab/mmaction2/pull/173)) +- Add TSN and SlowOnly models trained with 
[OmniSource](https://arxiv.org/abs/2003.13042), which achieve 75.7% Top-1 with TSN-R50-3seg and 80.4% Top-1 with SlowOnly-R101-8x8 ([#215](https://github.com/open-mmlab/mmaction2/pull/215)) + +**Improvements** + +- Support demo with video url ([#165](https://github.com/open-mmlab/mmaction2/pull/165)) +- Support multi-batch when testing ([#184](https://github.com/open-mmlab/mmaction2/pull/184)) +- Add tutorial for adding a new learning rate updater ([#181](https://github.com/open-mmlab/mmaction2/pull/181)) +- Add config name in meta info ([#183](https://github.com/open-mmlab/mmaction2/pull/183)) +- Remove git hash in `__version__` ([#189](https://github.com/open-mmlab/mmaction2/pull/189)) +- Check mmcv version ([#189](https://github.com/open-mmlab/mmaction2/pull/189)) +- Update url with 'https://download.openmmlab.com' ([#208](https://github.com/open-mmlab/mmaction2/pull/208)) +- Update Docker file to support PyTorch 1.6 and update `install.md` ([#209](https://github.com/open-mmlab/mmaction2/pull/209)) +- Polish readsthedocs display ([#217](https://github.com/open-mmlab/mmaction2/pull/217), [#229](https://github.com/open-mmlab/mmaction2/pull/229)) + +**Bug Fixes** + +- Fix the bug when using OpenCV to extract only RGB frames with original shape ([#184](https://github.com/open-mmlab/mmaction2/pull/187)) +- Fix the bug of sthv2 `num_classes` from 339 to 174 ([#174](https://github.com/open-mmlab/mmaction2/pull/174), [#207](https://github.com/open-mmlab/mmaction2/pull/207)) + +## v0.6.0 (2/9/2020) + +**Highlights** + +- Support TIN, CSN, SSN, NonLocal +- Support FP16 training + +**New Features** + +- Support NonLocal module and provide ckpt in TSM and I3D ([#41](https://github.com/open-mmlab/mmaction2/pull/41)) +- Support SSN ([#33](https://github.com/open-mmlab/mmaction2/pull/33), [#37](https://github.com/open-mmlab/mmaction2/pull/37), [#52](https://github.com/open-mmlab/mmaction2/pull/52), [#55](https://github.com/open-mmlab/mmaction2/pull/55)) +- Support CSN ([#87](https://github.com/open-mmlab/mmaction2/pull/87)) +- Support TIN ([#53](https://github.com/open-mmlab/mmaction2/pull/53)) +- Support HMDB51 dataset preparation ([#60](https://github.com/open-mmlab/mmaction2/pull/60)) +- Support encoding videos from frames ([#84](https://github.com/open-mmlab/mmaction2/pull/84)) +- Support FP16 training ([#25](https://github.com/open-mmlab/mmaction2/pull/25)) +- Enhance demo by supporting rawframe inference ([#59](https://github.com/open-mmlab/mmaction2/pull/59)), output video/gif ([#72](https://github.com/open-mmlab/mmaction2/pull/72)) + +**ModelZoo** + +- Update Slowfast modelzoo ([#51](https://github.com/open-mmlab/mmaction2/pull/51)) +- Update TSN, TSM video checkpoints ([#50](https://github.com/open-mmlab/mmaction2/pull/50)) +- Add data benchmark for TSN ([#57](https://github.com/open-mmlab/mmaction2/pull/57)) +- Add data benchmark for SlowOnly ([#77](https://github.com/open-mmlab/mmaction2/pull/77)) +- Add BSN/BMN performance results with feature extracted by our codebase ([#99](https://github.com/open-mmlab/mmaction2/pull/99)) + +**Improvements** + +- Polish data preparation codes ([#70](https://github.com/open-mmlab/mmaction2/pull/70)) +- Improve data preparation scripts ([#58](https://github.com/open-mmlab/mmaction2/pull/58)) +- Improve unittest coverage and minor fix ([#62](https://github.com/open-mmlab/mmaction2/pull/62)) +- Support PyTorch 1.6 in CI ([#117](https://github.com/open-mmlab/mmaction2/pull/117)) +- Support `with_offset` for rawframe dataset 
([#48](https://github.com/open-mmlab/mmaction2/pull/48)) +- Support json annotation files ([#119](https://github.com/open-mmlab/mmaction2/pull/119)) +- Support `multi-class` in TSMHead ([#104](https://github.com/open-mmlab/mmaction2/pull/104)) +- Support using `val_step()` to validate data for each `val` workflow ([#123](https://github.com/open-mmlab/mmaction2/pull/123)) +- Use `xxInit()` method to get `total_frames` and make `total_frames` a required key ([#90](https://github.com/open-mmlab/mmaction2/pull/90)) +- Add paper introduction in model readme ([#140](https://github.com/open-mmlab/mmaction2/pull/140)) +- Adjust the directory structure of `tools/` and rename some scripts files ([#142](https://github.com/open-mmlab/mmaction2/pull/142)) + +**Bug Fixes** + +- Fix configs for localization test ([#67](https://github.com/open-mmlab/mmaction2/pull/67)) +- Fix configs of SlowOnly by fixing lr to 8 gpus ([#136](https://github.com/open-mmlab/mmaction2/pull/136)) +- Fix the bug in analyze_log ([#54](https://github.com/open-mmlab/mmaction2/pull/54)) +- Fix the bug of generating HMDB51 class index file ([#69](https://github.com/open-mmlab/mmaction2/pull/69)) +- Fix the bug of using `load_checkpoint()` in ResNet ([#93](https://github.com/open-mmlab/mmaction2/pull/93)) +- Fix the bug of `--work-dir` when using slurm training script ([#110](https://github.com/open-mmlab/mmaction2/pull/110)) +- Correct the sthv1/sthv2 rawframes filelist generate command ([#71](https://github.com/open-mmlab/mmaction2/pull/71)) +- `CosineAnnealing` typo ([#47](https://github.com/open-mmlab/mmaction2/pull/47)) + +## v0.5.0 (9/7/2020) + +**Highlights** + +- MMAction2 is released + +**New Features** + +- Support various datasets: UCF101, Kinetics-400, Something-Something V1&V2, Moments in Time, + Multi-Moments in Time, THUMOS14 +- Support various action recognition methods: TSN, TSM, R(2+1)D, I3D, SlowOnly, SlowFast, Non-local +- Support various action localization methods: BSN, BMN +- Colab demo for action recognition diff --git a/docs/zh_cn/notes/contribution_guide.md b/docs/zh_cn/notes/contribution_guide.md new file mode 100644 index 0000000000..07dbbd105c --- /dev/null +++ b/docs/zh_cn/notes/contribution_guide.md @@ -0,0 +1,63 @@ +# 参与贡献 MMAction2 + +欢迎任何类型的贡献,包括但不限于 + +- 修改拼写错误或代码错误 +- 添加文档或将文档翻译成其他语言 +- 添加新功能和新组件 + +## 工作流程 + +1. fork 并 pull 最新的 OpenMMLab 仓库 (MMAction2) +2. 签出到一个新分支(不要使用 master 分支提交 PR) +3. 进行修改并提交至 fork 出的自己的远程仓库 +4. 
在我们的仓库中创建一个 PR + +```{note} +如果你计划添加一些新的功能,并引入大量改动,请尽量首先创建一个 issue 来进行讨论。 +如果你是论文作者,希望在 MMAction2 中支持你的算法,请联系我们。 我们十分感谢你的贡献。 +``` + +## 代码风格 + +### Python + +我们采用 [PEP8](https://www.python.org/dev/peps/pep-0008/) 作为统一的代码风格。 + +我们使用下列工具来进行代码风格检查与格式化: + +- [flake8](https://github.com/PyCQA/flake8): Python 官方发布的代码规范检查工具,是多个检查工具的封装 +- [isort](https://github.com/timothycrosley/isort): 自动调整模块导入顺序的工具 +- [yapf](https://github.com/google/yapf): 一个 Python 文件的格式化工具。 +- [codespell](https://github.com/codespell-project/codespell): 检查单词拼写是否有误 +- [mdformat](https://github.com/executablebooks/mdformat): 检查 markdown 文件的工具 +- [docformatter](https://github.com/myint/docformatter): 一个 docstring 格式化工具。 + +yapf 和 isort 的格式设置位于 [setup.cfg](../../../setup.cfg) + +我们使用 [pre-commit hook](https://pre-commit.com/) 来保证每次提交时自动进行代 +码检查和格式化,启用的功能包括 `flake8`, `yapf`, `isort`, `trailing whitespaces`, `markdown files`, 修复 `end-of-files`, `double-quoted-strings`, +`python-encoding-pragma`, `mixed-line-ending`, 对 `requirments.txt`的排序等。 +pre-commit hook 的配置文件位于 [.pre-commit-config](../../../.pre-commit-config.yaml) + +在你克隆仓库后,你需要按照如下步骤安装并初始化 pre-commit hook。 + +```shell +pip install -U pre-commit +``` + +在仓库文件夹中执行 + +```shell +pre-commit install +``` + +在此之后,每次提交,代码规范检查和格式化工具都将被强制执行。 + +```{important} +在创建 PR 之前,请确保你的代码完成了代码规范检查,并经过了 yapf 的格式化。 +``` + +### C++ 和 CUDA + +C++ 和 CUDA 的代码规范遵从 [Google C++ Style Guide](https://google.github.io/styleguide/cppguide.html) diff --git a/docs/zh_cn/notes/faq.md b/docs/zh_cn/notes/faq.md new file mode 100644 index 0000000000..85d30ff06c --- /dev/null +++ b/docs/zh_cn/notes/faq.md @@ -0,0 +1 @@ +# 常见问题(内容建设中) diff --git a/docs/zh_cn/notes/projects.md b/docs/zh_cn/notes/projects.md new file mode 100644 index 0000000000..6734f69804 --- /dev/null +++ b/docs/zh_cn/notes/projects.md @@ -0,0 +1 @@ +# 基于 MMAction2 的项目列表(内容建设中) diff --git a/docs/zh_cn/stat.py b/docs/zh_cn/stat.py new file mode 100644 index 0000000000..166f7b32bd --- /dev/null +++ b/docs/zh_cn/stat.py @@ -0,0 +1,174 @@ +#!/usr/bin/env python +# Copyright (c) OpenMMLab. All rights reserved. 
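+# Note: this helper is run by the `builder-inited` hook registered in conf.py, +# right after merge_docs.sh has generated the `*_models.md` pages. It counts +# the papers, config files and checkpoints referenced in each model zoo +# category and writes the aggregated statistics page `modelzoo.md` that is +# listed in index.rst.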
+import functools as func +import glob +import re +from os.path import basename, splitext + +import numpy as np +import titlecase + + +def anchor(name): + return re.sub(r'-+', '-', re.sub(r'[^a-zA-Z0-9]', '-', + name.strip().lower())).strip('-') + + +# Count algorithms + +files = sorted(glob.glob('*_models.md')) +# files = sorted(glob.glob('docs/*_models.md')) + +stats = [] + +for f in files: + with open(f, 'r') as content_file: + content = content_file.read() + + # title + title = content.split('\n')[0].replace('#', '') + + # skip IMAGE and ABSTRACT tags + content = [ + x for x in content.split('\n') + if 'IMAGE' not in x and 'ABSTRACT' not in x + ] + content = '\n'.join(content) + + # count papers + papers = set( + (papertype, titlecase.titlecase(paper.lower().strip())) + for (papertype, paper) in re.findall( + r'\s*\n.*?\btitle\s*=\s*{(.*?)}', + content, re.DOTALL)) + # paper links + revcontent = '\n'.join(list(reversed(content.splitlines()))) + paperlinks = {} + for _, p in papers: + print(p) + q = p.replace('\\', '\\\\').replace('?', '\\?') + paperlinks[p] = ' '.join( + (f'[->]({splitext(basename(f))[0]}.html#{anchor(paperlink)})' + for paperlink in re.findall( + rf'\btitle\s*=\s*{{\s*{q}\s*}}.*?\n## (.*?)\s*[,;]?\s*\n', + revcontent, re.DOTALL | re.IGNORECASE))) + print(' ', paperlinks[p]) + paperlist = '\n'.join( + sorted(f' - [{t}] {x} ({paperlinks[x]})' for t, x in papers)) + # count configs + configs = set(x.lower().strip() + for x in re.findall(r'https.*configs/.*\.py', content)) + + # count ckpts + ckpts = set(x.lower().strip() + for x in re.findall(r'https://download.*\.pth', content) + if 'mmaction' in x) + + statsmsg = f""" +## [{title}]({f}) + +* 模型权重文件数量: {len(ckpts)} +* 配置文件数量: {len(configs)} +* 论文数量: {len(papers)} +{paperlist} + + """ + + stats.append((papers, configs, ckpts, statsmsg)) + +allpapers = func.reduce(lambda a, b: a.union(b), [p for p, _, _, _ in stats]) +allconfigs = func.reduce(lambda a, b: a.union(b), [c for _, c, _, _ in stats]) +allckpts = func.reduce(lambda a, b: a.union(b), [c for _, _, c, _ in stats]) +msglist = '\n'.join(x for _, _, _, x in stats) + +papertypes, papercounts = np.unique([t for t, _ in allpapers], + return_counts=True) +countstr = '\n'.join( + [f' - {t}: {c}' for t, c in zip(papertypes, papercounts)]) + +modelzoo = f""" +# 模型库统计 + +* 模型权重文件数量: {len(allckpts)} +* 配置文件数量: {len(allconfigs)} +* 论文数量: {len(allpapers)} +{countstr} + +For supported datasets, see [datasets overview](datasets.md). 
+ +{msglist} +""" + +with open('modelzoo.md', 'w') as f: + f.write(modelzoo) + +# # Count datasets +# +# files = ['supported_datasets.md'] +# # files = sorted(glob.glob('docs/tasks/*.md')) +# +# datastats = [] +# +# for f in files: +# with open(f, 'r') as content_file: +# content = content_file.read() +# +# # title +# title = content.split('\n')[0].replace('#', '') +# +# # count papers +# papers = set( +# (papertype, titlecase.titlecase(paper.lower().strip())) +# for (papertype, paper) in re.findall( +# r'\s*\n.*?\btitle\s*=\s*{(.*?)}', +# content, re.DOTALL)) +# # paper links +# revcontent = '\n'.join(list(reversed(content.splitlines()))) +# paperlinks = {} +# for _, p in papers: +# print(p) +# q = p.replace('\\', '\\\\').replace('?', '\\?') +# paperlinks[p] = ', '.join( +# (f'[{p.strip()} ->]({splitext(basename(f))[0]}.html#{anchor(p)})' +# for p in re.findall( +# rf'\btitle\s*=\s*{{\s*{q}\s*}}.*?\n## (.*?)\s*[,;]?\s*\n', +# revcontent, re.DOTALL | re.IGNORECASE))) +# print(' ', paperlinks[p]) +# paperlist = '\n'.join( +# sorted(f' - [{t}] {x} ({paperlinks[x]})' for t, x in papers)) +# +# statsmsg = f""" +# ## [{title}]({f}) +# +# * Number of papers: {len(papers)} +# {paperlist} +# +# """ +# +# datastats.append((papers, configs, ckpts, statsmsg)) +# +# alldatapapers = func.reduce(lambda a, b: a.union(b), +# [p for p, _, _, _ in datastats]) +# +# # Summarize +# +# msglist = '\n'.join(x for _, _, _, x in stats) +# datamsglist = '\n'.join(x for _, _, _, x in datastats) +# papertypes, papercounts = np.unique([t for t, _ in alldatapapers], +# return_counts=True) +# countstr = '\n'.join( +# [f' - {t}: {c}' for t, c in zip(papertypes, papercounts)]) +# +# modelzoo = f""" +# # Overview +# +# * Number of papers: {len(alldatapapers)} +# {countstr} +# +# For supported action algorithms, see [modelzoo overview](modelzoo.md). 
+# +# {datamsglist} +# """ +# +# with open('datasets.md', 'w') as f: +# f.write(modelzoo) diff --git a/docs/zh_cn/switch_language.md b/docs/zh_cn/switch_language.md new file mode 100644 index 0000000000..0009eafa9e --- /dev/null +++ b/docs/zh_cn/switch_language.md @@ -0,0 +1,3 @@ +## English + +## 简体中文 diff --git a/docs/zh_cn/user_guides/1_config.md b/docs/zh_cn/user_guides/1_config.md new file mode 100644 index 0000000000..22d2d90b90 --- /dev/null +++ b/docs/zh_cn/user_guides/1_config.md @@ -0,0 +1 @@ +# 教程 1:学习配置文件(内容建设中) diff --git a/docs/zh_cn/user_guides/2_data_prepare.md b/docs/zh_cn/user_guides/2_data_prepare.md new file mode 100644 index 0000000000..a2312461da --- /dev/null +++ b/docs/zh_cn/user_guides/2_data_prepare.md @@ -0,0 +1 @@ +# 教程 2:准备数据集(内容建设中) diff --git a/docs/zh_cn/user_guides/3_inference.md b/docs/zh_cn/user_guides/3_inference.md new file mode 100644 index 0000000000..cae7e314f6 --- /dev/null +++ b/docs/zh_cn/user_guides/3_inference.md @@ -0,0 +1 @@ +# 教程 3:使用现有模型推理(内容建设中) diff --git a/docs/zh_cn/user_guides/4_train_test.md b/docs/zh_cn/user_guides/4_train_test.md new file mode 100644 index 0000000000..70973799e2 --- /dev/null +++ b/docs/zh_cn/user_guides/4_train_test.md @@ -0,0 +1 @@ +# 教程 4:训练与测试(内容建设中) diff --git a/docs/zh_cn/user_guides/useful_tools.md b/docs/zh_cn/user_guides/useful_tools.md new file mode 100644 index 0000000000..e16e41793c --- /dev/null +++ b/docs/zh_cn/user_guides/useful_tools.md @@ -0,0 +1 @@ +# 其他实用工具(内容建设中) diff --git a/docs/zh_cn/user_guides/visualization.md b/docs/zh_cn/user_guides/visualization.md new file mode 100644 index 0000000000..3f6d7b9286 --- /dev/null +++ b/docs/zh_cn/user_guides/visualization.md @@ -0,0 +1 @@ +# 可视化工具(内容建设中) From 62f962c1548f066cd0727472e689c13e6dad18af Mon Sep 17 00:00:00 2001 From: cir7 <33249023+cir7@users.noreply.github.com> Date: Fri, 9 Dec 2022 16:31:43 +0800 Subject: [PATCH 36/57] [Enhance] add visualization tools (#2092) --- demo/README.md | 9 +- demo/demo.py | 92 +++--- docs/en/get_started.md | 4 +- mmaction/models/heads/base.py | 6 +- mmaction/structures/action_data_sample.py | 15 + mmaction/visualization/action_visualizer.py | 8 +- mmaction/visualization/video_backend.py | 59 +++- tools/analysis_tools/browse_dataset.py | 155 ---------- tools/visualizations/browse_dataset.py | 241 ++++++++++++++++ .../visualizations/vis_cam.py | 3 +- tools/visualizations/vis_scheduler.py | 273 ++++++++++++++++++ 11 files changed, 647 insertions(+), 218 deletions(-) delete mode 100644 tools/analysis_tools/browse_dataset.py create mode 100644 tools/visualizations/browse_dataset.py rename demo/demo_gradcam.py => tools/visualizations/vis_cam.py (98%) create mode 100644 tools/visualizations/vis_scheduler.py diff --git a/demo/README.md b/demo/README.md index 220cd84eea..91e4f48936 100644 --- a/demo/README.md +++ b/demo/README.md @@ -40,10 +40,11 @@ python demo/demo.py ${CONFIG_FILE} ${CHECKPOINT_FILE} ${VIDEO_FILE} ${LABEL_FILE Optional arguments: -- `DEVICE_TYPE`: Type of device to run the demo. Allowed values are cuda device like `'cuda:0'` or `'cpu'`. Defaults to `'cuda:0'`. -- `FPS`: FPS value of the output video. Defaults to 30. -- `FONT_SCALE`: Font scale of the label added in the video. Defaults to 0.5. -- `FONT_COLOR`: Font color of the label added in the video. Defaults to `'white'`. +- `--use-frames`: If specified, the demo will take rawframes as input. Otherwise, it will take a video as input. +- `DEVICE_TYPE`: Type of device to run the demo. Allowed values are cuda device like `cuda:0` or `cpu`. 
If not specified, it will be set to `cuda:0`. +- `FPS`: FPS value of the output video when using rawframes as input. If not specified, it will be set to 30. +- `FONT_SCALE`: Font scale of the text added in the video. If not specified, it will be None. +- `FONT_COLOR`: Font color of the text added in the video. If not specified, it will be `white`. - `TARGET_RESOLUTION`: Resolution(desired_width, desired_height) for resizing the frames before output when using a video as input. If not specified, it will be None and the frames are resized by keeping the existing aspect ratio. - `OUT_FILE`: Path to the output file which can be a video format or gif format. If not specified, it will be set to `None` and does not generate the output file. diff --git a/demo/demo.py b/demo/demo.py index 5112d64707..5cebcd3abe 100644 --- a/demo/demo.py +++ b/demo/demo.py @@ -5,12 +5,11 @@ from typing import Optional, Tuple import cv2 -import numpy as np -import webcolors from mmengine import Config, DictAction from mmaction.apis import inference_recognizer, init_recognizer from mmaction.utils import register_all_modules +from mmaction.visualization import ActionVisualizer def parse_args(): @@ -36,13 +35,13 @@ def parse_args(): 'generate file') parser.add_argument( '--font-scale', - default=0.5, + default=None, type=float, - help='font scale of the label in output video') + help='font scale of the text in output video') parser.add_argument( '--font-color', default='white', - help='font color of the label in output video') + help='font color of the text in output video') parser.add_argument( '--target-resolution', nargs=2, @@ -56,13 +55,16 @@ def parse_args(): return args -def get_output(video_path: str, - out_filename: str, - label: str, - fps: int = 30, - font_scale: float = 0.5, - font_color: str = 'white', - target_resolution: Optional[Tuple[int]] = None) -> None: +def get_output( + video_path: str, + out_filename: str, + data_sample: str, + labels: list, + fps: int = 30, + font_scale: Optional[str] = None, + font_color: str = 'white', + target_resolution: Optional[Tuple[int]] = None, +) -> None: """Get demo output using ``moviepy``. This function will generate video file or gif file from raw video or @@ -72,10 +74,11 @@ def get_output(video_path: str, Args: video_path (str): The video file path. out_filename (str): Output filename for the generated file. - label (str): Predicted label of the generated file. + datasample (str): Predicted label of the generated file. + labels (list): Label list of current dataset. fps (int): Number of picture frames to read per second. Defaults to 30. - font_scale (float): Font scale of the label. Defaults to 0.5. - font_color (str): Font color of the label. Defaults to ``white``. + font_scale (float): Font scale of the text. Defaults to None. + font_color (str): Font color of the text. Defaults to ``white``. target_resolution (Tuple[int], optional): Set to (desired_width desired_height) to have resized frames. 
If either dimension is None, the frames are resized by keeping @@ -87,15 +90,12 @@ def get_output(video_path: str, try: import decord - from moviepy.editor import ImageSequenceClip except ImportError: - raise ImportError('Please install moviepy and decord to ' - 'enable output file.') + raise ImportError('Please install decord to enable output file.') # Channel Order is `BGR` video = decord.VideoReader(video_path) frames = [x.asnumpy()[..., ::-1] for x in video] - if target_resolution: w, h = target_resolution frame_h, frame_w, _ = frames[0].shape @@ -105,29 +105,30 @@ def get_output(video_path: str, h = int(w / frame_w * frame_h) frames = [cv2.resize(f, (w, h)) for f in frames] - textsize = cv2.getTextSize(label, cv2.FONT_HERSHEY_DUPLEX, font_scale, - 1)[0] - textheight = textsize[1] - padding = 10 - location = (padding, padding + textheight) - - if isinstance(font_color, str): - font_color = webcolors.name_to_rgb(font_color)[::-1] - - frames = [np.array(frame) for frame in frames] - for frame in frames: - cv2.putText(frame, label, location, cv2.FONT_HERSHEY_DUPLEX, - font_scale, font_color, 1) - - # RGB order - frames = [x[..., ::-1] for x in frames] - video_clips = ImageSequenceClip(frames, fps=fps) - - out_type = osp.splitext(out_filename)[1][1:] - if out_type == 'gif': - video_clips.write_gif(out_filename) - else: - video_clips.write_videofile(out_filename, remove_temp=True) + # init visualizer + out_type = 'gif' if osp.splitext(out_filename)[1] == '.gif' else 'video' + vis_backends_cfg = [ + dict( + type='LocalVisBackend', + out_type=out_type, + save_dir='demo', + fps=fps) + ] + visualizer = ActionVisualizer( + vis_backends=vis_backends_cfg, save_dir='place_holder') + visualizer.dataset_meta = dict(classes=labels) + + text_cfg = {'colors': font_color} + if font_scale is not None: + text_cfg.update({'font_sizes': font_scale}) + + visualizer.add_datasample( + out_filename, + frames, + data_sample, + draw_pred=True, + draw_gt=False, + text_cfg=text_cfg) def main(): @@ -142,9 +143,9 @@ def main(): # Build the recognizer from a config file and checkpoint file/url model = init_recognizer(cfg, args.checkpoint, device=args.device) - result = inference_recognizer(model, args.video) + pred_result = inference_recognizer(model, args.video) - pred_scores = result.pred_scores.item.tolist() + pred_scores = pred_result.pred_scores.item.tolist() score_tuples = tuple(zip(range(len(pred_scores)), pred_scores)) score_sorted = sorted(score_tuples, key=itemgetter(1), reverse=True) top5_label = score_sorted[:5] @@ -171,7 +172,8 @@ def main(): get_output( args.video, args.out_filename, - results[0][0], + pred_result, + labels, fps=args.fps, font_scale=args.font_scale, font_color=args.font_color, diff --git a/docs/en/get_started.md b/docs/en/get_started.md index 59a8b231b7..7ae03f9774 100644 --- a/docs/en/get_started.md +++ b/docs/en/get_started.md @@ -119,9 +119,9 @@ video_file = 'demo/demo.mp4' label_file = 'tools/data/kinetics/label_map_k400.txt' register_all_modules() # register all modules and set mmaction2 as the default scope. 
model = init_recognizer(config_file, checkpoint_file, device='cpu') # or device='cuda:0' -result = inference_recognizer(model, video_file) +pred_result = inference_recognizer(model, video_file) -pred_scores = result.pred_scores.item.tolist() +pred_scores = pred_result.pred_scores.item.tolist() score_tuples = tuple(zip(range(len(pred_scores)), pred_scores)) score_sorted = sorted(score_tuples, key=itemgetter(1), reverse=True) top5_label = score_sorted[:5] diff --git a/mmaction/models/heads/base.py b/mmaction/models/heads/base.py index 0f3e0785f0..10ceae3dbb 100644 --- a/mmaction/models/heads/base.py +++ b/mmaction/models/heads/base.py @@ -184,10 +184,14 @@ def predict_by_feat(self, cls_scores: Tensor, """ num_segs = cls_scores.shape[0] // len(data_samples) cls_scores = self.average_clip(cls_scores, num_segs=num_segs) + pred_labels = cls_scores.argmax(dim=-1, keepdim=True).detach() - for data_sample, score in zip(data_samples, cls_scores): + for data_sample, score, pred_lable in zip(data_samples, cls_scores, + pred_labels): prediction = LabelData(item=score) + pred_label = LabelData(item=pred_lable) data_sample.pred_scores = prediction + data_sample.pred_labels = pred_label return data_samples def average_clip(self, cls_scores: Tensor, num_segs: int = 1) -> Tensor: diff --git a/mmaction/structures/action_data_sample.py b/mmaction/structures/action_data_sample.py index ab0eef440e..c75f6654a1 100644 --- a/mmaction/structures/action_data_sample.py +++ b/mmaction/structures/action_data_sample.py @@ -52,6 +52,21 @@ def pred_scores(self): """Deleter of `pred_scores`""" del self._pred_scores + @property + def pred_labels(self): + """Property of `pred_labels`""" + return self._pred_labels + + @pred_labels.setter + def pred_labels(self, value): + """Setter of `pred_labels`""" + self.set_field(value, '_pred_labels', LabelData) + + @pred_labels.deleter + def pred_labels(self): + """Deleter of `pred_labels`""" + del self._pred_labels + @property def proposals(self): """Property of `proposals`""" diff --git a/mmaction/visualization/action_visualizer.py b/mmaction/visualization/action_visualizer.py index be2eb1cac7..fba9d6c600 100644 --- a/mmaction/visualization/action_visualizer.py +++ b/mmaction/visualization/action_visualizer.py @@ -203,7 +203,7 @@ def add_datasample(self, resulted_video = [] for frame_idx, frame in enumerate(video): - frame_name = 'frame %d of %s' % (frame_idx, name) + frame_name = 'frame %d of %s' % (frame_idx + 1, name) if rescale_factor is not None: frame = mmcv.imrescale(frame, rescale_factor) @@ -243,16 +243,16 @@ def add_datasample(self, texts.append(prefix + ('\n' + ' ' * len(prefix)).join(labels)) img_scale = _get_adaptive_scale(frame.shape[:2]) - text_cfg = { + _text_cfg = { 'positions': np.array([(img_scale * 5, ) * 2]).astype(np.int32), 'font_sizes': int(img_scale * 7), 'font_families': 'monospace', 'colors': 'white', 'bboxes': dict(facecolor='black', alpha=0.5, boxstyle='Round'), - **text_cfg } - self.draw_texts('\n'.join(texts), **text_cfg) + _text_cfg.update(text_cfg) + self.draw_texts('\n'.join(texts), **_text_cfg) drawn_img = self.get_image() resulted_video.append(drawn_img) diff --git a/mmaction/visualization/video_backend.py b/mmaction/visualization/video_backend.py index 3c1373a350..9b6366650e 100644 --- a/mmaction/visualization/video_backend.py +++ b/mmaction/visualization/video_backend.py @@ -21,8 +21,36 @@ class LocalVisBackend(LocalVisBackend): """Local visualization backend class with video support. See mmengine.visualization.LocalVisBackend for more details. 
+ + Args: + save_dir (str, optional): The root directory to save the files + produced by the visualizer. If it is none, it means no data + is stored. + img_save_dir (str): The directory to save images. + Defaults to ``'vis_image'``. + config_save_file (str): The file name to save config. + Defaults to ``'config.py'``. + scalar_save_file (str): The file name to save scalar values. + Defaults to ``'scalars.json'``. + out_type (str): Output format type, choose from 'img', 'gif', + 'video'. Defaults to ``'img'``. + fps (int): Frames per second for saving video. Defaults to 5. """ + def __init__( + self, + save_dir: str, + img_save_dir: str = 'vis_image', + config_save_file: str = 'config.py', + scalar_save_file: str = 'scalars.json', + out_type: str = 'img', + fps: int = 5, + ): + super().__init__(save_dir, img_save_dir, config_save_file, + scalar_save_file) + self.out_type = out_type + self.fps = fps + @force_init_env def add_video(self, name: str, @@ -39,12 +67,30 @@ def add_video(self, """ assert frames.dtype == np.uint8 - frames_dir = osp.join(self._save_dir, name, f'frames_{step}') - os.makedirs(frames_dir, exist_ok=True) - for idx, frame in enumerate(frames): - drawn_image = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) - save_file_name = f'{idx}.png' - cv2.imwrite(osp.join(frames_dir, save_file_name), drawn_image) + if self.out_type == 'img': + frames_dir = osp.join(self._save_dir, name, f'frames_{step}') + os.makedirs(frames_dir, exist_ok=True) + for idx, frame in enumerate(frames): + drawn_image = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) + save_file_name = f'{idx}.png' + cv2.imwrite(osp.join(frames_dir, save_file_name), drawn_image) + else: + try: + from moviepy.editor import ImageSequenceClip + except ImportError: + raise ImportError('Please install moviepy to enable ' + 'output file.') + + frames = [x[..., ::-1] for x in frames] + video_clips = ImageSequenceClip(frames, fps=self.fps) + name = osp.splitext(name)[0] + if self.out_type == 'gif': + out_path = osp.join(self._save_dir, name + '.gif') + video_clips.write_gif(out_path, logger=None) + elif self.out_type == 'video': + out_path = osp.join(self._save_dir, name + '.mp4') + video_clips.write_videofile( + out_path, remove_temp=True, logger=None) @VISBACKENDS.register_module() @@ -71,6 +117,7 @@ def add_video(self, frames (np.ndarray): The frames to be saved. The format should be RGB. The shape should be (T, H, W, C). step is a useless parameter that Wandb does not need. + fps (int): Frames per second. Defaults to 4. """ frames = frames.transpose(0, 3, 1, 2) self._wandb.log({'video': wandb.Video(frames, fps=fps, format='gif')}) diff --git a/tools/analysis_tools/browse_dataset.py b/tools/analysis_tools/browse_dataset.py deleted file mode 100644 index ae41662b41..0000000000 --- a/tools/analysis_tools/browse_dataset.py +++ /dev/null @@ -1,155 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. 
-import argparse -import os.path as osp -import sys - -import mmcv -from mmengine.config import Config, DictAction -from mmengine.dataset import Compose - -from mmaction.registry import DATASETS, VISUALIZERS -from mmaction.utils import register_all_modules -from mmaction.visualization import ActionVisualizer - - -def parse_args(): - parser = argparse.ArgumentParser(description='Browse a dataset') - parser.add_argument('config', help='train config file path') - parser.add_argument( - '--output-dir', - default=None, - type=str, - help='If there is no display interface, you can save it.') - parser.add_argument( - '--show-frames', - default=False, - action='store_true', - help='Whether to display the frames of the video. Defaults to False,' - 'Please make sure you have the display interface') - parser.add_argument( - '--phase', - default='train', - type=str, - choices=['train', 'test', 'val'], - help='phase of dataset to visualize, accept "train" "test" and "val".' - ' Defaults to "train".') - parser.add_argument( - '--show-number', - type=int, - default=sys.maxsize, - help='number of images selected to visualize, must bigger than 0. if ' - 'the number is bigger than length of dataset, show all the images in ' - 'dataset; default "sys.maxsize", show all images in dataset') - parser.add_argument( - '--show-interval', - type=float, - default=2, - help='the interval of show (s)') - parser.add_argument( - '--mode', - default='transformed', - type=str, - choices=['original', 'transformed'], - help='display mode; display original videos or transformed videos.' - '"original" means show videos load from disk;' - '"transformed" means to show videos after transformed; ' - 'Defaults to "transformed".') - parser.add_argument( - '--rescale-factor', - type=float, - help='video rescale factor, which is useful if the output is too ' - 'large or too small.') - parser.add_argument( - '--cfg-options', - nargs='+', - action=DictAction, - help='override some settings in the used config, the key-value pair ' - 'in xxx=yyy format will be merged into config file. If the value to ' - 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' - 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' - 'Note that the quotation marks are necessary and that no white space ' - 'is allowed.') - args = parser.parse_args() - return args - - -class InspectCompose(Compose): - """Compose multiple transforms sequentially. - - And record "imgs" field of all results in one list. 
- """ - - def __init__(self, transforms, intermediate_imgs): - super().__init__(transforms=transforms) - self.intermediate_imgs = intermediate_imgs - - def __call__(self, data): - - for idx, t in enumerate(self.transforms): - data = t(data) - if data is None: - return None - if 'imgs' in data: - name = t.__class__.__name__ - imgs = data['imgs'].copy() - if name != 'FormatShape': - self.intermediate_imgs.append({'name': name, 'imgs': imgs}) - return data - - -def main(): - args = parse_args() - cfg = Config.fromfile(args.config) - if args.cfg_options is not None: - cfg.merge_from_dict(args.cfg_options) - - # register all modules in mmaction2 into the registries - register_all_modules() - - dataset_cfg = cfg.get(args.phase + '_dataloader').get('dataset') - dataset = DATASETS.build(dataset_cfg) - - intermediate_imgs = [] - dataset.pipeline = InspectCompose(dataset.pipeline.transforms, - intermediate_imgs) - - # init visualizer - default_visualizer = { - 'type': 'ActionVisualizer', - 'name': 'dataset_browser', - 'save_dir': 'temp_browse_dataset' - } - visualizer = cfg.get('visualizer', default_visualizer) - visualizer: ActionVisualizer = VISUALIZERS.build(cfg.visualizer) - visualizer.dataset_meta = dataset.metainfo - - # init visualization video number - display_number = min(args.show_number, len(dataset)) - progress_bar = mmcv.ProgressBar(display_number) - - for i, item in zip(range(display_number), dataset): - if args.mode == 'original': - video = intermediate_imgs[0]['imgs'] - elif args.mode == 'transformed': - video = intermediate_imgs[-1]['imgs'] - else: - raise NameError('Currently %s mode is not supported!' % args.mode) - intermediate_imgs.clear() - - data_sample = item['data_sample'].numpy() - - file_id = f'video_{i}' - out_folder = osp.join(args.output_dir, - file_id) if args.output_dir is not None else None - - visualizer.add_datasample( - file_id, - video, - data_sample, - show_frames=args.show_frames, - out_folder=out_folder) - progress_bar.update() - - -if __name__ == '__main__': - main() diff --git a/tools/visualizations/browse_dataset.py b/tools/visualizations/browse_dataset.py new file mode 100644 index 0000000000..5247db19c2 --- /dev/null +++ b/tools/visualizations/browse_dataset.py @@ -0,0 +1,241 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import sys +import warnings +from copy import deepcopy + +import cv2 +import mmcv +import numpy as np +from mmengine.config import Config, DictAction +from mmengine.dataset import Compose +from mmengine.utils import ProgressBar +from mmengine.visualization import Visualizer + +from mmaction.registry import DATASETS +from mmaction.utils import register_all_modules +from mmaction.visualization import ActionVisualizer +from mmaction.visualization.action_visualizer import _get_adaptive_scale + + +def parse_args(): + parser = argparse.ArgumentParser(description='Browse a dataset') + parser.add_argument('config', help='train config file path') + parser.add_argument('--label', default=None, type=str, help='label file') + parser.add_argument( + '--output-dir', + '-o', + default=None, + type=str, + help='If there is no display interface, you can save it.') + parser.add_argument( + '--phase', + '-p', + default='train', + type=str, + choices=['train', 'test', 'val'], + help='phase of dataset to visualize, accept "train" "test" and "val".' + ' Defaults to "train".') + parser.add_argument( + '--show-number', + '-n', + type=int, + default=sys.maxsize, + help='number of images selected to visualize, must bigger than 0. 
if ' + 'the number is bigger than length of dataset, show all the images in ' + 'dataset; default "sys.maxsize", show all images in dataset') + parser.add_argument( + '--fps', + default=5, + type=int, + help='specify fps value of the output video when using rawframes to ' + 'generate file') + parser.add_argument( + '--mode', + '-m', + default='transformed', + type=str, + choices=['original', 'transformed', 'concat', 'pipeline'], + help='display mode; display original pictures or transformed pictures' + ' or comparison pictures. "original" means show images load from disk' + '; "transformed" means to show images after transformed; "concat" ' + 'means show images stitched by "original" and "output" images. ' + '"pipeline" means show all the intermediate images. ' + 'Defaults to "transformed".') + parser.add_argument( + '--rescale-factor', + '-r', + type=float, + help='video rescale factor, which is useful if the output is too ' + 'large or too small.') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + return args + + +def make_grid(videos, names, rescale_factor=None): + """Concat list of pictures into a single big picture, align height here.""" + vis = Visualizer() + + ori_shapes = [vid[0].shape[:2] for vid in videos] + if rescale_factor is not None: + videos = [[mmcv.imrescale(img, rescale_factor) for img in video] + for video in videos] + + max_height = int(max(vid[0].shape[0] for vid in videos) * 1.4) + min_width = min(vid[0].shape[1] for vid in videos) + horizontal_gap = min_width // 10 + img_scale = _get_adaptive_scale((max_height, min_width)) + + texts = [] + text_positions = [] + start_x = 0 + for i, vid in enumerate(videos): + for j, img in enumerate(vid): + pad_height = (max_height - img.shape[0]) // 2 + pad_width = horizontal_gap // 2 + # make border + videos[i][j] = cv2.copyMakeBorder( + img, + pad_height, + max_height - img.shape[0] - pad_height + + int(img_scale * 30 * 2), + pad_width, + pad_width, + cv2.BORDER_CONSTANT, + value=(255, 255, 255)) + + texts.append(f'{names[i]}\n{ori_shapes[i]}') + text_positions.append( + [start_x + img.shape[1] // 2 + pad_width, max_height]) + start_x += img.shape[1] + horizontal_gap + + out_frames = [] + for i in range(len(videos[0])): + imgs = [vid[i] for vid in videos] + display_img = np.concatenate(imgs, axis=1) + vis.set_image(display_img) + img_scale = _get_adaptive_scale(display_img.shape[:2]) + vis.draw_texts( + texts, + positions=np.array(text_positions), + font_sizes=img_scale * 7, + colors='black', + horizontal_alignments='center', + font_families='monospace') + out_frames.append(vis.get_image()) + return out_frames + + +class InspectCompose(Compose): + """Compose multiple transforms sequentially. + + And record "imgs" field of all results in one list. 
+ """ + + def __init__(self, transforms, intermediate_imgs): + super().__init__(transforms=transforms) + self.intermediate_imgs = intermediate_imgs + + def __call__(self, data): + + for idx, t in enumerate(self.transforms): + data = t(data) + if data is None: + return None + if 'imgs' in data: + name = t.__class__.__name__ + imgs = deepcopy(data['imgs']) + if name == 'FormatShape': + continue + if name == 'ThreeCrop': + n_crops = 3 + clip_len = len(imgs) // n_crops + crop_imgs = [ + imgs[idx * clip_len:(idx + 1) * clip_len] + for idx in range(n_crops) + ] + imgs = np.concatenate(crop_imgs, axis=1) + imgs = [img for img in imgs] + if name == 'TenCrop': + warnings.warn( + 'TenCrop is not supported, only show one crop') + self.intermediate_imgs.append({'name': name, 'imgs': imgs}) + return data + + +def main(): + args = parse_args() + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + # register all modules in mmaction2 into the registries + register_all_modules() + + dataset_cfg = cfg.get(args.phase + '_dataloader').get('dataset') + dataset = DATASETS.build(dataset_cfg) + + intermediate_imgs = [] + dataset.pipeline = InspectCompose(dataset.pipeline.transforms, + intermediate_imgs) + + # init visualizer + vis_backends = [ + dict( + type='LocalVisBackend', + out_type='video', + save_dir=args.output_dir, + fps=args.fps) + ] + visualizer = ActionVisualizer( + vis_backends=vis_backends, save_dir='place_holder') + + if args.label: + labels = open(args.label).readlines() + labels = [x.strip() for x in labels] + visualizer.dataset_meta = dict(classes=labels) + + # init visualization video number + display_number = min(args.show_number, len(dataset)) + progress_bar = ProgressBar(display_number) + + for i, item in zip(range(display_number), dataset): + rescale_factor = args.rescale_factor + if args.mode == 'original': + video = intermediate_imgs[0]['imgs'] + elif args.mode == 'transformed': + video = intermediate_imgs[-1]['imgs'] + elif args.mode == 'concat': + ori_video = intermediate_imgs[0]['imgs'] + trans_video = intermediate_imgs[-1]['imgs'] + video = make_grid([ori_video, trans_video], + ['original', 'transformed'], rescale_factor) + rescale_factor = None + else: + video = make_grid([result['imgs'] for result in intermediate_imgs], + [result['name'] for result in intermediate_imgs], + rescale_factor) + rescale_factor = None + + intermediate_imgs.clear() + + data_sample = item['data_samples'].numpy() + + file_id = f'video_{i}' + video = [x[..., ::-1] for x in video] + visualizer.add_datasample(file_id, video, data_sample) + progress_bar.update() + + +if __name__ == '__main__': + main() diff --git a/demo/demo_gradcam.py b/tools/visualizations/vis_cam.py similarity index 98% rename from demo/demo_gradcam.py rename to tools/visualizations/vis_cam.py index 9a97145d37..f816cce922 100644 --- a/demo/demo_gradcam.py +++ b/tools/visualizations/vis_cam.py @@ -15,7 +15,8 @@ def parse_args(): - parser = argparse.ArgumentParser(description='MMAction2 GradCAM demo') + parser = argparse.ArgumentParser( + description='MMAction2 GradCAM Visualization') parser.add_argument('config', help='test config file path') parser.add_argument('checkpoint', help='checkpoint file/url') parser.add_argument('video', help='video file/url or rawframes directory') diff --git a/tools/visualizations/vis_scheduler.py b/tools/visualizations/vis_scheduler.py new file mode 100644 index 0000000000..0d50c5191e --- /dev/null +++ b/tools/visualizations/vis_scheduler.py @@ -0,0 
+1,273 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import argparse +import json +import os.path as osp +import re +from pathlib import Path +from unittest.mock import MagicMock + +import matplotlib.pyplot as plt +import rich +import torch.nn as nn +from mmengine.config import Config, DictAction +from mmengine.hooks import Hook +from mmengine.model import BaseModel +from mmengine.runner import Runner +from mmengine.visualization import Visualizer +from rich.progress import BarColumn, MofNCompleteColumn, Progress, TextColumn + +from mmaction.utils import register_all_modules + + +class SimpleModel(BaseModel): + """simple model that do nothing in train_step.""" + + def __init__(self): + super(SimpleModel, self).__init__() + self.data_preprocessor = nn.Identity() + self.conv = nn.Conv2d(1, 1, 1) + + def forward(self, inputs, data_samples, mode='tensor'): + pass + + def train_step(self, data, optim_wrapper): + pass + + +class ParamRecordHook(Hook): + + def __init__(self, by_epoch): + super().__init__() + self.by_epoch = by_epoch + self.lr_list = [] + self.momentum_list = [] + self.task_id = 0 + self.progress = Progress(BarColumn(), MofNCompleteColumn(), + TextColumn('{task.description}')) + + def before_train(self, runner): + if self.by_epoch: + total = runner.train_loop.max_epochs + self.task_id = self.progress.add_task( + 'epochs', start=True, total=total) + else: + total = runner.train_loop.max_iters + self.task_id = self.progress.add_task( + 'iters', start=True, total=total) + self.progress.start() + + def after_train_epoch(self, runner): + if self.by_epoch: + self.progress.update(self.task_id, advance=1) + + def after_train_iter(self, runner, batch_idx, data_batch, outputs): + if not self.by_epoch: + self.progress.update(self.task_id, advance=1) + self.lr_list.append(runner.optim_wrapper.get_lr()['lr'][0]) + self.momentum_list.append( + runner.optim_wrapper.get_momentum()['momentum'][0]) + + def after_train(self, runner): + self.progress.stop() + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Visualize a Dataset Pipeline') + parser.add_argument('config', help='config file path') + parser.add_argument( + '-p', + '--parameter', + type=str, + default='lr', + choices=['lr', 'momentum'], + help='The parameter to visualize its change curve, choose from' + '"lr" and "momentum". Defaults to "lr".') + parser.add_argument( + '-d', + '--dataset-size', + type=int, + help='The size of the dataset. If specify, `build_dataset` will ' + 'be skipped and use this size as the dataset size.') + parser.add_argument( + '-n', + '--ngpus', + type=int, + default=1, + help='The number of GPUs used in training.') + parser.add_argument( + '-s', + '--save-path', + type=Path, + help='The learning rate curve plot save path') + parser.add_argument( + '--log-level', + default='WARNING', + help='The log level of the handler and logger. Defaults to ' + 'WARNING.') + parser.add_argument('--title', type=str, help='title of figure') + parser.add_argument( + '--style', type=str, default='whitegrid', help='style of plt') + parser.add_argument('--not-show', default=False, action='store_true') + parser.add_argument( + '--window-size', + default='12*7', + help='Size of the window to display images, in format of "$W*$H".') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. 
If the value to ' + 'be overwritten is a list, it should be like key="[a,b]" or key=a,b ' + 'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" ' + 'Note that the quotation marks are necessary and that no white space ' + 'is allowed.') + args = parser.parse_args() + if args.window_size != '': + assert re.match(r'\d+\*\d+', args.window_size), \ + "'window-size' must be in format 'W*H'." + + return args + + +def plot_curve(lr_list, args, param_name, iters_per_epoch, by_epoch=True): + """Plot learning rate vs iter graph.""" + try: + import seaborn as sns + sns.set_style(args.style) + except ImportError: + pass + + wind_w, wind_h = args.window_size.split('*') + wind_w, wind_h = int(wind_w), int(wind_h) + plt.figure(figsize=(wind_w, wind_h)) + + ax: plt.Axes = plt.subplot() + ax.plot(lr_list, linewidth=1) + + if by_epoch: + ax.xaxis.tick_top() + ax.set_xlabel('Iters') + ax.xaxis.set_label_position('top') + sec_ax = ax.secondary_xaxis( + 'bottom', + functions=(lambda x: x / iters_per_epoch, + lambda y: y * iters_per_epoch)) + sec_ax.set_xlabel('Epochs') + else: + plt.xlabel('Iters') + plt.ylabel(param_name) + + if args.title is None: + plt.title(f'{osp.basename(args.config)} {param_name} curve') + else: + plt.title(args.title) + + +def simulate_train(data_loader, cfg, by_epoch): + model = SimpleModel() + param_record_hook = ParamRecordHook(by_epoch=by_epoch) + default_hooks = dict( + param_scheduler=cfg.default_hooks['param_scheduler'], + runtime_info=None, + timer=None, + logger=None, + checkpoint=None, + sampler_seed=None, + param_record=param_record_hook) + + runner = Runner( + model=model, + work_dir=cfg.work_dir, + train_dataloader=data_loader, + train_cfg=cfg.train_cfg, + log_level=cfg.log_level, + optim_wrapper=cfg.optim_wrapper, + param_scheduler=cfg.param_scheduler, + default_scope=cfg.default_scope, + default_hooks=default_hooks, + visualizer=MagicMock(spec=Visualizer), + custom_hooks=cfg.get('custom_hooks', None)) + + runner.train() + + return param_record_hook.lr_list, param_record_hook.momentum_list + + +def main(): + args = parse_args() + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + if cfg.get('work_dir', None) is None: + # use config filename as default work_dir if cfg.work_dir is None + cfg.work_dir = osp.join('./work_dirs', + osp.splitext(osp.basename(args.config))[0]) + + cfg.log_level = args.log_level + # register all modules in mmcls into the registries + register_all_modules() + + # make sure save_root exists + if args.save_path and not args.save_path.parent.exists(): + raise FileNotFoundError( + f'The save path is {args.save_path}, and directory ' + f"'{args.save_path.parent}' do not exist.") + + # init logger + print('Param_scheduler :') + rich.print_json(json.dumps(cfg.param_scheduler)) + + # prepare data loader + batch_size = cfg.train_dataloader.batch_size * args.ngpus + + if 'by_epoch' in cfg.train_cfg: + by_epoch = cfg.train_cfg.get('by_epoch') + elif 'type' in cfg.train_cfg: + by_epoch = cfg.train_cfg.get('type') == 'EpochBasedTrainLoop' + else: + raise ValueError('please set `train_cfg`.') + + if args.dataset_size is None and by_epoch: + from mmaction.registry import DATASETS + dataset_size = len(DATASETS.build(cfg.train_dataloader.dataset)) + print(f'dataset is {dataset_size}') + # dataset_size = len(build_dataset(cfg.train_dataloader.dataset)) + else: + dataset_size = args.dataset_size or batch_size + + class FakeDataloader(list): + dataset = MagicMock(metainfo=None) + + data_loader 
= FakeDataloader(range(dataset_size // batch_size)) + dataset_info = ( + f'\nDataset infos:' + f'\n - Dataset size: {dataset_size}' + f'\n - Batch size per GPU: {cfg.train_dataloader.batch_size}' + f'\n - Number of GPUs: {args.ngpus}' + f'\n - Total batch size: {batch_size}') + if by_epoch: + dataset_info += f'\n - Iterations per epoch: {len(data_loader)}' + rich.print(dataset_info + '\n') + + # simulation training process + lr_list, momentum_list = simulate_train(data_loader, cfg, by_epoch) + if args.parameter == 'lr': + param_list = lr_list + else: + param_list = momentum_list + + param_name = 'Learning Rate' if args.parameter == 'lr' else 'Momentum' + plot_curve(param_list, args, param_name, len(data_loader), by_epoch) + + if args.save_path: + plt.savefig(args.save_path) + print(f'\nThe {param_name} graph is saved at {args.save_path}') + + if not args.not_show: + plt.show() + + +if __name__ == '__main__': + main() From 8da05663997df8dfc88383295355e13035e6321b Mon Sep 17 00:00:00 2001 From: Kai Hu Date: Mon, 12 Dec 2022 04:24:02 -0500 Subject: [PATCH 37/57] [Doc] Update CSN models' README & metafile (#2121) --- configs/recognition/csn/README.md | 30 ++++--- ...frozen_8xb12-32x2x1-58e_kinetics400-rgb.py | 24 ++++-- configs/recognition/csn/metafile.yml | 80 +++++++++---------- 3 files changed, 70 insertions(+), 64 deletions(-) diff --git a/configs/recognition/csn/README.md b/configs/recognition/csn/README.md index 4e8e8e6ecc..77c3aaf900 100644 --- a/configs/recognition/csn/README.md +++ b/configs/recognition/csn/README.md @@ -20,24 +20,22 @@ Group convolution has been shown to offer great computational savings in various ### Kinetics-400 -| frame sampling strategy | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | inference time(video/s) | gpu_mem(M) | config | ckpt | log | -| :---------------------: | :------------: | :--: | :---------------------: | :------: | :------: | :------: | :---------------: | :---------------------: | :--------: | :--------------------: | :------------------: | :------------------: | -| 32x2x1 | short-side 320 | 8 | ResNet152 (IR) | IG65M | 82.66 | 95.82 | 10 clips x 3 crop | x | 32703 | [config](/configs/recognition/csn/ircsn_ig65m-pretrained-r152_8xb12-32x2x1-58e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/csn/ircsn_ig65m-pretrained-r152_8xb12-32x2x1-58e_kinetics400-rgb/ircsn_ig65m-pretrained-r152_8xb12-32x2x1-58e_kinetics400-rgb_20220811-c7a3cc5b.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/csn/ircsn_ig65m-pretrained-r152_8xb12-32x2x1-58e_kinetics400-rgb/ircsn_ig65m-pretrained-r152_8xb12-32x2x1-58e_kinetics400-rgb.log) | -| 32x2x1 | short-side 320 | 8 | ResNet152 (IR+BNFrozen) | IG65M | 82.58 | 95.76 | 10 clips x 3 crop | x | 32703 | [config](/configs/recognition/csn/ircsn_ig65m-pretrained-r152-bnfrozen_8xb12-32x2x1-58e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/csn/ircsn_ig65m-pretrained-r152-bnfrozen_8xb12-32x2x1-58e_kinetics400-rgb/ircsn_ig65m-pretrained-r152-bnfrozen_8xb12-32x2x1-58e_kinetics400-rgb_20220811-7d1dacde.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/csn/ircsn_ig65m-pretrained-r152-bnfrozen_8xb12-32x2x1-58e_kinetics400-rgb/ircsn_ig65m-pretrained-r152-bnfrozen_8xb12-32x2x1-58e_kinetics400-rgb.log) | -| 32x2x1 | short-side 320 | 8 | ResNet50 (IR+BNFrozen) | IG65M | 79.17 | 94.14 | 10 clips x 3 crop | x | 22238 | 
[config](/configs/recognition/csn/ircsn_ig65m-pretrained-r50-bnfrozen_8xb12-32x2x1-58e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/csn/ircsn_ig65m-pretrained-r50-bnfrozen_8xb12-32x2x1-58e_kinetics400-rgb/ircsn_ig65m-pretrained-r50-bnfrozen_8xb12-32x2x1-58e_kinetics400-rgb_20220811-44395bae.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/csn/ircsn_ig65m-pretrained-r50-bnfrozen_8xb12-32x2x1-58e_kinetics400-rgb/ircsn_ig65m-pretrained-r50-bnfrozen_8xb12-32x2x1-58e_kinetics400-rgb.log) | -| 32x2x1 | short-side 320 | x | ResNet152 (IP) | None | 77.69 | 92.83 | 10 clips x 3 crop | x | x | [config](/configs/recognition/csn/ipcsn_r152_32x2x1-180e_kinetics400-rgb.py) | [infer_ckpt](https://download.openmmlab.com/mmaction/recognition/csn/vmz/vmz_ipcsn_from_scratch_r152_32x2x1_180e_kinetics400_rgb_20210617-d565828d.pth) | x | -| 32x2x1 | short-side 320 | x | ResNet152 (IR) | None | 79.17 | 94.14 | 10 clips x 3 crop | x | x | [config](/configs/recognition/csn/ircsn_r152_32x2x1-180e_kinetics400-rgb.py) | [infer_ckpt](https://download.openmmlab.com/mmaction/recognition/csn/vmz/vmz_ircsn_from_scratch_r152_32x2x1_180e_kinetics400_rgb_20210617-5c933ae1.pth) | x | -| 32x2x1 | short-side 320 | x | ResNet152 (IP+BNFrozen) | IG65M | 82.51 | 95.52 | 10 clips x 3 crop | x | x | [config](/configs/recognition/csn/ipcsn_ig65m-pretrained-r152-bnfrozen_32x2x1-58e_kinetics400-rgb.py) | [infer_ckpt](https://download.openmmlab.com/mmaction/recognition/csn/vmz/vmz_ipcsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb_20210617-c3be9793.pth) | x | -| 32x2x1 | short-side 320 | x | ResNet152 (IP+BNFrozen) | Sports1M | 78.77 | 93.78 | 10 clips x 3 crop | x | x | [config](/configs/recognition/csn/ipcsn_sports1m-pretrained-r152-bnfrozen_32x2x1-58e_kinetics400-rgb.py) | [infer_ckpt](https://download.openmmlab.com/mmaction/recognition/csn/vmz/vmz_ipcsn_sports1m_pretrained_r152_32x2x1_58e_kinetics400_rgb_20210617-3367437a.pth) | x | -| 32x2x1 | short-side 320 | x | ResNet152 (IR+BNFrozen) | Sports1M | 78.82 | 93.34 | 10 clips x 3 crop | x | x | [config](/configs/recognition/csn/ircsn_sports1m-pretrained-r152-bnfrozen_32x2x1-58e_kinetics400-rgb.py) | [infer_ckpt](https://download.openmmlab.com/mmaction/recognition/csn/vmz/vmz_ircsn_sports1m_pretrained_r152_32x2x1_58e_kinetics400_rgb_20210617-b9b10241.pth) | x | - -1. The **gpus** indicates the number of gpu (80G A100) we used to get the checkpoint. It is noteworthy that the configs we provide are used for 8 gpus as default. - According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you may set the learning rate proportional to the batch size if you use different GPUs or videos per GPU, - e.g., lr=0.01 for 4 GPUs x 2 video/gpu and lr=0.08 for 16 GPUs x 4 video/gpu. 
+| frame sampling strategy | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | FLOPs | params | config | ckpt | log | +| :---------------------: | :--------: | :--: | :---------------------: | :------: | :------: | :------: | :---------------: | :----: | :----: | :----------------------------: | :---------------------------: | :--------------------------: | +| 32x2x1 | 224x224 | 8 | ResNet152 (IR) | IG65M | 82.87 | 95.90 | 10 clips x 3 crop | 97.63G | 29.70M | [config](/configs/recognition/csn/ircsn_ig65m-pretrained-r152_8xb12-32x2x1-58e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/csn/ircsn_ig65m-pretrained-r152_8xb12-32x2x1-58e_kinetics400-rgb/ircsn_ig65m-pretrained-r152_8xb12-32x2x1-58e_kinetics400-rgb_20220811-c7a3cc5b.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/csn/ircsn_ig65m-pretrained-r152_8xb12-32x2x1-58e_kinetics400-rgb/ircsn_ig65m-pretrained-r152_8xb12-32x2x1-58e_kinetics400-rgb.log) | +| 32x2x1 | 224x224 | 8 | ResNet152 (IR+BNFrozen) | IG65M | 82.84 | 95.92 | 10 clips x 3 crop | 97.63G | 29.70M | [config](/configs/recognition/csn/ircsn_ig65m-pretrained-r152-bnfrozen_8xb12-32x2x1-58e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/csn/ircsn_ig65m-pretrained-r152-bnfrozen_8xb12-32x2x1-58e_kinetics400-rgb/ircsn_ig65m-pretrained-r152-bnfrozen_8xb12-32x2x1-58e_kinetics400-rgb_20220811-7d1dacde.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/csn/ircsn_ig65m-pretrained-r152-bnfrozen_8xb12-32x2x1-58e_kinetics400-rgb/ircsn_ig65m-pretrained-r152-bnfrozen_8xb12-32x2x1-58e_kinetics400-rgb.log) | +| 32x2x1 | 224x224 | 8 | ResNet50 (IR+BNFrozen) | IG65M | 79.44 | 94.26 | 10 clips x 3 crop | 55.90G | 13.13M | [config](/configs/recognition/csn/ircsn_ig65m-pretrained-r50-bnfrozen_8xb12-32x2x1-58e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/csn/ircsn_ig65m-pretrained-r50-bnfrozen_8xb12-32x2x1-58e_kinetics400-rgb/ircsn_ig65m-pretrained-r50-bnfrozen_8xb12-32x2x1-58e_kinetics400-rgb_20220811-44395bae.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/csn/ircsn_ig65m-pretrained-r50-bnfrozen_8xb12-32x2x1-58e_kinetics400-rgb/ircsn_ig65m-pretrained-r50-bnfrozen_8xb12-32x2x1-58e_kinetics400-rgb.log) | +| 32x2x1 | 224x224 | x | ResNet152 (IP) | None | 77.80 | 93.10 | 10 clips x 3 crop | 109.9G | 33.02M | [config](/configs/recognition/csn/ipcsn_r152_32x2x1-180e_kinetics400-rgb.py) | [infer_ckpt](https://download.openmmlab.com/mmaction/recognition/csn/vmz/vmz_ipcsn_from_scratch_r152_32x2x1_180e_kinetics400_rgb_20210617-d565828d.pth) | x | +| 32x2x1 | 224x224 | x | ResNet152 (IR) | None | 76.53 | 92.28 | 10 clips x 3 crop | 97.6G | 29.70M | [config](/configs/recognition/csn/ircsn_r152_32x2x1-180e_kinetics400-rgb.py) | [infer_ckpt](https://download.openmmlab.com/mmaction/recognition/csn/vmz/vmz_ircsn_from_scratch_r152_32x2x1_180e_kinetics400_rgb_20210617-5c933ae1.pth) | x | +| 32x2x1 | 224x224 | x | ResNet152 (IP+BNFrozen) | IG65M | 82.68 | 95.69 | 10 clips x 3 crop | 109.9G | 33.02M | [config](/configs/recognition/csn/ipcsn_ig65m-pretrained-r152-bnfrozen_32x2x1-58e_kinetics400-rgb.py) | [infer_ckpt](https://download.openmmlab.com/mmaction/recognition/csn/vmz/vmz_ipcsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb_20210617-c3be9793.pth) | x | +| 32x2x1 | 224x224 | x | ResNet152 (IP+BNFrozen) | Sports1M | 79.07 | 93.82 | 10 clips x 3 crop | 109.9G | 33.02M | 
[config](/configs/recognition/csn/ipcsn_sports1m-pretrained-r152-bnfrozen_32x2x1-58e_kinetics400-rgb.py) | [infer_ckpt](https://download.openmmlab.com/mmaction/recognition/csn/vmz/vmz_ipcsn_sports1m_pretrained_r152_32x2x1_58e_kinetics400_rgb_20210617-3367437a.pth) | x | +| 32x2x1 | 224x224 | x | ResNet152 (IR+BNFrozen) | Sports1M | 78.57 | 93.44 | 10 clips x 3 crop | 109.9G | 33.02M | [config](/configs/recognition/csn/ircsn_sports1m-pretrained-r152-bnfrozen_32x2x1-58e_kinetics400-rgb.py) | [infer_ckpt](https://download.openmmlab.com/mmaction/recognition/csn/vmz/vmz_ircsn_sports1m_pretrained_r152_32x2x1_58e_kinetics400_rgb_20210617-b9b10241.pth) | x | + +1. The **gpus** indicates the number of gpus we used to get the checkpoint. If you want to use a different number of gpus or videos per gpu, the best way is to set `--auto-scale-lr` when calling `tools/train.py`, this parameter will auto-scale the learning rate according to the actual batch size and the original batch size. 2. The validation set of Kinetics400 we used consists of 19796 videos. These videos are available at [Kinetics400-Validation](https://mycuhk-my.sharepoint.com/:u:/g/personal/1155136485_link_cuhk_edu_hk/EbXw2WX94J1Hunyt3MWNDJUBz-nHvQYhO9pvKqm6g39PMA?e=a9QldB). The corresponding [data list](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_val_list.txt) (each line is of the format 'video_id, num_frames, label_index') and the [label map](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_class2ind.txt) are also available. 3. The **infer_ckpt** means those checkpoints are ported from [VMZ](https://github.com/facebookresearch/VMZ). -For more details on data preparation, you can refer to the **Prepare videos** part in the [Data Preparation Tutorial](/docs/en/user_guides/2_data_prepare.md). +For more details on data preparation, you can refer to [Kinetics400](/tools/data/kinetics/README.md). ## Train @@ -51,7 +49,7 @@ Example: train CSN model on Kinetics-400 dataset in a deterministic option with ```shell python tools/train.py configs/recognition/csn/ircsn_ig65m-pretrained-r152_8xb12-32x2x1-58e_kinetics400-rgb.py \ - --cfg-options randomness.seed=0 randomness.deterministic=True + --seed=0 --deterministic ``` For more details, you can refer to the **Training** part in the [Training and Test Tutorial](/docs/en/user_guides/4_train_test.md). 
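Example: a quick inference sanity check on a trained or downloaded CSN checkpoint, reusing the high-level Python API shown in `docs/en/get_started.md`. This is only an illustrative sketch: the config path and checkpoint URL are copied from the table above, and `demo/demo.mp4` stands in for any local test video.

```python
from operator import itemgetter

from mmaction.apis import inference_recognizer, init_recognizer
from mmaction.utils import register_all_modules

# Config and checkpoint copied from the Kinetics-400 table above.
config_file = 'configs/recognition/csn/ircsn_ig65m-pretrained-r152_8xb12-32x2x1-58e_kinetics400-rgb.py'
checkpoint_file = 'https://download.openmmlab.com/mmaction/v1.0/recognition/csn/ircsn_ig65m-pretrained-r152_8xb12-32x2x1-58e_kinetics400-rgb/ircsn_ig65m-pretrained-r152_8xb12-32x2x1-58e_kinetics400-rgb_20220811-c7a3cc5b.pth'
video_file = 'demo/demo.mp4'  # placeholder: any local test video

register_all_modules()  # register all mmaction2 modules and set the default scope
model = init_recognizer(config_file, checkpoint_file, device='cuda:0')  # or device='cpu'
pred_result = inference_recognizer(model, video_file)

# Top-5 (class index, score) pairs, using the same post-processing as get_started.md.
pred_scores = pred_result.pred_scores.item.tolist()
score_tuples = tuple(zip(range(len(pred_scores)), pred_scores))
top5 = sorted(score_tuples, key=itemgetter(1), reverse=True)[:5]
print(top5)
```

For full evaluation on the Kinetics-400 validation set, refer to the **Test** part of the same [Training and Test Tutorial](/docs/en/user_guides/4_train_test.md).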
diff --git a/configs/recognition/csn/ircsn_ig65m-pretrained-r152-bnfrozen_8xb12-32x2x1-58e_kinetics400-rgb.py b/configs/recognition/csn/ircsn_ig65m-pretrained-r152-bnfrozen_8xb12-32x2x1-58e_kinetics400-rgb.py index 3869e55b70..ddaad925a3 100644 --- a/configs/recognition/csn/ircsn_ig65m-pretrained-r152-bnfrozen_8xb12-32x2x1-58e_kinetics400-rgb.py +++ b/configs/recognition/csn/ircsn_ig65m-pretrained-r152-bnfrozen_8xb12-32x2x1-58e_kinetics400-rgb.py @@ -21,8 +21,14 @@ ann_file_val = root + 'kinetics400_val_list_videos.txt' ann_file_test = ann_file_val +# file_client_args = dict( +# io_backend='petrel', +# path_mapping=dict( +# {'data/kinetics400': 's3://openmmlab/datasets/action/Kinetics400'})) +file_client_args = dict(io_backend='disk') + train_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1), dict(type='DecordDecode'), dict(type='Resize', scale=(-1, 256)), @@ -33,7 +39,7 @@ dict(type='PackActionInputs') ] val_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=32, @@ -47,7 +53,7 @@ dict(type='PackActionInputs') ] test_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=32, @@ -114,14 +120,16 @@ milestones=[32, 48], gamma=0.1) ] -""" -The learning rate is for total_batch_size = 8 x 12 (num_gpus x batch_size) -If you want to use other batch size or number of GPU settings, please update -the learning rate with the linear scaling rule. -""" + optim_wrapper = dict( optimizer=dict(type='SGD', lr=5e-4, momentum=0.9, weight_decay=1e-4), clip_grad=dict(max_norm=40, norm_type=2)) default_hooks = dict(checkpoint=dict(interval=2, max_keep_ckpts=5)) find_unused_parameters = True + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (12 samples per GPU). 
+auto_scale_lr = dict(enable=False, base_batch_size=96) diff --git a/configs/recognition/csn/metafile.yml b/configs/recognition/csn/metafile.yml index b4473c33ab..be7e432583 100644 --- a/configs/recognition/csn/metafile.yml +++ b/configs/recognition/csn/metafile.yml @@ -13,10 +13,10 @@ Models: Architecture: ResNet152 Batch Size: 12 Epochs: 58 - FLOPs: 98096676864 - Parameters: 29703568 + FLOPs: 97.63G + Parameters: 29.70M Pretrained: IG65M - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -24,8 +24,8 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 82.66 - Top 5 Accuracy: 95.82 + Top 1 Accuracy: 82.87 + Top 5 Accuracy: 95.90 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/csn/ircsn_ig65m-pretrained-r152_8xb12-32x2x1-58e_kinetics400-rgb/ircsn_ig65m-pretrained-r152_8xb12-32x2x1-58e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/csn/ircsn_ig65m-pretrained-r152_8xb12-32x2x1-58e_kinetics400-rgb/ircsn_ig65m-pretrained-r152_8xb12-32x2x1-58e_kinetics400-rgb_20220811-c7a3cc5b.pth @@ -36,10 +36,10 @@ Models: Architecture: ResNet152 Batch Size: 12 Epochs: 58 - FLOPs: 98096676864 - Parameters: 29703568 + FLOPs: 97.63G + Parameters: 29.70M Pretrained: IG65M - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -47,8 +47,8 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 82.58 - Top 5 Accuracy: 95.76 + Top 1 Accuracy: 82.84 + Top 5 Accuracy: 95.92 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/csn/ircsn_ig65m-pretrained-r152-bnfrozen_8xb12-32x2x1-58e_kinetics400-rgb/ircsn_ig65m-pretrained-r152-bnfrozen_8xb12-32x2x1-58e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/csn/ircsn_ig65m-pretrained-r152-bnfrozen_8xb12-32x2x1-58e_kinetics400-rgb/ircsn_ig65m-pretrained-r152-bnfrozen_8xb12-32x2x1-58e_kinetics400-rgb_20220811-7d1dacde.pth @@ -59,10 +59,10 @@ Models: Architecture: ResNet50 Batch Size: 12 Epochs: 58 - FLOPs: 56209211392 - Parameters: 13131152 + FLOPs: 55.90G + Parameters: 13.13M Pretrained: IG65M - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -70,8 +70,8 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 79.17 - Top 5 Accuracy: 94.14 + Top 1 Accuracy: 79.44 + Top 5 Accuracy: 94.26 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/csn/ircsn_ig65m-pretrained-r50-bnfrozen_8xb12-32x2x1-58e_kinetics400-rgb/ircsn_ig65m-pretrained-r50-bnfrozen_8xb12-32x2x1-58e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/csn/ircsn_ig65m-pretrained-r50-bnfrozen_8xb12-32x2x1-58e_kinetics400-rgb/ircsn_ig65m-pretrained-r50-bnfrozen_8xb12-32x2x1-58e_kinetics400-rgb_20220811-44395bae.pth @@ -81,18 +81,18 @@ Models: Metadata: Architecture: ResNet152 Epochs: 180 - FLOPs: 110337228800 - Parameters: 33016592 + FLOPs: 109.9G + Parameters: 33.02M Pretrained: None - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Modality: RGB Results: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 77.69 - Top 5 Accuracy: 92.93 + Top 1 Accuracy: 77.80 + Top 5 Accuracy: 93.10 Converted From: Weights: 
https://www.dropbox.com/s/3fihu6ti60047mu/ipCSN_152_kinetics_from_scratch_f129594342.pkl?dl=0 Code: https://github.com/facebookresearch/VMZ/tree/b61b08194bc3273bef4c45fdfdd36c56c8579ff3 @@ -104,18 +104,18 @@ Models: Metadata: Architecture: ResNet152 Epochs: 180 - FLOPs: 98096676864 - Parameters: 29703568 + FLOPs: 97.63G + Parameters: 29.70M Pretrained: None - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Modality: RGB Results: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 76.25 - Top 5 Accuracy: 92.11 + Top 1 Accuracy: 76.53 + Top 5 Accuracy: 92.28 Converted From: Weights: https://www.dropbox.com/s/46gcm7up60ssx5c/irCSN_152_kinetics_from_scratch_f98268019.pkl?dl=0 Code: https://github.com/facebookresearch/VMZ/tree/b61b08194bc3273bef4c45fdfdd36c56c8579ff3 @@ -127,18 +127,18 @@ Models: Metadata: Architecture: ResNet152 Epochs: 58 - FLOPs: 98096676864 - Parameters: 29703568 + FLOPs: 109.9G + Parameters: 33.02M Pretrained: IG65M - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Modality: RGB Results: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 82.51 - Top 5 Accuracy: 95.52 + Top 1 Accuracy: 82.68 + Top 5 Accuracy: 95.69 Converted From: Weights: https://www.dropbox.com/s/zpp3p0vn2i7bibl/ipCSN_152_ft_kinetics_from_ig65m_f133090949.pkl?dl=0 Code: https://github.com/facebookresearch/VMZ/tree/b61b08194bc3273bef4c45fdfdd36c56c8579ff3 @@ -150,18 +150,18 @@ Models: Metadata: Architecture: ResNet152 Epochs: 58 - FLOPs: 110337228800 - Parameters: 33016592 + FLOPs: 109.9G + Parameters: 33.02M Pretrained: Sports1M - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Modality: RGB Results: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 78.77 - Top 5 Accuracy: 93.78 + Top 1 Accuracy: 79.07 + Top 5 Accuracy: 93.82 Converted From: Weights: https://www.dropbox.com/s/ir7cr0hda36knux/ipCSN_152_ft_kinetics_from_sports1m_f111279053.pkl?dl=0 Code: https://github.com/facebookresearch/VMZ/tree/b61b08194bc3273bef4c45fdfdd36c56c8579ff3 @@ -173,18 +173,18 @@ Models: Metadata: Architecture: ResNet152 Epochs: 58 - FLOPs: 98096676864 - Parameters: 29703568 + FLOPs: 109.9G + Parameters: 33.02M Pretrained: Sports1M - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Modality: RGB Results: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 78.82 - Top 5 Accuracy: 93.34 + Top 1 Accuracy: 78.57 + Top 5 Accuracy: 93.44 Converted From: Weights: https://www.dropbox.com/s/zuoj1aqouh6bo6k/irCSN_152_ft_kinetics_from_sports1m_f101599884.pkl?dl=0 Code: https://github.com/facebookresearch/VMZ/tree/b61b08194bc3273bef4c45fdfdd36c56c8579ff3 From 2a10e5b09df19e86365cf37e8c79c3f86c08ce58 Mon Sep 17 00:00:00 2001 From: cir7 <33249023+cir7@users.noreply.github.com> Date: Tue, 13 Dec 2022 11:27:15 +0800 Subject: [PATCH 38/57] [fix] fix check_videos.py and acclerate for large dataset(ie. 
k400) (#2134) --- tools/analysis_tools/check_videos.py | 44 +++++++++++++++------------- 1 file changed, 24 insertions(+), 20 deletions(-) diff --git a/tools/analysis_tools/check_videos.py b/tools/analysis_tools/check_videos.py index 140777e503..87e26980e9 100644 --- a/tools/analysis_tools/check_videos.py +++ b/tools/analysis_tools/check_videos.py @@ -3,13 +3,15 @@ import os import warnings from functools import partial -from multiprocessing import Manager, Pool, cpu_count +from multiprocessing import Manager, cpu_count -import mmcv import numpy as np -from mmcv import Config, DictAction +from mmengine import Config, DictAction, track_parallel_progress -from mmaction.datasets import TRANSFORMS, build_dataset +from mmaction.registry import DATASETS, TRANSFORMS +from mmaction.utils import register_all_modules + +register_all_modules() def parse_args(): @@ -46,7 +48,7 @@ def parse_args(): choices=['decord', 'opencv', 'pyav'], help='Video decoder type, should be one of [decord, opencv, pyav]') parser.add_argument( - '--num-processes', + '--nproc', type=int, default=(cpu_count() - 1 or 1), help='Number of processes to check videos') @@ -93,14 +95,14 @@ def __call__(self, results): return results -def _do_check_videos(lock, dataset, output_file, idx): +def _do_check_videos(lock, pipeline, output_file, data_info): try: - dataset[idx] + pipeline(data_info) except: # noqa # save invalid video path to output file lock.acquire() with open(output_file, 'a') as f: - f.write(dataset.video_infos[idx]['filename'] + '\n') + f.write(data_info['filename'] + '\n') lock.release() @@ -115,31 +117,33 @@ def _do_check_videos(lock, dataset, output_file, idx): cfg.merge_from_dict(args.cfg_options) # build dataset - dataset_type = cfg.data[args.split].type + dataset_cfg = cfg.get(f'{args.split}_dataloader').dataset + dataset_type = dataset_cfg.type assert dataset_type == 'VideoDataset' - cfg.data[args.split].pipeline = [ + dataset_cfg.pipeline = [ dict(type=decoder_to_pipeline_prefix[args.decoder] + 'Init'), dict(type='RandomSampleFrames'), dict(type=decoder_to_pipeline_prefix[args.decoder] + 'Decode') ] - dataset = build_dataset(cfg.data[args.split], - dict(test_mode=(args.split != 'train'))) + + dataset = DATASETS.build(dataset_cfg) + dataset_cfg.pop('type') + pipeline = dataset.pipeline # prepare for checking if os.path.exists(args.output_file): # remove existing output file os.remove(args.output_file) - pool = Pool(args.num_processes) + lock = Manager().Lock() - worker_fn = partial(_do_check_videos, lock, dataset, args.output_file) - ids = range(len(dataset)) + worker_fn = partial(_do_check_videos, lock, pipeline, args.output_file) + # avoid copy dataset for multiprocess + data_info_list = [ + dataset.get_data_info(idx) for idx in range(len(dataset)) + ] # start checking - prog_bar = mmcv.ProgressBar(len(dataset)) - for _ in pool.imap_unordered(worker_fn, ids): - prog_bar.update() - pool.close() - pool.join() + track_parallel_progress(worker_fn, data_info_list, nproc=args.nproc) if os.path.exists(args.output_file): num_lines = sum(1 for _ in open(args.output_file)) From 9f3836015b2dbd2f1b410b05be8195c63e8328db Mon Sep 17 00:00:00 2001 From: coco <69197635+cocoshe@users.noreply.github.com> Date: Tue, 13 Dec 2022 11:54:04 +0800 Subject: [PATCH 39/57] CodeCamp #156 (#2083) Co-authored-by: Dai-Wenxun Co-authored-by: Yining Li --- docs/zh_cn/get_started.md | 187 +++++++++++++++++++++++- docs/zh_cn/user_guides/3_inference.md | 42 +++++- docs/zh_cn/user_guides/visualization.md | 21 ++- 3 files changed, 246 
insertions(+), 4 deletions(-) diff --git a/docs/zh_cn/get_started.md b/docs/zh_cn/get_started.md index 658a611f8e..57d1ba97d8 100644 --- a/docs/zh_cn/get_started.md +++ b/docs/zh_cn/get_started.md @@ -1,3 +1,186 @@ -# 依赖环境(内容建设中) +# 前置条件 -# 安装(内容建设中) +在本节中,我们将演示如何准备 PyTorch 相关的依赖环境。 + +MMAction2 适用于 Linux、Windows 和 MacOS。它需要 Python 3.6+,CUDA 9.2+ 和 PyTorch 1.6+。 + +``` +如果你对配置 PyTorch 环境已经很熟悉,并且已经完成了配置,可以直接进入[下一节](#安装)。 +否则的话,请依照以下步骤完成配置。 +``` + +**第一步** 从[官网](https://docs.conda.io/en/latest/miniconda.html)下载并安装 Miniconda。 + +**第二步** 创建一个 conda 虚拟环境并激活它。 + +```shell +conda create --name openmmlab python=3.8 -y +conda activate openmmlab +``` + +**第三步** 根据[官方指南](https://pytorch.org/get-started/locally/)安装 PyTorch。例如: + +在GPU平台: + +```shell +conda install pytorch torchvision -c pytorch +``` + +``` +以上命令将自动安装最新版本的 PyTorch 和 cudatoolkit,请检查它们是否和你的环境匹配。 +``` + +在CPU平台: + +```shell +conda install pytorch torchvision cpuonly -c pytorch +``` + +# 安装 + +我们推荐用户按照我们的最佳实践安装 MMAction2。但除此之外,如果你想根据你的习惯完成安装,流程见[自定义安装](#自定义安装)章节获取更多信息。 + +## 最佳实践 + +**第一步** 使用 MIM 安装 MMEngine 和 MMCV。 + +```shell +pip install -U openmim +mim install mmengine 'mmcv>=2.0.0rc1' +``` + +**第二步** 安装 MMAction2。 + +根据你的需要,我们支持两种安装模式: + +- [从源码安装(推荐)](#从源码安装):希望开发自己的动作识别任务或者在 MMAction2 上开发新功能,例如,添加新的数据集或者新的模型。因此,你可以使用我们提供的所有工具。 +- [作为 Python 包安装](#作为-Python-包安装):只想希望调用 MMAction2 的 API 接口,或者在你的项目中导入 MMAction2 中的模块。 + +### 从源码安装 + +这种情况下,从源码按如下方式安装 MMAction2: + +```shell +git clone https://github.com/open-mmlab/mmaction2.git +cd mmaction2 +git checkout 1.x +pip install -v -e . +# "-v" 表示输出更多安装相关的信息 +# "-e" 表示以可编辑形式安装,这样可以在不重新安装的情况下,让本地修改直接生效 +``` + +另外,如果你想为 MMAction2 贡献代码,或者体验试验中的功能,请签出到 `dev-1.x` 分支。 + +```shell +git checkout dev-1.x +``` + +### 作为 Python 包安装 + +直接使用 pip 安装即可。 + +```shell +pip install "mmaction2>=1.0rc0" +``` + +## 验证安装 + +为了验证 MMAction2 的安装是否正确,我们提供了一些示例代码来执行模型推理。 + +**第一步** 我们需要下载配置文件和模型权重文件。 + +```shell +mim download mmaction2 --config tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb --dest . +``` + +**第二步** 验证示例的推理流程。 + +如果你从源码安装 MMAction2,那么直接运行以下命令进行验证: + +```shell +# demo.mp4 和 label_map_k400.txt 都来自于 Kinetics-400 +python demo/demo.py tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb.py \ + tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb_20220906-2692d16c.pth \ + demo/demo.mp4 tools/data/kinetics/label_map_k400.txt +``` + +终端上将输出获得最高分数的标签以及相应的分数。 + +如果你是作为 Python 包安装,那么可以打开你的 Python 解释器,并粘贴如下代码: + +```python +from mmaction.apis import init_recognizer, inference_recognizer +from mmaction.utils import register_all_modules + +config_file = 'tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb.py' +checkpoint_file = 'tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb_20220906-2692d16c.pth' +video_file = 'demo/demo.mp4' +label_file = 'tools/data/kinetics/label_map_k400.txt' +register_all_modules() # register all modules and set mmaction2 as the default scope. 
+model = init_recognizer(config_file, checkpoint_file, device='cpu') # or device='cuda:0' +result = inference_recognizer(model, video_file) +pred_scores = result.pred_scores.item.tolist() +score_tuples = tuple(zip(range(len(pred_scores)), pred_scores)) +score_sorted = sorted(score_tuples, key=itemgetter(1), reverse=True) +top5_label = score_sorted[:5] + +labels = open(label_file).readlines() +labels = [x.strip() for x in labels] +results = [(labels[k[0]], k[1]) for k in top5_label] + +print('The top-5 labels with corresponding scores are:') +for result in results: + print(f'{result[0]}: ', result[1]) +``` + +## 自定义安装 + +### CUDA 版本 + +安装 PyTorch 时,你可能需要安装特定的 CUDA 的版本。如果你不清楚应该选择哪个版本,请遵循我们的建议: + +- 对于 Ampere 架构的 NVIDIA GPU,例如 GeForce 30 series 以及 NVIDIA A100,CUDA 11 是必需的。 +- 对于更早的 NVIDIA GPU,CUDA 11 是向前兼容的,但 CUDA 10.2 能够提供更好的兼容性,也更加轻量。 + +请确保你的 GPU 驱动满足要求的最低版本,详见[此表格](https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#cuda-major-component-versions__table-cuda-toolkit-driver-versions)。 + +``` +如果按照我们的最佳实践进行安装,CUDA 运行时库就足够了,因为我们提供相关 CUDA 代码的预编译,你不需要进行本地编译。 +但如果你希望从源码进行 MMCV 的编译,或是进行其他 CUDA 算子的开发,那么就必须安装完整的 CUDA 工具链,参见 +[NVIDIA 官网](https://developer.nvidia.com/cuda-downloads),另外还需要确保该 CUDA 工具链的版本与 PyTorch 安装时 +的配置相匹配(如用 `conda install` 安装 PyTorch 时指定的 cudatoolkit 版本)。 +``` + +### 不使用 MIM 安装 MMCV + +MMCV 包含 C++ 和 CUDA 扩展,因此其对 PyTorch 的依赖比较复杂。 MIM 会自动解析此类依赖关系,选择合适的 MMCV 预编译包,使安装更简单,但它并不是必需的。 + +要使用 pip 而不是 MIM 安装 MMCV,请遵循 MMCV [安装指南](https://mmcv.readthedocs.io/en/2.x/get_started/installation.html)。它需要你用指定 url 的形式手动指定对应的 PyTorch 和 CUDA 版本。 + +例如,以下命令安装为 PyTorch 1.10.x 和 CUDA 11.3 构建的 mmcv。 + +```shell +pip install 'mmcv>=2.0.0rc1' -f https://download.openmmlab.com/mmcv/dist/cu113/torch1.10/index.html +``` + +### 在 CPU 环境中安装 + +MMAction2 可以仅在 CPU 环境中安装。在 CPU 模式下,你可以完成训练、测试和模型推理等所有操作。 + +在 CPU 模式下,MMCV 的部分功能将不可用,通常是一些 GPU 编译的算子。不过不用担心, MMAction2 中几乎所有的模型都不会依赖这些算子。 + +### 通过Docker使用MMAction2 + +我们提供一个[Dockerfile](https://github.com/open-mmlab/mmaction2/blob/1.x/docker/Dockerfile)用来构建镜像,确保你的 [Docker版本](https://docs.docker.com/engine/install/)>=19.03. + +```shell +# 例如构建PyTorch 1.6.0, CUDA 10.1, CUDNN 7的镜像 +# 如果你喜欢其他版本,只要修改Dockerfile +docker build -f ./docker/Dockerfile --rm -t mmaction2 . +``` + +用以下命令运行 Docker 镜像: + +```shell +docker run --gpus all --shm-size=8g -it -v {DATA_DIR}:/mmaction2/data mmaction2 +``` diff --git a/docs/zh_cn/user_guides/3_inference.md b/docs/zh_cn/user_guides/3_inference.md index cae7e314f6..20c346c7b2 100644 --- a/docs/zh_cn/user_guides/3_inference.md +++ b/docs/zh_cn/user_guides/3_inference.md @@ -1 +1,41 @@ -# 教程 3:使用现有模型推理(内容建设中) +# 教程3:利用现有模型进行推理 + +MMAction2 在 [Model Zoo](../modelzoo.md) 中提供预训练的视频理解模型。 +本教程将展示**如何使用现有模型对给定视频进行推理**。 + +至于如何在标准数据集上测试现有模型,请参阅这该[指南](./4_train_test.md#test) + +## 给定视频的推理 + +MMAction2提供了高级 Python APIs,用于对给定视频进行推理: + +- [init_recognizer](mmaction.apis.init_recognizer): 用配置和检查点初始化一个识别器。 +- [inference_recognizer](mmaction.apis.inference_recognizer): 对给定视频进行推理。 + +下面是一个使用 Kinetics-400 预训练检查点在给定视频上构建模型和推理的示例。 + +```{note} +如果使用mmaction2作为第三方包,则需要下载示例中的config和演示视频。 + +下载所需的配置:'mim download mmaction2 --config tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb --dest .' 
+ +下载所需的演示视频:'wget https://github.com/open-mmlab/mmaction2/blob/dev-1.x/demo/demo.mp4' +``` + +```python +from mmaction.apis import inference_recognizer, init_recognizer +from mmaction.utils import register_all_modules + +config_path = 'configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb.py' +checkpoint_path = 'https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb_20220906-2692d16c.pth' # 可以是本地路径 +img_path = 'demo/demo.mp4' # 您可以指定自己的视频路径 + +# 注册所有模块,并将 MMACTION 设置为默认作用域。 +register_all_modules() +# 从配置文件和检查点文件构建模型 +model = init_recognizer(config_path, checkpoint_path, device="cpu") # 也可以是 'cuda:0' +# 测试单个视频 +result = inference_recognizer(model, img_path) +``` + +`result` 是一个包含 `pred_scores` 的字典。动作识别示例代码详见 [demo/demo.py](https://github.com/open-mmlab/mmaction2/blob/1.x/demo/demo.py)。 diff --git a/docs/zh_cn/user_guides/visualization.md b/docs/zh_cn/user_guides/visualization.md index 3f6d7b9286..9d1aa2a2e7 100644 --- a/docs/zh_cn/user_guides/visualization.md +++ b/docs/zh_cn/user_guides/visualization.md @@ -1 +1,20 @@ -# 可视化工具(内容建设中) +# 可视化工具 + +## 对数据集可视化 + +你可以使用`tools/analysis_tools/browse_dataset.py`去可视化数据集。 + +```shell +python tools/analysis_tools/browse_dataset.py ${CONFIG_FILE} [ARGS] +``` + +| 参数 | 含义 | +| ------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `CONFIG_FILE` | 配置文件的路径。 | +| `--output-dir OUTPUT_DIR` | 如果没有display显示接口,你能将可视化结果保存到`OUTPUT_DIR`,默认为None。 | +| `--show-frames` | 如果你拥有显示接口,会显示视频的帧内容,默认为False。 | +| `--phase PHASE` | 想要可视化的数据集阶段,接受`train`, `test` 和`val`. 默认为`train`。 | +| `--show-number SHOW_NUMBER` | 选择可视化的图像数量,必须比0大,如果数量比数据集长度更大,则展示数据集中的所有图像,默认为"sys.maxsize",展示数据集中所有图像。 | +| `--show-interval SHOW_INTERVAL` | 显示的间隔,默认为2The interval of show (s). Defaults to 2。 | +| `--mode MODE` | 显示模式:显示原始视频或者变换后的视频。`original` 表示显示从硬盘中导入的视频,而`transformed` 表示显示变换后的视频,默认为`transformed`。 | +| `--cfg-options CFG_OPTIONS` | 覆盖一些正在使用的config配置的设置,像”xxx=yyy“形式的键值对将会被合并进config配置文件。如果将被覆盖的是一个列表,它的形式将是`key="[a,b]"` 或 `key=a,b`的格式。该参数还允许嵌套列表/元组值,例如`key="[(a,b),(c,d)]"`. 请注意,引号是必需的,不允许有空格。 | From ec1eecb7f8a3a5efe5111fb4750194862e4c9e7e Mon Sep 17 00:00:00 2001 From: Kai Hu Date: Thu, 22 Dec 2022 10:51:47 -0500 Subject: [PATCH 40/57] [Fix] fix test model of SampleFrames (#2140) --- mmaction/datasets/transforms/loading.py | 87 +++++++++++++++---------- 1 file changed, 51 insertions(+), 36 deletions(-) diff --git a/mmaction/datasets/transforms/loading.py b/mmaction/datasets/transforms/loading.py index e0b5ce75a6..88e0517122 100644 --- a/mmaction/datasets/transforms/loading.py +++ b/mmaction/datasets/transforms/loading.py @@ -83,40 +83,46 @@ def __repr__(self): class SampleFrames(BaseTransform): """Sample frames from the video. - Required keys are "total_frames", "start_index" , added or modified keys - are "frame_inds", "frame_interval" and "num_clips". + Required Keys: + + - total_frames + - start_index + + Added Keys: + + - frame_inds + - frame_interval + - num_clips Args: clip_len (int): Frames of each sampled output clip. frame_interval (int): Temporal interval of adjacent sampled frames. - Default: 1. + Defaults to 1. num_clips (int): Number of clips to be sampled. Default: 1. temporal_jitter (bool): Whether to apply temporal jittering. - Default: False. + Defaults to False. 
twice_sample (bool): Whether to use twice sample when testing. If set to True, it will sample frames with and without fixed shift, - which is commonly used for testing in TSM model. Default: False. + which is commonly used for testing in TSM model. Defaults to False. out_of_bound_opt (str): The way to deal with out of bounds frame indexes. Available options are 'loop', 'repeat_last'. - Default: 'loop'. + Defaults to 'loop'. test_mode (bool): Store True when building test or validation dataset. - Default: False. - start_index (None): This argument is deprecated and moved to dataset - class (``BaseDataset``, ``VideoDatset``, ``RawframeDataset``, etc), - see this: https://github.com/open-mmlab/mmaction2/pull/89. + Defaults to False. keep_tail_frames (bool): Whether to keep tail frames when sampling. - Default: False. + Defaults to False. """ def __init__(self, - clip_len, - frame_interval=1, - num_clips=1, - temporal_jitter=False, - twice_sample=False, - out_of_bound_opt='loop', - test_mode=False, - keep_tail_frames=False): + clip_len: int, + frame_interval: int = 1, + num_clips: int = 1, + temporal_jitter: bool = False, + twice_sample: bool = False, + out_of_bound_opt: str = 'loop', + test_mode: bool = False, + keep_tail_frames: bool = False, + **kwargs) -> None: self.clip_len = clip_len self.frame_interval = frame_interval @@ -128,7 +134,7 @@ def __init__(self, self.keep_tail_frames = keep_tail_frames assert self.out_of_bound_opt in ['loop', 'repeat_last'] - def _get_train_clips(self, num_frames): + def _get_train_clips(self, num_frames: int) -> np.array: """Get clip offsets in train mode. It will calculate the average interval for selected frames, @@ -172,7 +178,7 @@ def _get_train_clips(self, num_frames): return clip_offsets - def _get_test_clips(self, num_frames): + def _get_test_clips(self, num_frames: int) -> np.array: """Get clip offsets in test mode. If the total number of frames is @@ -184,21 +190,30 @@ def _get_test_clips(self, num_frames): Returns: np.ndarray: Sampled frame indices in test mode. """ - k = 2 if self.twice_sample else 1 - num_clips = self.num_clips * k - ori_clip_len = (self.clip_len - 1) * self.frame_interval + 1 - max_offset = max(num_frames - ori_clip_len, 0) - - if num_clips > 1: - num_segments = num_clips - 1 - offset_between = max_offset / float(num_segments) - clip_offsets = np.arange(num_clips) * offset_between - clip_offsets = np.round(clip_offsets).astype(np.int32) - else: - clip_offsets = np.array([max_offset // 2]) + if self.clip_len == 1: # 2D recognizer + # assert self.frame_interval == 1 + avg_interval = num_frames / float(self.num_clips) + base_offsets = np.arange(self.num_clips) * avg_interval + clip_offsets = base_offsets + avg_interval / 2.0 + if self.twice_sample: + clip_offsets = np.concatenate([clip_offsets, base_offsets]) + else: # 3D recognizer + ori_clip_len = (self.clip_len - 1) * self.frame_interval + 1 + max_offset = max(num_frames - ori_clip_len, 0) + if self.twice_sample: + num_clips = self.num_clips * 2 + else: + num_clips = self.num_clips + if num_clips > 1: + num_segments = self.num_clips - 1 + offset_between = max_offset / float(num_segments) + clip_offsets = np.arange(num_clips) * offset_between + clip_offsets = np.round(clip_offsets) + else: + clip_offsets = np.array([max_offset // 2]) return clip_offsets - def _sample_clips(self, num_frames): + def _sample_clips(self, num_frames: int) -> np.array: """Choose clip offsets for the video in a given mode. 
Args: @@ -214,7 +229,7 @@ def _sample_clips(self, num_frames): return clip_offsets - def transform(self, results): + def transform(self, results: dict) -> dict: """Perform the SampleFrames loading. Args: @@ -253,7 +268,7 @@ def transform(self, results): results['num_clips'] = self.num_clips return results - def __repr__(self): + def __repr__(self) -> str: repr_str = (f'{self.__class__.__name__}(' f'clip_len={self.clip_len}, ' f'frame_interval={self.frame_interval}, ' From 465d7debd3ff6b1e59ae9602fd186dc2297702b3 Mon Sep 17 00:00:00 2001 From: wxDai Date: Thu, 22 Dec 2022 23:55:14 +0800 Subject: [PATCH 41/57] [Fix] Update Testing Accuracy (#2117) --- configs/recognition/c2d/README.md | 16 ++--- ...nopool_8xb32-8x8x1-100e_kinetics400-rgb.py | 5 -- ...k-pre_8xb32-16x4x1-100e_kinetics400-rgb.py | 5 -- ...1k-pre_8xb32-8x8x1-100e_kinetics400-rgb.py | 5 -- configs/recognition/c2d/metafile.yml | 24 +++---- configs/recognition/c3d/README.md | 14 ++-- ...-pretrained_8xb30-16x1x1-45e_ucf101-rgb.py | 14 +++- configs/recognition/c3d/metafile.yml | 9 +-- configs/recognition/i3d/README.md | 28 ++++---- ...roduct_8xb8-32x2x1-100e_kinetics400-rgb.py | 14 +++- ...ed-r50_8xb8-32x2x1-100e_kinetics400-rgb.py | 14 +++- ..._8xb8-dense-32x2x1-100e_kinetics400-rgb.py | 8 ++- configs/recognition/i3d/metafile.yml | 66 ++++++++++--------- configs/recognition/r2plus1d/README.md | 18 +++-- configs/recognition/r2plus1d/metafile.yml | 22 +++---- ...1d_r34_8xb8-32x2x1-180e_kinetics400-rgb.py | 8 ++- ...s1d_r34_8xb8-8x8x1-180e_kinetics400-rgb.py | 14 +++- configs/recognition/slowfast/README.md | 24 ++++--- configs/recognition/slowfast/metafile.yml | 50 +++++++------- ...st_r50_8xb8-4x16x1-256e_kinetics400-rgb.py | 8 ++- configs/recognition/swin/README.md | 22 +++---- configs/recognition/swin/metafile.yml | 40 +++++------ ...pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py | 4 -- ...re_16xb8-amp-32x2x1-30e_kinetics700-rgb.py | 4 -- ...pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py | 4 -- ...pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py | 4 -- ...pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py | 4 -- configs/recognition/tanet/README.md | 24 ++++--- configs/recognition/tanet/metafile.yml | 28 ++++---- ...retrained-r50_8xb6-1x1x16-50e_sthv1-rgb.py | 15 +++-- ...pretrained-r50_8xb8-1x1x8-50e_sthv1-rgb.py | 13 +++- ...0_8xb8-dense-1x1x8-100e_kinetics400-rgb.py | 13 +++- 32 files changed, 279 insertions(+), 262 deletions(-) diff --git a/configs/recognition/c2d/README.md b/configs/recognition/c2d/README.md index 22bfd6ed86..651193dad2 100644 --- a/configs/recognition/c2d/README.md +++ b/configs/recognition/c2d/README.md @@ -21,18 +21,18 @@ Both convolutional and recurrent operations are building blocks that process one ### Kinetics-400 -| frame sampling strategy | scheduler | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | reference top1 acc | reference top5 acc | testing protocol | gpu_mem(M) | FLOPs | params | config | ckpt | log | -| :---------------------: | :-------: | :------------: | :--: | :-----------: | :------: | :------: | :------: | :---------------------: | :---------------------: | :----------------: | :--------: | :---: | :----: | :---------: | :-------: | :------: | -| 8x8x1 | MultiStep | short-side 320 | 8 | ResNet50
| ImageNet | 73.16 | 90.88 | 67.2
[\[PySlowFast\]](https://github.com/facebookresearch/SlowFast/blob/main/MODEL_ZOO.md#kinetics-400-and-600) | 87.8
[\[PySlowFast\]](https://github.com/facebookresearch/SlowFast/blob/main/MODEL_ZOO.md#kinetics-400-and-600) | 10 clips x 3 crops | 21547 | 33G | 24.3M | [config](/configs/recognition/c2d/c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb/c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb_20221027-e0227b22.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb/c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb.log) | -| 8x8x1 | MultiStep | short-side 320 | 8 | ResNet101
| ImageNet | 74.57 | 91.60 | x | x | 10 clips x 3 crops | 31836 | 63G | 43.3M | [config](/configs/recognition/c2d/c2d_r101-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r101-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb/c2d_r101-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb_20221027-557bd8bc.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r101-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb/c2d_r101-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb.log) | -| 8x8x1 | MultiStep | short-side 320 | 8 | ResNet50
(TemporalPool) | ImageNet | 73.57 | 90.96 | 71.9
[\[Non-Local\]](https://github.com/facebookresearch/video-nonlocal-net#modifications-for-improving-speed) | 90.0
[\[Non-Local\]](https://github.com/facebookresearch/video-nonlocal-net#modifications-for-improving-speed) | 10 clips x 3 crops | 17006 | 19G | 24.3M | [config](/configs/recognition/c2d/c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb/c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb_20221027-3ca304fa.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb/c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb.log) | -| 16x4x1 | MultiStep | short-side 320 | 8 | ResNet50
(TemporalPool) | ImageNet | 74.54 | 91.76 | x | x | 10 clips x 3 crops | 33630 | 39G | 24.3M | [config](/configs/recognition/c2d/c2d_r50-in1k-pre_8xb32-16x4x1-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r50-in1k-pre_8xb32-16x4x1-100e_kinetics400-rgb/c2d_r50-in1k-pre_8xb32-16x4x1-100e_kinetics400-rgb_20221027-5f382a43.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r50-in1k-pre_8xb32-16x4x1-100e_kinetics400-rgb/c2d_r50-in1k-pre_8xb32-16x4x1-100e_kinetics400-rgb.log) | +| frame sampling strategy | scheduler | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | reference top1 acc | reference top5 acc | testing protocol | FLOPs | params | config | ckpt | log | +| :---------------------: | :-------: | :--------: | :--: | :-------------: | :------: | :------: | :------: | :-----------------------: | :-----------------------: | :---------------: | :---: | :----: | :------------: | :----------: | :---------: | +| 8x8x1 | MultiStep | 224x224 | 8 | ResNet50
| ImageNet | 73.44 | 91.00 | 67.2
[\[PySlowFast\]](https://github.com/facebookresearch/SlowFast/blob/main/MODEL_ZOO.md#kinetics-400-and-600) | 87.8
[\[PySlowFast\]](https://github.com/facebookresearch/SlowFast/blob/main/MODEL_ZOO.md#kinetics-400-and-600) | 10 clips x 3 crop | 33G | 24.3M | [config](/configs/recognition/c2d/c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb/c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb_20221027-e0227b22.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb/c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb.log) | +| 8x8x1 | MultiStep | 224x224 | 8 | ResNet101
| ImageNet | 74.97 | 91.77 | x | x | 10 clips x 3 crop | 63G | 43.3M | [config](/configs/recognition/c2d/c2d_r101-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r101-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb/c2d_r101-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb_20221027-557bd8bc.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r101-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb/c2d_r101-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb.log) | +| 8x8x1 | MultiStep | 224x224 | 8 | ResNet50
(TemporalPool) | ImageNet | 73.89 | 91.21 | 71.9
[\[Non-Local\]](https://github.com/facebookresearch/video-nonlocal-net#modifications-for-improving-speed) | 90.0
[\[Non-Local\]](https://github.com/facebookresearch/video-nonlocal-net#modifications-for-improving-speed) | 10 clips x 3 crop | 19G | 24.3M | [config](/configs/recognition/c2d/c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb/c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb_20221027-3ca304fa.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb/c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb.log) | +| 16x4x1 | MultiStep | 224x224 | 8 | ResNet50
(TemporalPool) | ImageNet | 74.97 | 91.91 | x | x | 10 clips x 3 crop | 39G | 24.3M | [config](/configs/recognition/c2d/c2d_r50-in1k-pre_8xb32-16x4x1-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r50-in1k-pre_8xb32-16x4x1-100e_kinetics400-rgb/c2d_r50-in1k-pre_8xb32-16x4x1-100e_kinetics400-rgb_20221027-5f382a43.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r50-in1k-pre_8xb32-16x4x1-100e_kinetics400-rgb/c2d_r50-in1k-pre_8xb32-16x4x1-100e_kinetics400-rgb.log) | 1. The values in columns named after "reference" are the results reported in the original repo, using the same model settings. 2. The **gpus** indicates the number of gpus we used to get the checkpoint. If you want to use a different number of gpus or videos per gpu, the best way is to set `--auto-scale-lr` when calling `tools/train.py`, this parameter will auto-scale the learning rate according to the actual batch size and the original batch size. 3. The validation set of Kinetics400 we used consists of 19796 videos. These videos are available at [Kinetics400-Validation](https://mycuhk-my.sharepoint.com/:u:/g/personal/1155136485_link_cuhk_edu_hk/EbXw2WX94J1Hunyt3MWNDJUBz-nHvQYhO9pvKqm6g39PMA?e=a9QldB). The corresponding [data list](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_val_list.txt) (each line is of the format 'video_id, num_frames, label_index') and the [label map](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_class2ind.txt) are also available. -For more details on data preparation, you can refer to [preparing_kinetics](/tools/data/kinetics/README.md). +For more details on data preparation, you can refer to [Kinetics400](/tools/data/kinetics/README.md). ## Train @@ -46,7 +46,7 @@ Example: train C2D model on Kinetics-400 dataset in a deterministic option with ```shell python tools/train.py configs/recognition/c2d/c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb.py \ - --cfg-options randomness.seed=0 randomness.deterministic=True + --seed 0 --deterministic ``` For more details, you can refer to the **Training** part in the [Training and Test Tutorial](/docs/en/user_guides/4_train_test.md). 
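Note 2 above says `--auto-scale-lr` rescales the learning rate from the `base_batch_size` recorded in the config (compare the `auto_scale_lr = dict(enable=False, base_batch_size=...)` blocks added to other configs in this series). A minimal sketch of that linear scaling rule follows; the helper name and the numbers are illustrative only and are not part of the MMEngine API.

```python
def scale_lr(base_lr: float, base_batch_size: int, num_gpus: int,
             samples_per_gpu: int) -> float:
    """Linear scaling rule: LR grows in proportion to the effective batch size."""
    actual_batch_size = num_gpus * samples_per_gpu
    return base_lr * actual_batch_size / base_batch_size


# A config tuned for 8 GPUs x 32 videos (base_batch_size = 256) run on 4 GPUs x 32 videos
# gets half the learning rate; base_lr=0.1 below is purely illustrative.
print(scale_lr(base_lr=0.1, base_batch_size=256, num_gpus=4, samples_per_gpu=32))  # 0.05
```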
diff --git a/configs/recognition/c2d/c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb.py b/configs/recognition/c2d/c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb.py index 13795ffc00..cf8ae40e40 100644 --- a/configs/recognition/c2d/c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb.py +++ b/configs/recognition/c2d/c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb.py @@ -10,12 +10,7 @@ ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' -# file_client_args = dict( -# io_backend='petrel', -# path_mapping=dict( -# {'data/kinetics400': 's3://openmmlab/datasets/action/Kinetics400'})) file_client_args = dict(io_backend='disk') - train_pipeline = [ dict(type='DecordInit', **file_client_args), dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), diff --git a/configs/recognition/c2d/c2d_r50-in1k-pre_8xb32-16x4x1-100e_kinetics400-rgb.py b/configs/recognition/c2d/c2d_r50-in1k-pre_8xb32-16x4x1-100e_kinetics400-rgb.py index 4247cd8d9c..515dd621ac 100644 --- a/configs/recognition/c2d/c2d_r50-in1k-pre_8xb32-16x4x1-100e_kinetics400-rgb.py +++ b/configs/recognition/c2d/c2d_r50-in1k-pre_8xb32-16x4x1-100e_kinetics400-rgb.py @@ -10,12 +10,7 @@ ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' -# file_client_args = dict( -# io_backend='petrel', -# path_mapping=dict( -# {'data/kinetics400': 's3://openmmlab/datasets/action/Kinetics400'})) file_client_args = dict(io_backend='disk') - train_pipeline = [ dict(type='DecordInit', **file_client_args), dict(type='SampleFrames', clip_len=16, frame_interval=4, num_clips=1), diff --git a/configs/recognition/c2d/c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb.py b/configs/recognition/c2d/c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb.py index b2ca2c707e..135907a8c9 100644 --- a/configs/recognition/c2d/c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb.py +++ b/configs/recognition/c2d/c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb.py @@ -10,12 +10,7 @@ ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' -# file_client_args = dict( -# io_backend='petrel', -# path_mapping=dict( -# {'data/kinetics400': 's3://openmmlab/datasets/action/Kinetics400'})) file_client_args = dict(io_backend='disk') - train_pipeline = [ dict(type='DecordInit', **file_client_args), dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), diff --git a/configs/recognition/c2d/metafile.yml b/configs/recognition/c2d/metafile.yml index b629e5d55d..8d20761f8c 100644 --- a/configs/recognition/c2d/metafile.yml +++ b/configs/recognition/c2d/metafile.yml @@ -16,7 +16,7 @@ Models: FLOPs: 33G Parameters: 24.3M Pretrained: ImageNet - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -24,8 +24,8 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 73.16 - Top 5 Accuracy: 90.88 + Top 1 Accuracy: 73.44 + Top 5 Accuracy: 91.00 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb/c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb.log Weights: 
https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb/c2d_r50-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb_20221027-e0227b22.pth @@ -39,7 +39,7 @@ Models: FLOPs: 63G Parameters: 43.3M Pretrained: ImageNet - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -47,8 +47,8 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 74.57 - Top 5 Accuracy: 91.60 + Top 1 Accuracy: 74.97 + Top 5 Accuracy: 91.77 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r101-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb/c2d_r101-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r101-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb/c2d_r101-in1k-pre-nopool_8xb32-8x8x1-100e_kinetics400-rgb_20221027-557bd8bc.pth @@ -62,7 +62,7 @@ Models: FLOPs: 19G Parameters: 24.3M Pretrained: ImageNet - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -70,8 +70,8 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 73.57 - Top 5 Accuracy: 90.96 + Top 1 Accuracy: 73.89 + Top 5 Accuracy: 91.21 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb/c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb/c2d_r50-in1k-pre_8xb32-8x8x1-100e_kinetics400-rgb_20221027-3ca304fa.pth @@ -85,7 +85,7 @@ Models: FLOPs: 39G Parameters: 24.3M Pretrained: ImageNet - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -93,7 +93,7 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 74.54 - Top 5 Accuracy: 91.76 + Top 1 Accuracy: 74.97 + Top 5 Accuracy: 91.91 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r50-in1k-pre_8xb32-16x4x1-100e_kinetics400-rgb/c2d_r50-in1k-pre_8xb32-16x4x1-100e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/c2d/c2d_r50-in1k-pre_8xb32-16x4x1-100e_kinetics400-rgb/c2d_r50-in1k-pre_8xb32-16x4x1-100e_kinetics400-rgb_20221027-5f382a43.pth diff --git a/configs/recognition/c3d/README.md b/configs/recognition/c3d/README.md index fb5f4036a4..958119f048 100644 --- a/configs/recognition/c3d/README.md +++ b/configs/recognition/c3d/README.md @@ -20,16 +20,14 @@ We propose a simple, yet effective approach for spatiotemporal feature learning ### UCF-101 -| frame sampling strategy | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | inference_time(video/s) | gpu_mem(M) | config | ckpt | log | -| :---------------------: | :--------: | :--: | :------: | :------: | :------: | :------: | :---------------: | :---------------------: | :--------: | :--------------------------: | :-------------------------: | :------------------------: | -| 16x1x1 | raw | 8 | c3d | sports1m | 82.92 | 96.11 | 10 clips x 1 crop | x | 6067 | [config](/configs/recognition/c3d/c3d_sports1m-pretrained_8xb30-16x1x1-45e_ucf101-rgb.py) | 
[ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/c3d/c3d_sports1m-pretrained_8xb30-16x1x1-45e_ucf101-rgb/c3d_sports1m-pretrained_8xb30-16x1x1-45e_ucf101-rgb_20220811-31723200.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/c3d/c3d_sports1m-pretrained_8xb30-16x1x1-45e_ucf101-rgb/c3d_sports1m-pretrained_8xb30-16x1x1-45e_ucf101-rgb.log) | +| frame sampling strategy | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | FLOPs | params | config | ckpt | log | +| :---------------------: | :--------: | :--: | :------: | :------: | :------: | :------: | :---------------: | :---: | :----: | :----------------------------------: | :--------------------------------: | :-------------------------------: | +| 16x1x1 | 112x112 | 8 | c3d | sports1m | 83.08 | 95.93 | 10 clips x 1 crop | 38.5G | 78.4M | [config](/configs/recognition/c3d/c3d_sports1m-pretrained_8xb30-16x1x1-45e_ucf101-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/c3d/c3d_sports1m-pretrained_8xb30-16x1x1-45e_ucf101-rgb/c3d_sports1m-pretrained_8xb30-16x1x1-45e_ucf101-rgb_20220811-31723200.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/c3d/c3d_sports1m-pretrained_8xb30-16x1x1-45e_ucf101-rgb/c3d_sports1m-pretrained_8xb30-16x1x1-45e_ucf101-rgb.log) | 1. The author of C3D normalized UCF-101 with volume mean and used SVM to classify videos, while we normalized the dataset with RGB mean value and used a linear classifier. -2. The **gpus** indicates the number of gpu (80G A100) we used to get the checkpoint. It is noteworthy that the configs we provide are used for 8 gpus as default. - According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you may set the learning rate proportional to the batch size if you use different GPUs or videos per GPU, - e.g., lr=0.01 for 4 GPUs x 2 video/gpu and lr=0.08 for 16 GPUs x 4 video/gpu. +2. The **gpus** indicates the number of gpus we used to get the checkpoint. If you want to use a different number of gpus or videos per gpu, the best way is to set `--auto-scale-lr` when calling `tools/train.py`, this parameter will auto-scale the learning rate according to the actual batch size and the original batch size. -For more details on data preparation, you can refer to the **Prepare videos** part in the [Data Preparation Tutorial](/docs/en/user_guides/2_data_prepare.md). +For more details on data preparation, you can refer to [UCF101](/tools/data/ucf101/README.md). ## Train @@ -43,7 +41,7 @@ Example: train C3D model on UCF-101 dataset in a deterministic option with perio ```shell python tools/train.py configs/recognition/c3d/c3d_sports1m-pretrained_8xb30-16x1x1-45e_ucf101-rgb.py \ - --cfg-options randomness.seed=0 randomness.deterministic=True + --seed=0 --deterministic ``` For more details, you can refer to the **Training** part in the [Training and Test Tutorial](/docs/en/user_guides/4_train_test.md). 
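The "testing protocol" column above ("10 clips x 1 crop") means the recognizer scores several views of each video and averages them before ranking classes. A rough sketch of that averaging step, assuming random stand-in tensors rather than the recognizer's real outputs:

```python
import torch

num_clips, num_crops, num_classes = 10, 1, 101  # UCF-101 has 101 classes
# Stand-in scores for every sampled view of one video (random, just to show shapes).
view_scores = torch.randn(num_clips * num_crops, num_classes).softmax(dim=-1)
video_score = view_scores.mean(dim=0)        # average over all clips/crops of the video
top5 = video_score.topk(5).indices.tolist()  # classes used for top-1 / top-5 accuracy
print(top5)
```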
diff --git a/configs/recognition/c3d/c3d_sports1m-pretrained_8xb30-16x1x1-45e_ucf101-rgb.py b/configs/recognition/c3d/c3d_sports1m-pretrained_8xb30-16x1x1-45e_ucf101-rgb.py index b87df85c01..49635fa412 100644 --- a/configs/recognition/c3d/c3d_sports1m-pretrained_8xb30-16x1x1-45e_ucf101-rgb.py +++ b/configs/recognition/c3d/c3d_sports1m-pretrained_8xb30-16x1x1-45e_ucf101-rgb.py @@ -11,8 +11,10 @@ ann_file_train = f'data/ucf101/ucf101_train_split_{split}_videos.txt' ann_file_val = f'data/ucf101/ucf101_val_split_{split}_videos.txt' ann_file_test = f'data/ucf101/ucf101_val_split_{split}_videos.txt' + +file_client_args = dict(io_backend='disk') train_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict(type='SampleFrames', clip_len=16, frame_interval=1, num_clips=1), dict(type='DecordDecode'), dict(type='Resize', scale=(-1, 128)), @@ -22,7 +24,7 @@ dict(type='PackActionInputs') ] val_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=16, @@ -36,7 +38,7 @@ dict(type='PackActionInputs') ] test_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=16, @@ -106,3 +108,9 @@ clip_grad=dict(max_norm=40, norm_type=2)) default_hooks = dict(checkpoint=dict(interval=5)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (30 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=240) diff --git a/configs/recognition/c3d/metafile.yml b/configs/recognition/c3d/metafile.yml index 26becfc8a2..9944352256 100644 --- a/configs/recognition/c3d/metafile.yml +++ b/configs/recognition/c3d/metafile.yml @@ -13,9 +13,10 @@ Models: Architecture: c3d Batch Size: 30 Epochs: 45 - FLOPs: 38615475200 - Parameters: 78409573 + FLOPs: 38.5G + Parameters: 78.4M Pretrained: sports1m + Resolution: 112x112 Training Data: UCF101 Training Resources: 8 GPUs Modality: RGB @@ -23,7 +24,7 @@ Models: - Dataset: UCF101 Task: Action Recognition Metrics: - Top 1 Accuracy: 82.92 - Top 5 Accuracy: 96.11 + Top 1 Accuracy: 83.08 + Top 5 Accuracy: 95.93 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/c3d/c3d_sports1m-pretrained_8xb30-16x1x1-45e_ucf101-rgb/c3d_sports1m-pretrained_8xb30-16x1x1-45e_ucf101-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/c3d/c3d_sports1m-pretrained_8xb30-16x1x1-45e_ucf101-rgb/c3d_sports1m-pretrained_8xb30-16x1x1-45e_ucf101-rgb_20220811-31723200.pth diff --git a/configs/recognition/i3d/README.md b/configs/recognition/i3d/README.md index 66703707c4..e181eaf195 100644 --- a/configs/recognition/i3d/README.md +++ b/configs/recognition/i3d/README.md @@ -22,23 +22,19 @@ The paucity of videos in current action classification datasets (UCF-101 and HMD ### Kinetics-400 -| frame sampling strategy | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | inference time(video/s) | gpu_mem(M) | config | ckpt | log | -| :---------------------: | :------------: | :--: | :----------------------: | :------: | :------: | :------: | :---------------: | :---------------------: | :--------: | :--------------------: | :------------------: | :-----------------: | -| 32x2x1 | short-side 320 | 8 | ResNet50 (NonLocalDotProduct) | ImageNet | 74.76 | 91.84 | 10 clips x 3 crop | x | 6245 | 
[config](/configs/recognition/i3d/i3d_imagenet-pretrained-r50-nl-dot-product_8xb8-32x2x1-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/i3d/i3d_imagenet-pretrained-r50-nl-dot-product_8xb8-32x2x1-100e_kinetics400-rgb/i3d_imagenet-pretrained-r50-nl-dot-product_8xb8-32x2x1-100e_kinetics400-rgb_20220812-8e1f2148.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/i3d/i3d_imagenet-pretrained-r50-nl-dot-product_8xb8-32x2x1-100e_kinetics400-rgb/i3d_imagenet-pretrained-r50-nl-dot-product_8xb8-32x2x1-100e_kinetics400-rgb.log) | -| 32x2x1 | short-side 320 | 8 | ResNet50 (NonLocalEmbedGauss) | ImageNet | 74.69 | 91.69 | 10 clips x 3 crop | x | 6415 | [config](/configs/recognition/i3d/i3d_imagenet-pretrained-r50-nl-embedded-gaussian_8xb8-32x2x1-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/i3d/i3d_imagenet-pretrained-r50-nl-embedded-gaussian_8xb8-32x2x1-100e_kinetics400-rgb/ii3d_imagenet-pretrained-r50-nl-embedded-gaussian_8xb8-32x2x1-100e_kinetics400-rgb_20220812-afd8f562.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/i3d/i3d_imagenet-pretrained-r50-nl-embedded-gaussian_8xb8-32x2x1-100e_kinetics400-rgb/i3d_imagenet-pretrained-r50-nl-embedded-gaussian_8xb8-32x2x1-100e_kinetics400-rgb.log) | -| 32x2x1 | short-side 320 | 8 | ResNet50 (NonLocalGauss) | ImageNet | 73.90 | 91.15 | 10 clips x 3 crop | x | 6108 | [config](/configs/recognition/i3d/i3d_imagenet-pretrained-r50-nl-gaussian_8xb8-32x2x1-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/i3d/i3d_imagenet-pretrained-r50-nl-gaussian_8xb8-32x2x1-100e_kinetics400-rgb/i3d_imagenet-pretrained-r50-nl-gaussian_8xb8-32x2x1-100e_kinetics400-rgb_20220812-0c5cbf5a.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/i3d/i3d_imagenet-pretrained-r50-nl-gaussian_8xb8-32x2x1-100e_kinetics400-rgb/i3d_imagenet-pretrained-r50-nl-gaussian_8xb8-32x2x1-100e_kinetics400-rgb.log) | -| 32x2x1 | short-side 320 | 8 | ResNet50 | ImageNet | 73.22 | 91.11 | 10 clips x 3 crop | x | 5149 | [config](/configs/recognition/i3d/i3d_imagenet-pretrained-r50_8xb8-32x2x1-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/i3d/i3d_imagenet-pretrained-r50_8xb8-32x2x1-100e_kinetics400-rgb/i3d_imagenet-pretrained-r50_8xb8-32x2x1-100e_kinetics400-rgb_20220812-e213c223.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/i3d/i3d_imagenet-pretrained-r50_8xb8-32x2x1-100e_kinetics400-rgb/i3d_imagenet-pretrained-r50_8xb8-32x2x1-100e_kinetics400-rgb.log) | -| dense-32x2x1 | short-side 320 | 8 | ResNet50 | ImageNet | 73.77 | 91.35 | 10 clips x 3 crop | x | 5151 | [config](/configs/recognition/i3d/i3d_imagenet-pretrained-r50_8xb8-dense-32x2x1-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/i3d/i3d_imagenet-pretrained-r50_8xb8-dense-32x2x1-100e_kinetics400-rgb/i3d_imagenet-pretrained-r50_8xb8-dense-32x2x1-100e_kinetics400-rgb_20220812-9f46003f.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/i3d/i3d_imagenet-pretrained-r50_8xb8-dense-32x2x1-100e_kinetics400-rgb/i3d_imagenet-pretrained-r50_8xb8-dense-32x2x1-100e_kinetics400-rgb.log) | -| 32x2x1 | short-side 320 | 8 | ResNet50 (Heavy) | ImageNet | 76.08 | 92.34 | 10 clips x 3 crop | x | 17350 | [config](/configs/recognition/i3d/i3d_imagenet-pretrained-r50-heavy_8xb8-32x2x1-100e_kinetics400-rgb.py) | 
[ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/i3d/i3d_imagenet-pretrained-r50-heavy_8xb8-32x2x1-100e_kinetics400-rgb/i3d_imagenet-pretrained-r50-heavy_8xb8-32x2x1-100e_kinetics400-rgb_20220812-ed501b31.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/i3d/i3d_imagenet-pretrained-r50-heavy_8xb8-32x2x1-100e_kinetics400-rgb/i3d_imagenet-pretrained-r50-heavy_8xb8-32x2x1-100e_kinetics400-rgb.log) | - -1. The **gpus** indicates the number of gpu we used to get the checkpoint. It is noteworthy that the configs we provide are used for 8 gpus as default. - According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you may set the learning rate proportional to the batch size if you use different GPUs or videos per GPU, - e.g., lr=0.01 for 4 GPUs x 2 video/gpu and lr=0.08 for 16 GPUs x 4 video/gpu. +| frame sampling strategy | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | FLOPs | params | config | ckpt | log | +| :---------------------: | :--------: | :--: | :---------------------------: | :------: | :------: | :------: | :---------------: | :----: | :----: | :--------------------------: | :-------------------------: | :------------------------: | +| 32x2x1 | 224x224 | 8 | ResNet50 (NonLocalDotProduct) | ImageNet | 74.80 | 92.07 | 10 clips x 3 crop | 59.3G | 35.4M | [config](/configs/recognition/i3d/i3d_imagenet-pretrained-r50-nl-dot-product_8xb8-32x2x1-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/i3d/i3d_imagenet-pretrained-r50-nl-dot-product_8xb8-32x2x1-100e_kinetics400-rgb/i3d_imagenet-pretrained-r50-nl-dot-product_8xb8-32x2x1-100e_kinetics400-rgb_20220812-8e1f2148.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/i3d/i3d_imagenet-pretrained-r50-nl-dot-product_8xb8-32x2x1-100e_kinetics400-rgb/i3d_imagenet-pretrained-r50-nl-dot-product_8xb8-32x2x1-100e_kinetics400-rgb.log) | +| 32x2x1 | 224x224 | 8 | ResNet50 (NonLocalEmbedGauss) | ImageNet | 74.73 | 91.80 | 10 clips x 3 crop | 59.3G | 35.4M | [config](/configs/recognition/i3d/i3d_imagenet-pretrained-r50-nl-embedded-gaussian_8xb8-32x2x1-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/i3d/i3d_imagenet-pretrained-r50-nl-embedded-gaussian_8xb8-32x2x1-100e_kinetics400-rgb/i3d_imagenet-pretrained-r50-nl-embedded-gaussian_8xb8-32x2x1-100e_kinetics400-rgb_20220812-afd8f562.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/i3d/i3d_imagenet-pretrained-r50-nl-embedded-gaussian_8xb8-32x2x1-100e_kinetics400-rgb/i3d_imagenet-pretrained-r50-nl-embedded-gaussian_8xb8-32x2x1-100e_kinetics400-rgb.log) | +| 32x2x1 | 224x224 | 8 | ResNet50 (NonLocalGauss) | ImageNet | 73.97 | 91.33 | 10 clips x 3 crop | 56.5 | 31.7M | [config](/configs/recognition/i3d/i3d_imagenet-pretrained-r50-nl-gaussian_8xb8-32x2x1-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/i3d/i3d_imagenet-pretrained-r50-nl-gaussian_8xb8-32x2x1-100e_kinetics400-rgb/i3d_imagenet-pretrained-r50-nl-gaussian_8xb8-32x2x1-100e_kinetics400-rgb_20220812-0c5cbf5a.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/i3d/i3d_imagenet-pretrained-r50-nl-gaussian_8xb8-32x2x1-100e_kinetics400-rgb/i3d_imagenet-pretrained-r50-nl-gaussian_8xb8-32x2x1-100e_kinetics400-rgb.log) | +| 32x2x1 | 224x224 | 8 | ResNet50 | ImageNet | 73.47 | 91.27 | 10 clips x 3 crop | 43.5G | 28.0M | 
[config](/configs/recognition/i3d/i3d_imagenet-pretrained-r50_8xb8-32x2x1-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/i3d/i3d_imagenet-pretrained-r50_8xb8-32x2x1-100e_kinetics400-rgb/i3d_imagenet-pretrained-r50_8xb8-32x2x1-100e_kinetics400-rgb_20220812-e213c223.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/i3d/i3d_imagenet-pretrained-r50_8xb8-32x2x1-100e_kinetics400-rgb/i3d_imagenet-pretrained-r50_8xb8-32x2x1-100e_kinetics400-rgb.log) | +| dense-32x2x1 | 224x224 | 8 | ResNet50 | ImageNet | 73.77 | 91.35 | 10 clips x 3 crop | 43.5G | 28.0M | [config](/configs/recognition/i3d/i3d_imagenet-pretrained-r50_8xb8-dense-32x2x1-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/i3d/i3d_imagenet-pretrained-r50_8xb8-dense-32x2x1-100e_kinetics400-rgb/i3d_imagenet-pretrained-r50_8xb8-dense-32x2x1-100e_kinetics400-rgb_20220812-9f46003f.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/i3d/i3d_imagenet-pretrained-r50_8xb8-dense-32x2x1-100e_kinetics400-rgb/i3d_imagenet-pretrained-r50_8xb8-dense-32x2x1-100e_kinetics400-rgb.log) | +| 32x2x1 | 224x224 | 8 | ResNet50 (Heavy) | ImageNet | 76.21 | 92.48 | 10 clips x 3 crop | 166.3G | 33.0M | [config](/configs/recognition/i3d/i3d_imagenet-pretrained-r50-heavy_8xb8-32x2x1-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/i3d/i3d_imagenet-pretrained-r50-heavy_8xb8-32x2x1-100e_kinetics400-rgb/i3d_imagenet-pretrained-r50-heavy_8xb8-32x2x1-100e_kinetics400-rgb_20220812-ed501b31.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/i3d/i3d_imagenet-pretrained-r50-heavy_8xb8-32x2x1-100e_kinetics400-rgb/i3d_imagenet-pretrained-r50-heavy_8xb8-32x2x1-100e_kinetics400-rgb.log) | + +1. The **gpus** indicates the number of gpus we used to get the checkpoint. If you want to use a different number of gpus or videos per gpu, the best way is to set `--auto-scale-lr` when calling `tools/train.py`, this parameter will auto-scale the learning rate according to the actual batch size and the original batch size. 2. The validation set of Kinetics400 we used consists of 19796 videos. These videos are available at [Kinetics400-Validation](https://mycuhk-my.sharepoint.com/:u:/g/personal/1155136485_link_cuhk_edu_hk/EbXw2WX94J1Hunyt3MWNDJUBz-nHvQYhO9pvKqm6g39PMA?e=a9QldB). The corresponding [data list](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_val_list.txt) (each line is of the format 'video_id, num_frames, label_index') and the [label map](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_class2ind.txt) are also available. -For more details on data preparation, you can refer to - -- [preparing_kinetics](/tools/data/kinetics/README.md) +For more details on data preparation, you can refer to [Kinetics400](/tools/data/kinetics/README.md). ## Train @@ -52,7 +48,7 @@ Example: train I3D model on Kinetics-400 dataset in a deterministic option with ```shell python tools/train.py configs/recognition/i3d/i3d_imagenet-pretrained-r50_8xb8-32x2x1-100e_kinetics400-rgb.py \ - --cfg-options randomness.seed=0 randomness.deterministic=True + --seed=0 --deterministic ``` For more details, you can refer to the **Training** part in the [Training and Test Tutorial](/docs/en/user_guides/4_train_test.md). 
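The frame sampling strategies above ("32x2x1", "dense-32x2x1") follow the `clip_len x frame_interval x num_clips` arguments passed to `SampleFrames`/`DenseSampleFrames` in the configs below. A small worked example of how many decoded frames one such clip spans, mirroring the `ori_clip_len` computation in the `SampleFrames` patch earlier in this series:

```python
clip_len, frame_interval, num_clips = 32, 2, 1  # the "32x2x1" strategy
# One clip takes `clip_len` frames spaced `frame_interval` apart, so it covers:
ori_clip_len = (clip_len - 1) * frame_interval + 1
print(ori_clip_len)  # 63 decoded frames feed a single 32-frame clip
```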
diff --git a/configs/recognition/i3d/i3d_imagenet-pretrained-r50-nl-dot-product_8xb8-32x2x1-100e_kinetics400-rgb.py b/configs/recognition/i3d/i3d_imagenet-pretrained-r50-nl-dot-product_8xb8-32x2x1-100e_kinetics400-rgb.py index 95110c3327..5ec792b3dc 100644 --- a/configs/recognition/i3d/i3d_imagenet-pretrained-r50-nl-dot-product_8xb8-32x2x1-100e_kinetics400-rgb.py +++ b/configs/recognition/i3d/i3d_imagenet-pretrained-r50-nl-dot-product_8xb8-32x2x1-100e_kinetics400-rgb.py @@ -20,8 +20,10 @@ ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt' + +file_client_args = dict(io_backend='disk') train_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1), dict(type='DecordDecode'), dict(type='Resize', scale=(-1, 256)), @@ -37,7 +39,7 @@ dict(type='PackActionInputs') ] val_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=32, @@ -51,7 +53,7 @@ dict(type='PackActionInputs') ] test_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=32, @@ -102,3 +104,9 @@ test_evaluator = val_evaluator default_hooks = dict(checkpoint=dict(interval=5, max_keep_ckpts=5)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (8 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=64) diff --git a/configs/recognition/i3d/i3d_imagenet-pretrained-r50_8xb8-32x2x1-100e_kinetics400-rgb.py b/configs/recognition/i3d/i3d_imagenet-pretrained-r50_8xb8-32x2x1-100e_kinetics400-rgb.py index d36054d2a5..63b14db296 100644 --- a/configs/recognition/i3d/i3d_imagenet-pretrained-r50_8xb8-32x2x1-100e_kinetics400-rgb.py +++ b/configs/recognition/i3d/i3d_imagenet-pretrained-r50_8xb8-32x2x1-100e_kinetics400-rgb.py @@ -10,8 +10,10 @@ ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt' + +file_client_args = dict(io_backend='disk') train_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1), dict(type='DecordDecode'), dict(type='Resize', scale=(-1, 256)), @@ -27,7 +29,7 @@ dict(type='PackActionInputs') ] val_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=32, @@ -41,7 +43,7 @@ dict(type='PackActionInputs') ] test_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=32, @@ -93,3 +95,9 @@ test_evaluator = val_evaluator default_hooks = dict(checkpoint=dict(interval=5, max_keep_ckpts=5)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (8 samples per GPU). 
+auto_scale_lr = dict(enable=False, base_batch_size=64) diff --git a/configs/recognition/i3d/i3d_imagenet-pretrained-r50_8xb8-dense-32x2x1-100e_kinetics400-rgb.py b/configs/recognition/i3d/i3d_imagenet-pretrained-r50_8xb8-dense-32x2x1-100e_kinetics400-rgb.py index 5215659677..a8593e221c 100644 --- a/configs/recognition/i3d/i3d_imagenet-pretrained-r50_8xb8-dense-32x2x1-100e_kinetics400-rgb.py +++ b/configs/recognition/i3d/i3d_imagenet-pretrained-r50_8xb8-dense-32x2x1-100e_kinetics400-rgb.py @@ -7,8 +7,10 @@ ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt' + +file_client_args = dict(io_backend='disk') train_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict(type='DenseSampleFrames', clip_len=32, frame_interval=2, num_clips=1), dict(type='DecordDecode'), dict(type='Resize', scale=(-1, 256)), @@ -24,7 +26,7 @@ dict(type='PackActionInputs') ] val_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='DenseSampleFrames', clip_len=32, @@ -38,7 +40,7 @@ dict(type='PackActionInputs') ] test_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='DenseSampleFrames', clip_len=32, diff --git a/configs/recognition/i3d/metafile.yml b/configs/recognition/i3d/metafile.yml index dc94092b51..63ad017343 100644 --- a/configs/recognition/i3d/metafile.yml +++ b/configs/recognition/i3d/metafile.yml @@ -1,9 +1,9 @@ Collections: -- Name: I3D - README: configs/recognition/i3d/README.md - Paper: - URL: https://arxiv.org/abs/1705.07750 - Title: 'Quo Vadis, Action Recognition? A New Model and the Kinetics Dataset' + - Name: I3D + README: configs/recognition/i3d/README.md + Paper: + URL: https://arxiv.org/abs/1705.07750 + Title: 'Quo Vadis, Action Recognition? 
A New Model and the Kinetics Dataset' Models: - Name: i3d_imagenet-pretrained-r50-nl-dot-product_8xb8-32x2x1-100e_kinetics400-rgb @@ -13,10 +13,10 @@ Models: Architecture: ResNet50 Batch Size: 8 Epochs: 100 - FLOPs: 54334488576 - Parameters: 35397840 + FLOPs: 59.3G + Parameters: 35.4M Pretrained: ImageNet - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -24,8 +24,8 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 74.76 - Top 5 Accuracy: 91.84 + Top 1 Accuracy: 74.80 + Top 5 Accuracy: 92.07 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/i3d/i3d_imagenet-pretrained-r50-nl-dot-product_8xb8-32x2x1-100e_kinetics400-rgb/i3d_imagenet-pretrained-r50-nl-dot-product_8xb8-32x2x1-100e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/i3d/i3d_imagenet-pretrained-r50-nl-dot-product_8xb8-32x2x1-100e_kinetics400-rgb/i3d_imagenet-pretrained-r50-nl-dot-product_8xb8-32x2x1-100e_kinetics400-rgb_20220812-8e1f2148.pth @@ -36,10 +36,10 @@ Models: Architecture: ResNet50 Batch Size: 8 Epochs: 100 - FLOPs: 54334488576 - Parameters: 35397840 + FLOPs: 59.3G + Parameters: 35.4M Pretrained: ImageNet - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -47,10 +47,10 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 74.69 - Top 5 Accuracy: 91.69 + Top 1 Accuracy: 74.73 + Top 5 Accuracy: 91.80 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/i3d/i3d_imagenet-pretrained-r50-nl-embedded-gaussian_8xb8-32x2x1-100e_kinetics400-rgb/i3d_imagenet-pretrained-r50-nl-embedded-gaussian_8xb8-32x2x1-100e_kinetics400-rgb.log - Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/i3d/i3d_imagenet-pretrained-r50-nl-embedded-gaussian_8xb8-32x2x1-100e_kinetics400-rgb/ii3d_imagenet-pretrained-r50-nl-embedded-gaussian_8xb8-32x2x1-100e_kinetics400-rgb_20220812-afd8f562.pth + Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/i3d/i3d_imagenet-pretrained-r50-nl-embedded-gaussian_8xb8-32x2x1-100e_kinetics400-rgb/i3d_imagenet-pretrained-r50-nl-embedded-gaussian_8xb8-32x2x1-100e_kinetics400-rgb_20220812-afd8f562.pth - Name: i3d_imagenet-pretrained-r50-nl-gaussian_8xb8-32x2x1-100e_kinetics400-rgb Config: configs/recognition/i3d/i3d_imagenet-pretrained-r50-nl-gaussian_8xb8-32x2x1-100e_kinetics400-rgb.py @@ -59,10 +59,10 @@ Models: Architecture: ResNet50 Batch Size: 8 Epochs: 100 - FLOPs: 48962109440 - Parameters: 31723728 + FLOPs: 56.5G + Parameters: 31.7M Pretrained: ImageNet - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -70,8 +70,8 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 73.90 - Top 5 Accuracy: 91.15 + Top 1 Accuracy: 73.97 + Top 5 Accuracy: 91.33 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/i3d/i3d_imagenet-pretrained-r50-nl-gaussian_8xb8-32x2x1-100e_kinetics400-rgb/i3d_imagenet-pretrained-r50-nl-gaussian_8xb8-32x2x1-100e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/i3d/i3d_imagenet-pretrained-r50-nl-gaussian_8xb8-32x2x1-100e_kinetics400-rgb/i3d_imagenet-pretrained-r50-nl-gaussian_8xb8-32x2x1-100e_kinetics400-rgb_20220812-0c5cbf5a.pth @@ -82,10 +82,10 @@ Models: Architecture: ResNet50 Batch Size: 8 Epochs: 100 - FLOPs: 43564040192 - 
Parameters: 28043472 + FLOPs: 43.5G + Parameters: 28.0M Pretrained: ImageNet - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -93,8 +93,8 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 73.22 - Top 5 Accuracy: 91.11 + Top 1 Accuracy: 73.47 + Top 5 Accuracy: 91.27 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/i3d/i3d_imagenet-pretrained-r50_8xb8-32x2x1-100e_kinetics400-rgb/i3d_imagenet-pretrained-r50_8xb8-32x2x1-100e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/i3d/i3d_imagenet-pretrained-r50_8xb8-32x2x1-100e_kinetics400-rgb/i3d_imagenet-pretrained-r50_8xb8-32x2x1-100e_kinetics400-rgb_20220812-e213c223.pth @@ -105,10 +105,10 @@ Models: Architecture: ResNet50 Batch Size: 8 Epochs: 100 - FLOPs: 43564040192 - Parameters: 28043472 + FLOPs: 43.5G + Parameters: 28.0M Pretrained: ImageNet - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -128,8 +128,10 @@ Models: Architecture: ResNet50 Batch Size: 8 Epochs: 100 + FLOPs: 166.3G + Parameters: 33.0M Pretrained: ImageNet - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -137,7 +139,7 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 76.08 - Top 5 Accuracy: 92.34 + Top 1 Accuracy: 76.21 + Top 5 Accuracy: 92.48 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/i3d/i3d_imagenet-pretrained-r50-heavy_8xb8-32x2x1-100e_kinetics400-rgb/i3d_imagenet-pretrained-r50-heavy_8xb8-32x2x1-100e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/i3d/i3d_imagenet-pretrained-r50-heavy_8xb8-32x2x1-100e_kinetics400-rgb/i3d_imagenet-pretrained-r50-heavy_8xb8-32x2x1-100e_kinetics400-rgb_20220812-ed501b31.pth diff --git a/configs/recognition/r2plus1d/README.md b/configs/recognition/r2plus1d/README.md index beb4d0ffa7..29a619e696 100644 --- a/configs/recognition/r2plus1d/README.md +++ b/configs/recognition/r2plus1d/README.md @@ -20,17 +20,15 @@ In this paper we discuss several forms of spatiotemporal convolutions for video ### Kinetics-400 -| frame sampling strategy | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | inference time(video/s) | gpu_mem(M) | config | ckpt | log | -| :---------------------: | :------------: | :--: | :------: | :------: | :------: | :------: | :---------------: | :---------------------: | :--------: | :-------------------------: | :-----------------------: | :-----------------------: | -| 8x8x1 | short-side 320 | 8 | ResNet34 | None | 69.35 | 88.32 | 10 clips x 3 crop | x | 5036 | [config](/configs/recognition/r2plus1d/r2plus1d_r34_8xb8-8x8x1-180e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/r2plus1d/r2plus1d_r34_8xb8-8x8x1-180e_kinetics400-rgb/r2plus1d_r34_8xb8-8x8x1-180e_kinetics400-rgb_20220812-47cfe041.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/r2plus1d/r2plus1d_r34_8xb8-8x8x1-180e_kinetics400-rgb/r2plus1d_r34_8xb8-8x8x1-180e_kinetics400-rgb.log) | -| 32x2x1 | short-side 320 | 8 | ResNet34 | None | 75.27 | 92.03 | 10 clips x 3 crop | x | 17006 | [config](/configs/recognition/r2plus1d/r2plus1d_r34_8xb8-32x2x1-180e_kinetics400-rgb.py) | 
[ckpt](https://download.openmmlab.com/mmaction/v2.0/recognition/r2plus1d/r2plus1d_r34_8xb8-32x2x1-180e_kinetics400-rgb/r2plus1d_r34_8xb8-32x2x1-180e_kinetics400-rgb_20220812-4270588c.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/r2plus1d/r2plus1d_r34_8xb8-32x2x1-180e_kinetics400-rgb/r2plus1d_r34_8xb8-32x2x1-180e_kinetics400-rgb.log) | - -1. The **gpus** indicates the number of gpu we used to get the checkpoint. It is noteworthy that the configs we provide are used for 8 gpus as default. - According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you may set the learning rate proportional to the batch size if you use different GPUs or videos per GPU, - e.g., lr=0.01 for 4 GPUs x 2 video/gpu and lr=0.08 for 16 GPUs x 4 video/gpu. +| frame sampling strategy | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | FLOPs | params | config | ckpt | log | +| :---------------------: | :--------: | :--: | :------: | :------: | :------: | :------: | :---------------: | :---: | :----: | :----------------------------------: | :--------------------------------: | :-------------------------------: | +| 8x8x1 | 224x224 | 8 | ResNet34 | None | 69.76 | 88.41 | 10 clips x 3 crop | 53.1G | 63.8M | [config](/configs/recognition/r2plus1d/r2plus1d_r34_8xb8-8x8x1-180e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/r2plus1d/r2plus1d_r34_8xb8-8x8x1-180e_kinetics400-rgb/r2plus1d_r34_8xb8-8x8x1-180e_kinetics400-rgb_20220812-47cfe041.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/r2plus1d/r2plus1d_r34_8xb8-8x8x1-180e_kinetics400-rgb/r2plus1d_r34_8xb8-8x8x1-180e_kinetics400-rgb.log) | +| 32x2x1 | 224x224 | 8 | ResNet34 | None | 75.46 | 92.28 | 10 clips x 3 crop | 213G | 63.8M | [config](/configs/recognition/r2plus1d/r2plus1d_r34_8xb8-32x2x1-180e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/r2plus1d/r2plus1d_r34_8xb8-32x2x1-180e_kinetics400-rgb/r2plus1d_r34_8xb8-32x2x1-180e_kinetics400-rgb_20220812-4270588c.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/r2plus1d/r2plus1d_r34_8xb8-32x2x1-180e_kinetics400-rgb/r2plus1d_r34_8xb8-32x2x1-180e_kinetics400-rgb.log) | + +1. The **gpus** indicates the number of gpus we used to get the checkpoint. If you want to use a different number of gpus or videos per gpu, the best way is to set `--auto-scale-lr` when calling `tools/train.py`, this parameter will auto-scale the learning rate according to the actual batch size and the original batch size. 2. The validation set of Kinetics400 we used consists of 19796 videos. These videos are available at [Kinetics400-Validation](https://mycuhk-my.sharepoint.com/:u:/g/personal/1155136485_link_cuhk_edu_hk/EbXw2WX94J1Hunyt3MWNDJUBz-nHvQYhO9pvKqm6g39PMA?e=a9QldB). The corresponding [data list](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_val_list.txt) (each line is of the format 'video_id, num_frames, label_index') and the [label map](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_class2ind.txt) are also available. -For more details on data preparation, you can refer to the **Prepare videos** part in the [Data Preparation Tutorial](/docs/en/user_guides/2_data_prepare.md). +For more details on data preparation, you can refer to [Kinetics400](/tools/data/kinetics/README.md). ## Train @@ -44,7 +42,7 @@ Example: train R(2+1)D model on Kinetics-400 dataset in a deterministic option. 
```shell python tools/train.py configs/recognition/r2plus1d/r2plus1d_r34_8xb8-8x8x1-180e_kinetics400-rgb.py \ - --cfg-options randomness.seed=0 randomness.deterministic=True + --seed=0 --deterministic ``` For more details, you can refer to the **Training** part in the [Training and Test Tutorial](/docs/en/user_guides/4_train_test.md). diff --git a/configs/recognition/r2plus1d/metafile.yml b/configs/recognition/r2plus1d/metafile.yml index e89c475037..376687b031 100644 --- a/configs/recognition/r2plus1d/metafile.yml +++ b/configs/recognition/r2plus1d/metafile.yml @@ -13,10 +13,10 @@ Models: Architecture: ResNet34 Batch Size: 8 Epochs: 180 - FLOPs: 53175572992 - Parameters: 63759281 + FLOPs: 53.1G + Parameters: 63.8M Pretrained: None - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -24,8 +24,8 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 69.35 - Top 5 Accuracy: 88.32 + Top 1 Accuracy: 69.76 + Top 5 Accuracy: 88.41 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/r2plus1d/r2plus1d_r34_8xb8-8x8x1-180e_kinetics400-rgb/r2plus1d_r34_8xb8-8x8x1-180e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/r2plus1d/r2plus1d_r34_8xb8-8x8x1-180e_kinetics400-rgb/r2plus1d_r34_8xb8-8x8x1-180e_kinetics400-rgb_20220812-47cfe041.pth @@ -36,10 +36,10 @@ Models: Architecture: ResNet34 Batch Size: 8 Epochs: 180 - FLOPs: 212701677568 - Parameters: 63759281 + FLOPs: 213G + Parameters: 63.8M Pretrained: None - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -47,7 +47,7 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 75.27 - Top 5 Accuracy: 92.03 + Top 1 Accuracy: 75.46 + Top 5 Accuracy: 92.28 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/r2plus1d/r2plus1d_r34_8xb8-32x2x1-180e_kinetics400-rgb/r2plus1d_r34_8xb8-32x2x1-180e_kinetics400-rgb.log - Weights: https://download.openmmlab.com/mmaction/v2.0/recognition/r2plus1d/r2plus1d_r34_8xb8-32x2x1-180e_kinetics400-rgb/r2plus1d_r34_8xb8-32x2x1-180e_kinetics400-rgb_20220812-4270588c.pth + Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/r2plus1d/r2plus1d_r34_8xb8-32x2x1-180e_kinetics400-rgb/r2plus1d_r34_8xb8-32x2x1-180e_kinetics400-rgb_20220812-4270588c.pth diff --git a/configs/recognition/r2plus1d/r2plus1d_r34_8xb8-32x2x1-180e_kinetics400-rgb.py b/configs/recognition/r2plus1d/r2plus1d_r34_8xb8-32x2x1-180e_kinetics400-rgb.py index 4815cb7451..ddd7b1c2f8 100644 --- a/configs/recognition/r2plus1d/r2plus1d_r34_8xb8-32x2x1-180e_kinetics400-rgb.py +++ b/configs/recognition/r2plus1d/r2plus1d_r34_8xb8-32x2x1-180e_kinetics400-rgb.py @@ -7,8 +7,10 @@ ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt' + +file_client_args = dict(io_backend='disk') train_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1), dict(type='DecordDecode'), dict(type='Resize', scale=(-1, 256)), @@ -19,7 +21,7 @@ dict(type='PackActionInputs') ] val_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=32, @@ -33,7 +35,7 @@ dict(type='PackActionInputs') ] test_pipeline = [ - 
dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=32, diff --git a/configs/recognition/r2plus1d/r2plus1d_r34_8xb8-8x8x1-180e_kinetics400-rgb.py b/configs/recognition/r2plus1d/r2plus1d_r34_8xb8-8x8x1-180e_kinetics400-rgb.py index d7e3d93d94..ab28168ab9 100644 --- a/configs/recognition/r2plus1d/r2plus1d_r34_8xb8-8x8x1-180e_kinetics400-rgb.py +++ b/configs/recognition/r2plus1d/r2plus1d_r34_8xb8-8x8x1-180e_kinetics400-rgb.py @@ -9,8 +9,10 @@ ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt' + +file_client_args = dict(io_backend='disk') train_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), dict(type='DecordDecode'), dict(type='Resize', scale=(-1, 256)), @@ -21,7 +23,7 @@ dict(type='PackActionInputs') ] val_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=8, @@ -35,7 +37,7 @@ dict(type='PackActionInputs') ] test_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=8, @@ -103,3 +105,9 @@ ] default_hooks = dict(checkpoint=dict(max_keep_ckpts=3)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (8 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=64) diff --git a/configs/recognition/slowfast/README.md b/configs/recognition/slowfast/README.md index 389d8e98f0..3bf1666152 100644 --- a/configs/recognition/slowfast/README.md +++ b/configs/recognition/slowfast/README.md @@ -20,20 +20,18 @@ We present SlowFast networks for video recognition. 
Our model involves (i) a Slo ### Kinetics-400 -| frame sampling strategy | scheduler | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | inference time(video/s) | gpu_mem(M) | config | ckpt | log | -| :---------------------: | :--------------: | :------------: | :--: | :------------------: | :------: | :------: | :------: | :---------------: | :---------------------: | :--------: | :----------------: | :--------------: | :-------------: | -| 4x16x1 | Linear+Cosine | short-side 320 | 8 | ResNet50 | None | 75.27 | 92.27 | 10 clips x 3 crop | x | 6332 | [config](/configs/recognition/slowfast/slowfast_r50_8xb8-4x16x1-256e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowfast/slowfast_r50_8xb8-4x16x1-256e_kinetics400-rgb/slowfast_r50_8xb8-4x16x1-256e_kinetics400-rgb_20220901-701b0f6f.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowfast/slowfast_r50_8xb8-4x16x1-256e_kinetics400-rgb/slowfast_r50_8xb8-4x16x1-256e_kinetics400-rgb.log) | -| 8x8x1 | Linear+Cosine | short-side 320 | 8 | ResNet50 | None | 76.31 | 92.88 | 10 clips x 3 crop | x | 9201 | [config](/configs/recognition/slowfast/slowfast_r50_8xb8-8x8x1-256e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowfast/slowfast_r50_8xb8-8x8x1-256e_kinetics400-rgb/slowfast_r50_8xb8-8x8x1-256e_kinetics400-rgb_20220818-1cb6dfc8.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowfast/slowfast_r50_8xb8-8x8x1-256e_kinetics400-rgb/slowfast_r50_8xb8-8x8x1-256e_kinetics400-rgb.log) | -| 8x8x1 | Linear+MultiStep | short-side 320 | 8 | ResNet50 | None | 76.33 | 92.66 | 10 clips x 3 crop | x | 9395 | [config](/configs/recognition/slowfast/slowfast_r50_8xb8-8x8x1-steplr-256e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowfast/slowfast_r50_8xb8-8x8x1-steplr-256e_kinetics400-rgb/slowfast_r50_8xb8-8x8x1-steplr-256e_kinetics400-rgb_20220818-b62a501f.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowfast/slowfast_r50_8xb8-8x8x1-steplr-256e_kinetics400-rgb/slowfast_r50_8xb8-8x8x1-steplr-256e_kinetics400-rgb.log) | -| 8x8x1 | Linear+Cosine | short-side 320 | 8 | ResNet101 | None | 78.30 | 93.77 | 10 clips x 3 crop | x | 13431 | [config](/configs/recognition/slowfast/slowfast_r101_8xb8-8x8x1-256e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowfast/slowfast_r101_8xb8-8x8x1-256e_kinetics400-rgb/slowfast_r101_8xb8-8x8x1-256e_kinetics400-rgb_20220818-9c0e09bd.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowfast/slowfast_r101_8xb8-8x8x1-256e_kinetics400-rgb/slowfast_r101_8xb8-8x8x1-256e_kinetics400-rgb.log) | -| 4x16x1 | Linear+Cosine | short-side 320 | 32 | ResNet101 + ResNet50 | None | 76.68 | 92.82 | 10 clips x 3 crop | x | 8039 | [config](/configs/recognition/slowfast/slowfast_r101-r50_32xb8-4x16x1-256e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowfast/slowfast_r101-r50_32xb8-4x16x1-256e_kinetics400-rgb/slowfast_r101-r50_32xb8-4x16x1-256e_kinetics400-rgb_20220901-a77ac3ee.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowfast/slowfast_r101-r50_32xb8-4x16x1-256e_kinetics400-rgb/slowfast_r101-r50_32xb8-4x16x1-256e_kinetics400-rgb.log) | - -1. The **gpus** indicates the number of gpu we used to get the checkpoint. It is noteworthy that the configs we provide are used for 8 gpus as default. 
- According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you may set the learning rate proportional to the batch size if you use different GPUs or videos per GPU, - e.g., lr=0.01 for 4 GPUs x 2 video/gpu and lr=0.08 for 16 GPUs x 4 video/gpu. +| frame sampling strategy | scheduler | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | FLOPs | params | config | ckpt | log | +| :---------------------: | :--------------: | :--------: | :--: | :------------------: | :------: | :------: | :------: | :---------------: | :---: | :----: | :------------------------: | :-----------------------: | :----------------------: | +| 4x16x1 | Linear+Cosine | 224x224 | 8 | ResNet50 | None | 75.55 | 92.35 | 10 clips x 3 crop | 36.3G | 34.5M | [config](/configs/recognition/slowfast/slowfast_r50_8xb8-4x16x1-256e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowfast/slowfast_r50_8xb8-4x16x1-256e_kinetics400-rgb/slowfast_r50_8xb8-4x16x1-256e_kinetics400-rgb_20220901-701b0f6f.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowfast/slowfast_r50_8xb8-4x16x1-256e_kinetics400-rgb/slowfast_r50_8xb8-4x16x1-256e_kinetics400-rgb.log) | +| 8x8x1 | Linear+Cosine | 224x224 | 8 | ResNet50 | None | 76.80 | 92.99 | 10 clips x 3 crop | 66.1G | 34.6M | [config](/configs/recognition/slowfast/slowfast_r50_8xb8-8x8x1-256e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowfast/slowfast_r50_8xb8-8x8x1-256e_kinetics400-rgb/slowfast_r50_8xb8-8x8x1-256e_kinetics400-rgb_20220818-1cb6dfc8.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowfast/slowfast_r50_8xb8-8x8x1-256e_kinetics400-rgb/slowfast_r50_8xb8-8x8x1-256e_kinetics400-rgb.log) | +| 8x8x1 | Linear+MultiStep | 224x224 | 8 | ResNet50 | None | 76.65 | 92.86 | 10 clips x 3 crop | 66.1G | 34.6M | [config](/configs/recognition/slowfast/slowfast_r50_8xb8-8x8x1-steplr-256e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowfast/slowfast_r50_8xb8-8x8x1-steplr-256e_kinetics400-rgb/slowfast_r50_8xb8-8x8x1-steplr-256e_kinetics400-rgb_20220818-b62a501f.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowfast/slowfast_r50_8xb8-8x8x1-steplr-256e_kinetics400-rgb/slowfast_r50_8xb8-8x8x1-steplr-256e_kinetics400-rgb.log) | +| 8x8x1 | Linear+Cosine | 224x224 | 8 | ResNet101 | None | 78.65 | 93.88 | 10 clips x 3 crop | 126G | 62.9M | [config](/configs/recognition/slowfast/slowfast_r101_8xb8-8x8x1-256e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowfast/slowfast_r101_8xb8-8x8x1-256e_kinetics400-rgb/slowfast_r101_8xb8-8x8x1-256e_kinetics400-rgb_20220818-9c0e09bd.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowfast/slowfast_r101_8xb8-8x8x1-256e_kinetics400-rgb/slowfast_r101_8xb8-8x8x1-256e_kinetics400-rgb.log) | +| 4x16x1 | Linear+Cosine | 224x224 | 32 | ResNet101 + ResNet50 | None | 77.03 | 92.99 | 10 clips x 3 crop | 64.9G | 62.4M | [config](/configs/recognition/slowfast/slowfast_r101-r50_32xb8-4x16x1-256e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowfast/slowfast_r101-r50_32xb8-4x16x1-256e_kinetics400-rgb/slowfast_r101-r50_32xb8-4x16x1-256e_kinetics400-rgb_20220901-a77ac3ee.pth) | 
[log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowfast/slowfast_r101-r50_32xb8-4x16x1-256e_kinetics400-rgb/slowfast_r101-r50_32xb8-4x16x1-256e_kinetics400-rgb.log) | + +1. The **gpus** indicates the number of gpus we used to get the checkpoint. If you want to use a different number of gpus or videos per gpu, the best way is to set `--auto-scale-lr` when calling `tools/train.py`, this parameter will auto-scale the learning rate according to the actual batch size and the original batch size. 2. The validation set of Kinetics400 we used consists of 19796 videos. These videos are available at [Kinetics400-Validation](https://mycuhk-my.sharepoint.com/:u:/g/personal/1155136485_link_cuhk_edu_hk/EbXw2WX94J1Hunyt3MWNDJUBz-nHvQYhO9pvKqm6g39PMA?e=a9QldB). The corresponding [data list](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_val_list.txt) (each line is of the format 'video_id, num_frames, label_index') and the [label map](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_class2ind.txt) are also available. -For more details on data preparation, you can refer to the **Prepare videos** part in the [Data Preparation Tutorial](/docs/en/user_guides/2_data_prepare.md). +For more details on data preparation, you can refer to [Kinetics400](/tools/data/kinetics/README.md). ## Train @@ -47,7 +45,7 @@ Example: train SlowFast model on Kinetics-400 dataset in a deterministic option ```shell python tools/train.py configs/recognition/slowfast/slowfast_r50_8xb8-4x16x1-256e_kinetics400-rgb.py \ - --cfg-options randomness.seed=0 randomness.deterministic=True + --seed=0 --deterministic ``` For more details, you can refer to the **Training** part in the [Training and Test Tutorial](/docs/en/user_guides/4_train_test.md). diff --git a/configs/recognition/slowfast/metafile.yml b/configs/recognition/slowfast/metafile.yml index 8d81d510f3..94423659d1 100644 --- a/configs/recognition/slowfast/metafile.yml +++ b/configs/recognition/slowfast/metafile.yml @@ -13,10 +13,10 @@ Models: Architecture: ResNet50 Batch Size: 8 Epochs: 256 - FLOPs: 36441296896 - Parameters: 34479288 + FLOPs: 36.3G + Parameters: 34.5M Pretrained: None - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -24,8 +24,8 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 75.27 - Top 5 Accuracy: 92.27 + Top 1 Accuracy: 75.55 + Top 5 Accuracy: 92.35 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/slowfast/slowfast_r50_8xb8-4x16x1-256e_kinetics400-rgb/slowfast_r50_8xb8-4x16x1-256e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/slowfast/slowfast_r50_8xb8-4x16x1-256e_kinetics400-rgb/slowfast_r50_8xb8-4x16x1-256e_kinetics400-rgb_20220901-701b0f6f.pth @@ -36,10 +36,10 @@ Models: Architecture: ResNet50 Batch Size: 8 Epochs: 256 - FLOPs: 66222034944 - Parameters: 34565560 + FLOPs: 66.1G + Parameters: 34.6M Pretrained: None - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -47,8 +47,8 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 76.31 - Top 5 Accuracy: 92.88 + Top 1 Accuracy: 76.80 + Top 5 Accuracy: 92.99 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/slowfast/slowfast_r50_8xb8-8x8x1-256e_kinetics400-rgb/slowfast_r50_8xb8-8x8x1-256e_kinetics400-rgb.log Weights: 
https://download.openmmlab.com/mmaction/v1.0/recognition/slowfast/slowfast_r50_8xb8-8x8x1-256e_kinetics400-rgb/slowfast_r50_8xb8-8x8x1-256e_kinetics400-rgb_20220818-1cb6dfc8.pth @@ -59,10 +59,10 @@ Models: Architecture: ResNet50 Batch Size: 8 Epochs: 256 - FLOPs: 66222034944 - Parameters: 34565560 + FLOPs: 66.1G + Parameters: 34.6M Pretrained: None - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -70,8 +70,8 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 76.33 - Top 5 Accuracy: 92.66 + Top 1 Accuracy: 76.65 + Top 5 Accuracy: 92.86 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/slowfast/slowfast_r50_8xb8-8x8x1-steplr-256e_kinetics400-rgb/slowfast_r50_8xb8-8x8x1-steplr-256e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/slowfast/slowfast_r50_8xb8-8x8x1-steplr-256e_kinetics400-rgb/slowfast_r50_8xb8-8x8x1-steplr-256e_kinetics400-rgb_20220818-b62a501f.pth @@ -82,10 +82,10 @@ Models: Architecture: ResNet101 Batch Size: 8 Epochs: 256 - FLOPs: 127070375936 - Parameters: 62912312 + FLOPs: 126G + Parameters: 62.9M Pretrained: None - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -93,8 +93,8 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 78.30 - Top 5 Accuracy: 93.77 + Top 1 Accuracy: 78.65 + Top 5 Accuracy: 93.88 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/slowfast/slowfast_r101_8xb8-8x8x1-256e_kinetics400-rgb/slowfast_r101_8xb8-8x8x1-256e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/slowfast/slowfast_r101_8xb8-8x8x1-256e_kinetics400-rgb/slowfast_r101_8xb8-8x8x1-256e_kinetics400-rgb_20220901-9c0e09bd.pth @@ -105,18 +105,18 @@ Models: Architecture: ResNet101 + ResNet50 Batch Size: 8 Epochs: 256 - FLOPs: 65042780160 - Parameters: 62384312 + FLOPs: 64.9G + Parameters: 62.4M Pretrained: None - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 32 GPUs Modality: RGB Results: - Dataset: Kinetics-400 Metrics: - Top 1 Accuracy: 76.68 - Top 5 Accuracy: 92.82 + Top 1 Accuracy: 77.03 + Top 5 Accuracy: 92.99 Task: Action Recognition Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/slowfast/slowfast_r101-r50_32xb8-4x16x1-256e_kinetics400-rgb/slowfast_r101-r50_32xb8-4x16x1-256e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/slowfast/slowfast_r101-r50_32xb8-4x16x1-256e_kinetics400-rgb/slowfast_r101-r50_32xb8-4x16x1-256e_kinetics400-rgb_20220901-a77ac3ee.pth diff --git a/configs/recognition/slowfast/slowfast_r50_8xb8-4x16x1-256e_kinetics400-rgb.py b/configs/recognition/slowfast/slowfast_r50_8xb8-4x16x1-256e_kinetics400-rgb.py index ed4fbdac3c..7c3c0a66ef 100644 --- a/configs/recognition/slowfast/slowfast_r50_8xb8-4x16x1-256e_kinetics400-rgb.py +++ b/configs/recognition/slowfast/slowfast_r50_8xb8-4x16x1-256e_kinetics400-rgb.py @@ -8,8 +8,10 @@ ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt' + +file_client_args = dict(io_backend='disk') train_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1), 
dict(type='DecordDecode'), dict(type='Resize', scale=(-1, 256)), @@ -20,7 +22,7 @@ dict(type='PackActionInputs') ] val_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=32, @@ -34,7 +36,7 @@ dict(type='PackActionInputs') ] test_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=32, diff --git a/configs/recognition/swin/README.md b/configs/recognition/swin/README.md index c36a47b39e..1e6074c4a9 100644 --- a/configs/recognition/swin/README.md +++ b/configs/recognition/swin/README.md @@ -20,25 +20,25 @@ The vision community is witnessing a modeling shift from CNNs to Transformers, w ### Kinetics-400 -| frame sampling strategy | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | reference top1 acc | reference top1 acc | testing protocol | gpu_mem(M) | FLOPs | params | config | ckpt | log | -| :---------------------: | :------------: | :--: | :------: | :----------: | :------: | :------: | :-----------------------: | :-----------------------: | :---------------: | :--------: | :---: | :----: | :-----------: | :---------: | :---------: | -| 32x2x1 | short-side 320 | 8 | Swin-T | ImageNet-1k | 78.29 | 93.58 | 78.46 \[[VideoSwin](https://github.com/SwinTransformer/Video-Swin-Transformer)\] | 93.46 \[[VideoSwin](https://github.com/SwinTransformer/Video-Swin-Transformer)\] | 4 clips x 3 crops | 21072 | 88G | 28.2M | [config](/configs/recognition/swin/swin-tiny-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-tiny-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-tiny-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb_20220930-241016b2.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-tiny-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-tiny-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.log) | -| 32x2x1 | short-side 320 | 8 | Swin-S | ImageNet-1k | 80.23 | 94.32 | 80.23 \[[VideoSwin](https://github.com/SwinTransformer/Video-Swin-Transformer)\] | 94.16 \[[VideoSwin](https://github.com/SwinTransformer/Video-Swin-Transformer)\] | 4 clips x 3 crops | 33632 | 166G | 49.8M | [config](/configs/recognition/swin/swin-small-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-small-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-small-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb_20220930-e91ab986.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-small-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-small-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.log) | -| 32x2x1 | short-side 320 | 8 | Swin-B | ImageNet-1k | 80.21 | 94.32 | 80.27 \[[VideoSwin](https://github.com/SwinTransformer/Video-Swin-Transformer)\] | 94.42 \[[VideoSwin](https://github.com/SwinTransformer/Video-Swin-Transformer)\] | 4 clips x 3 crops | 45143 | 282G | 88.0M | [config](/configs/recognition/swin/swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb_20220930-182ec6cc.pth) | 
[log](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.log) | -| 32x2x1 | short-side 320 | 8 | Swin-L | ImageNet-22k | 83.15 | 95.76 | 83.1\* | 95.9\* | 4 clips x 3 crops | 68881 | 604G | 197M | [config](/configs/recognition/swin/swin-large-p244-w877_in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-large-p244-w877_in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-large-p244-w877_in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb_20220930-78ad8b11.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-large-p244-w877_in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-large-p244-w877_in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.log) | +| frame sampling strategy | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | reference top1 acc | reference top1 acc | testing protocol | FLOPs | params | config | ckpt | log | +| :---------------------: | :--------: | :--: | :------: | :----------: | :------: | :------: | :--------------------------: | :--------------------------: | :--------------: | :---: | :----: | :--------------: | :------------: | :------------: | +| 32x2x1 | 224x224 | 8 | Swin-T | ImageNet-1k | 78.90 | 93.77 | 78.84 \[[VideoSwin](https://github.com/SwinTransformer/Video-Swin-Transformer)\] | 93.76 \[[VideoSwin](https://github.com/SwinTransformer/Video-Swin-Transformer)\] | 4 clips x 3 crop | 88G | 28.2M | [config](/configs/recognition/swin/swin-tiny-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-tiny-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-tiny-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb_20220930-241016b2.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-tiny-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-tiny-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.log) | +| 32x2x1 | 224x224 | 8 | Swin-S | ImageNet-1k | 80.54 | 94.46 | 80.58 \[[VideoSwin](https://github.com/SwinTransformer/Video-Swin-Transformer)\] | 94.45 \[[VideoSwin](https://github.com/SwinTransformer/Video-Swin-Transformer)\] | 4 clips x 3 crop | 166G | 49.8M | [config](/configs/recognition/swin/swin-small-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-small-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-small-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb_20220930-e91ab986.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-small-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-small-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.log) | +| 32x2x1 | 224x224 | 8 | Swin-B | ImageNet-1k | 80.57 | 94.49 | 80.55 \[[VideoSwin](https://github.com/SwinTransformer/Video-Swin-Transformer)\] | 94.66 \[[VideoSwin](https://github.com/SwinTransformer/Video-Swin-Transformer)\] | 4 clips x 3 crop | 282G | 88.0M | [config](/configs/recognition/swin/swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb_20220930-182ec6cc.pth) | 
[log](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.log) | +| 32x2x1 | 224x224 | 8 | Swin-L | ImageNet-22k | 83.46 | 95.91 | 83.1\* | 95.9\* | 4 clips x 3 crop | 604G | 197M | [config](/configs/recognition/swin/swin-large-p244-w877_in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-large-p244-w877_in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-large-p244-w877_in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb_20220930-78ad8b11.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-large-p244-w877_in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-large-p244-w877_in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.log) | ### Kinetics-700 -| frame sampling strategy | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | gpu_mem(M) | FLOPs | params | config | ckpt | log | -| :---------------------: | :------------: | :--: | :------: | :----------: | :------: | :------: | :---------------: | :--------: | :---: | :----: | :----------------------------: | :--------------------------: | :-------------------------: | -| 32x2x1 | short-side 320 | 16 | Swin-L | ImageNet-22k | 75.26 | 92.44 | 4 clips x 3 crops | 68898 | 604G | 197M | [config](/configs/recognition/swin/swin-large-p244-w877_in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-large-p244-w877_in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb/swin-large-p244-w877_in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb_20220930-f8d74db7.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-large-p244-w877_in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb/swin-large-p244-w877_in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb.py.log) | +| frame sampling strategy | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | FLOPs | params | config | ckpt | log | +| :---------------------: | :--------: | :--: | :------: | :----------: | :------: | :------: | :--------------: | :---: | :----: | :---------------------------------: | :-------------------------------: | :------------------------------: | +| 32x2x1 | 224x224 | 16 | Swin-L | ImageNet-22k | 75.92 | 92.72 | 4 clips x 3 crop | 604G | 197M | [config](/configs/recognition/swin/swin-large-p244-w877_in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-large-p244-w877_in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb/swin-large-p244-w877_in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb_20220930-f8d74db7.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-large-p244-w877_in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb/swin-large-p244-w877_in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb.py.log) | 1. The **gpus** indicates the number of gpus we used to get the checkpoint. If you want to use a different number of gpus or videos per gpu, the best way is to set `--auto-scale-lr` when calling `tools/train.py`, this parameter will auto-scale the learning rate according to the actual batch size and the original batch size. 2. The values in columns named after "reference" are the results got by testing on our dataset, using the checkpoints provided by the author with same model settings. `*` means that the numbers are copied from the paper. 
3. The validation set of Kinetics400 we used consists of 19796 videos. These videos are available at [Kinetics400-Validation](https://mycuhk-my.sharepoint.com/:u:/g/personal/1155136485_link_cuhk_edu_hk/EbXw2WX94J1Hunyt3MWNDJUBz-nHvQYhO9pvKqm6g39PMA?e=a9QldB). The corresponding [data list](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_val_list.txt) (each line is of the format 'video_id, num_frames, label_index') and the [label map](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_class2ind.txt) are also available. 4. Pre-trained image models can be downloaded from [Swin Transformer for ImageNet Classification](https://github.com/microsoft/Swin-Transformer#main-results-on-imagenet-with-pretrained-models). -For more details on data preparation, you can refer to [preparing_kinetics](/tools/data/kinetics/README.md). +For more details on data preparation, you can refer to [Kinetics](/tools/data/kinetics/README.md). ## Train @@ -52,7 +52,7 @@ Example: train VideoSwin model on Kinetics-400 dataset in a deterministic option ```shell python tools/train.py configs/recognition/swin/swin-tiny-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py \ - --cfg-options randomness.seed=0 randomness.deterministic=True + --seed=0 --deterministic ``` For more details, you can refer to the **Training** part in the [Training and Test Tutorial](/docs/en/user_guides/4_train_test.md). diff --git a/configs/recognition/swin/metafile.yml b/configs/recognition/swin/metafile.yml index b557d6e2ed..0a4cc41cb7 100644 --- a/configs/recognition/swin/metafile.yml +++ b/configs/recognition/swin/metafile.yml @@ -1,9 +1,9 @@ Collections: -- Name: Swin - README: configs/recognition/swin/README.md - Paper: - URL: https://arxiv.org/abs/2106.13230 - Title: 'Video Swin Transformer' + - Name: Swin + README: configs/recognition/swin/README.md + Paper: + URL: https://arxiv.org/abs/2106.13230 + Title: 'Video Swin Transformer' Models: - Name: swin-tiny-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb @@ -16,7 +16,7 @@ Models: FLOPs: 88G Parameters: 28.2M Pretrained: ImageNet-1K - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -24,8 +24,8 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 78.29 - Top 5 Accuracy: 93.58 + Top 1 Accuracy: 78.90 + Top 5 Accuracy: 93.77 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-tiny-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-tiny-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-tiny-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-tiny-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb_20220930-241016b2.pth @@ -39,7 +39,7 @@ Models: FLOPs: 166G Parameters: 49.8M Pretrained: ImageNet-1K - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -47,8 +47,8 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 80.23 - Top 5 Accuracy: 94.32 + Top 1 Accuracy: 80.54 + Top 5 Accuracy: 94.46 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-small-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-small-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.log Weights: 
https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-small-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-small-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb_20220930-e91ab986.pth @@ -62,7 +62,7 @@ Models: FLOPs: 282G Parameters: 88.0M Pretrained: ImageNet-1K - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -70,8 +70,8 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 80.21 - Top 5 Accuracy: 94.32 + Top 1 Accuracy: 80.57 + Top 5 Accuracy: 94.49 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb_20220930-182ec6cc.pth @@ -85,7 +85,7 @@ Models: FLOPs: 604G Parameters: 197M Pretrained: ImageNet-22K - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -93,8 +93,8 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 83.15 - Top 5 Accuracy: 95.76 + Top 1 Accuracy: 83.46 + Top 5 Accuracy: 95.91 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-large-p244-w877_in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-large-p244-w877_in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-large-p244-w877_in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb/swin-large-p244-w877_in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb_20220930-78ad8b11.pth @@ -108,7 +108,7 @@ Models: FLOPs: 604G Parameters: 197M Pretrained: ImageNet-22K - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-700 Training Resources: 16 GPUs Modality: RGB @@ -116,7 +116,7 @@ Models: - Dataset: Kinetics-700 Task: Action Recognition Metrics: - Top 1 Accuracy: 75.26 - Top 5 Accuracy: 92.44 + Top 1 Accuracy: 75.92 + Top 5 Accuracy: 92.72 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-large-p244-w877_in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb/swin-large-p244-w877_in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/swin/swin-large-p244-w877_in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb/swin-large-p244-w877_in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb_20220930-f8d74db7.pth diff --git a/configs/recognition/swin/swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py b/configs/recognition/swin/swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py index ebb304b6a0..1e9874d132 100644 --- a/configs/recognition/swin/swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py +++ b/configs/recognition/swin/swin-base-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py @@ -19,10 +19,6 @@ ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt' -# file_client_args = dict( -# io_backend='petrel', -# path_mapping=dict( -# {'data/kinetics400': 's3://openmmlab/datasets/action/Kinetics400'})) file_client_args = dict(io_backend='disk') train_pipeline = [ dict(type='DecordInit', **file_client_args), diff --git 
a/configs/recognition/swin/swin-large-p244-w877_in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb.py b/configs/recognition/swin/swin-large-p244-w877_in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb.py index 0fbbb465ec..b5c3e57150 100644 --- a/configs/recognition/swin/swin-large-p244-w877_in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb.py +++ b/configs/recognition/swin/swin-large-p244-w877_in22k-pre_16xb8-amp-32x2x1-30e_kinetics700-rgb.py @@ -11,10 +11,6 @@ ann_file_val = 'data/kinetics700/kinetics700_val_list_videos.txt' ann_file_test = 'data/kinetics700/kinetics700_val_list_videos.txt' -# file_client_args = dict( -# io_backend='petrel', -# path_mapping=dict( -# {'data/kinetics700': 's3://openmmlab/datasets/action/Kinetics700'})) file_client_args = dict(io_backend='disk') train_pipeline = [ dict(type='DecordInit', **file_client_args), diff --git a/configs/recognition/swin/swin-large-p244-w877_in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py b/configs/recognition/swin/swin-large-p244-w877_in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py index 2696d18c9c..a1dffd1d06 100644 --- a/configs/recognition/swin/swin-large-p244-w877_in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py +++ b/configs/recognition/swin/swin-large-p244-w877_in22k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py @@ -19,10 +19,6 @@ ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt' -# file_client_args = dict( -# io_backend='petrel', -# path_mapping=dict( -# {'data/kinetics400': 's3://openmmlab/datasets/action/Kinetics400'})) file_client_args = dict(io_backend='disk') train_pipeline = [ dict(type='DecordInit', **file_client_args), diff --git a/configs/recognition/swin/swin-small-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py b/configs/recognition/swin/swin-small-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py index 1d6312c224..09df2e6c4f 100644 --- a/configs/recognition/swin/swin-small-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py +++ b/configs/recognition/swin/swin-small-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py @@ -18,10 +18,6 @@ ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt' -# file_client_args = dict( -# io_backend='petrel', -# path_mapping=dict( -# {'data/kinetics400': 's3://openmmlab/datasets/action/Kinetics400'})) file_client_args = dict(io_backend='disk') train_pipeline = [ dict(type='DecordInit', **file_client_args), diff --git a/configs/recognition/swin/swin-tiny-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py b/configs/recognition/swin/swin-tiny-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py index f44e1d5e72..e5b14fc8fc 100644 --- a/configs/recognition/swin/swin-tiny-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py +++ b/configs/recognition/swin/swin-tiny-p244-w877_in1k-pre_8xb8-amp-32x2x1-30e_kinetics400-rgb.py @@ -16,10 +16,6 @@ ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt' -# file_client_args = dict( -# io_backend='petrel', -# path_mapping=dict( -# {'data/kinetics400': 's3://openmmlab/datasets/action/Kinetics400'})) file_client_args = dict(io_backend='disk') train_pipeline = [ dict(type='DecordInit', **file_client_args), diff --git a/configs/recognition/tanet/README.md b/configs/recognition/tanet/README.md index 77635a3120..1a67a40aa0 100644 --- a/configs/recognition/tanet/README.md +++ 
b/configs/recognition/tanet/README.md @@ -20,27 +20,25 @@ Video data is with complex temporal dynamics due to various factors such as came ### Kinetics-400 -| frame sampling strategy | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | reference top1 acc | reference top5 acc | testing protocol | inference time(video/s) | gpu_mem(M) | config | ckpt | log | -| :---------------------: | :------------: | :--: | :------: | :------: | :------: | :------: | :---------------------: | :---------------------: | :---------------: | :---------------------: | :--------: | :----------: | :--------: | :-------: | -| dense-1x1x8 | short-side 320 | 8 | ResNet50 | ImageNet | 76.25 | 92.41 | [76.22](https://github.com/liu-zhy/temporal-adaptive-module/blob/master/scripts/test_tam_kinetics_rgb_8f.sh) | [92.53](https://github.com/liu-zhy/temporal-adaptive-module/blob/master/scripts/test_tam_kinetics_rgb_8f.sh) | 80 clips x 3 crop | x | 7627 | [config](/configs/recognition/tanet/tanet_imagenet-pretrained-r50_8xb8-dense-1x1x8-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tanet/tanet_imagenet-pretrained-r50_8xb8-dense-1x1x8-100e_kinetics400-rgb/tanet_imagenet-pretrained-r50_8xb8-dense-1x1x8-100e_kinetics400-rgb_20220919-a34346bc.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tanet/tanet_imagenet-pretrained-r50_8xb8-dense-1x1x8-100e_kinetics400-rgb/tanet_imagenet-pretrained-r50_8xb8-dense-1x1x8-100e_kinetics400-rgb.log) | +| frame sampling strategy | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | reference top1 acc | reference top5 acc | testing protocol | FLOPs | params | config | ckpt | log | +| :---------------------: | :--------: | :--: | :------: | :------: | :------: | :------: | :---------------------------: | :---------------------------: | :--------------: | :---: | :----: | :---------------: | :-------------: | :------------: | +| dense-1x1x8 | 224x224 | 8 | ResNet50 | ImageNet | 76.25 | 92.41 | [76.22](https://github.com/liu-zhy/temporal-adaptive-module/blob/master/scripts/test_tam_kinetics_rgb_8f.sh) | [92.53](https://github.com/liu-zhy/temporal-adaptive-module/blob/master/scripts/test_tam_kinetics_rgb_8f.sh) | 8 clips x 3 crop | 43.0G | 25.6M | [config](/configs/recognition/tanet/tanet_imagenet-pretrained-r50_8xb8-dense-1x1x8-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tanet/tanet_imagenet-pretrained-r50_8xb8-dense-1x1x8-100e_kinetics400-rgb/tanet_imagenet-pretrained-r50_8xb8-dense-1x1x8-100e_kinetics400-rgb_20220919-a34346bc.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tanet/tanet_imagenet-pretrained-r50_8xb8-dense-1x1x8-100e_kinetics400-rgb/tanet_imagenet-pretrained-r50_8xb8-dense-1x1x8-100e_kinetics400-rgb.log) | ### Something-Something V1 -| frame sampling strategy | resolution | gpus | backbone | pretrain | top1 acc (efficient/accurate) | top5 acc (efficient/accurate) | testing protocol | inference time(video/s) | gpu_mem(M) | config | ckpt | log | -| :---------------------: | :--------: | :--: | :------: | :------: | :---------------------------: | :---------------------------: | :--------------: | :---------------------: | :--------: | :-------------: | :-----------: | :----------: | -| 1x1x8 | height 100 | 8 | ResNet50 | ImageNet | 46.98/49.71 | 75.75/77.43 | 8 clips x 3 crop | x | 7116 | [config](/configs/recognition/tanet/tanet_imagenet-pretrained-r50_8xb8-1x1x8-50e_sthv1-rgb.py) | 
[ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tanet/tanet_imagenet-pretrained-r50_8xb8-1x1x8-50e_sthv1-rgb/tanet_imagenet-pretrained-r50_8xb8-1x1x8-50e_sthv1-rgb_20220906-de50e4ef.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tanet/tanet_imagenet-pretrained-r50_8xb8-1x1x8-50e_sthv1-rgb/tanet_imagenet-pretrained-r50_8xb8-1x1x8-50e_sthv1-rgb.log) | -| 1x1x16 | height 100 | 8 | ResNet50 | ImageNet | 48.24/50.95 | 78.16/79.28 | 8 clips x 3 crop | x | 10464 | [config](/configs/recognition/tanet/tanet_imagenet-pretrained-r50_8xb6-1x1x16-50e_sthv1-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tanet/tanet_imagenet-pretrained-r50_8xb6-1x1x16-50e_sthv1-rgb/tanet_imagenet-pretrained-r50_8xb6-1x1x16-50e_sthv1-rgb_20220919-cc37e9b8.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tanet/tanet_imagenet-pretrained-r50_8xb6-1x1x16-50e_sthv1-rgb/tanet_imagenet-pretrained-r50_8xb6-1x1x16-50e_sthv1-rgb.log) | +| frame sampling strategy | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | FLOPs | params | config | ckpt | log | +| :---------------------: | :--------: | :--: | :------: | :------: | :---------: | :---------: | :---------------: | :---: | :----: | :--------------------------------: | :------------------------------: | :-----------------------------: | +| 1x1x8 | 224x224 | 8 | ResNet50 | ImageNet | 46.98/49.71 | 75.75/77.43 | 16 clips x 3 crop | 43.1G | 25.1M | [config](/configs/recognition/tanet/tanet_imagenet-pretrained-r50_8xb8-1x1x8-50e_sthv1-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tanet/tanet_imagenet-pretrained-r50_8xb8-1x1x8-50e_sthv1-rgb/tanet_imagenet-pretrained-r50_8xb8-1x1x8-50e_sthv1-rgb_20220906-de50e4ef.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tanet/tanet_imagenet-pretrained-r50_8xb8-1x1x8-50e_sthv1-rgb/tanet_imagenet-pretrained-r50_8xb8-1x1x8-50e_sthv1-rgb.log) | +| 1x1x16 | 224x224 | 8 | ResNet50 | ImageNet | 48.24/50.95 | 78.16/79.28 | 16 clips x 3 crop | 86.1G | 25.1M | [config](/configs/recognition/tanet/tanet_imagenet-pretrained-r50_8xb6-1x1x16-50e_sthv1-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tanet/tanet_imagenet-pretrained-r50_8xb6-1x1x16-50e_sthv1-rgb/tanet_imagenet-pretrained-r50_8xb6-1x1x16-50e_sthv1-rgb_20220919-cc37e9b8.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tanet/tanet_imagenet-pretrained-r50_8xb6-1x1x16-50e_sthv1-rgb/tanet_imagenet-pretrained-r50_8xb6-1x1x16-50e_sthv1-rgb.log) | -1. The **gpus** indicates the number of gpu we used to get the checkpoint. It is noteworthy that the configs we provide are used for 8 gpus as default. - According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you may set the learning rate proportional to the batch size if you use different GPUs or videos per GPU, - e.g., lr=0.01 for 8 GPUs x 8 videos/gpu and lr=0.04 for 16 GPUs x 16 videos/gpu. +1. The **gpus** indicates the number of gpus we used to get the checkpoint. If you want to use a different number of gpus or videos per gpu, the best way is to set `--auto-scale-lr` when calling `tools/train.py`, this parameter will auto-scale the learning rate according to the actual batch size and the original batch size. 2. The values in columns named after "reference" are the results got by testing on our dataset, using the checkpoints provided by the author with same model settings. 
The checkpoints for reference repo can be downloaded [here](https://drive.google.com/drive/folders/1sFfmP3yrfc7IzRshEELOby7-aEoymIFL?usp=sharing). 3. The validation set of Kinetics400 we used consists of 19796 videos. These videos are available at [Kinetics400-Validation](https://mycuhk-my.sharepoint.com/:u:/g/personal/1155136485_link_cuhk_edu_hk/EbXw2WX94J1Hunyt3MWNDJUBz-nHvQYhO9pvKqm6g39PMA?e=a9QldB). The corresponding [data list](https://download.openmmlab.com/mmaction/v1.0/dataset/k400_val/kinetics_val_list.txt) (each line is of the format 'video_id, num_frames, label_index') and the [label map](https://download.openmmlab.com/mmaction/v1.0/dataset/k400_val/kinetics_class2ind.txt) are also available. For more details on data preparation, you can refer to -- [preparing_kinetics](/tools/data/kinetics/README.md) -- [preparing_sthv1](/tools/data/sthv1/README.md) +- [Kinetics400](/tools/data/kinetics/README.md) +- [Something-something V1](/tools/data/sthv1/README.md) ## Train @@ -54,7 +52,7 @@ Example: train TANet model on Kinetics-400 dataset in a deterministic option wit ```shell python tools/train.py configs/recognition/tanet/tanet_imagenet-pretrained-r50_8xb8-dense-1x1x8-100e_kinetics400-rgb.py \ - --cfg-options randomness.seed=0 randomness.deterministic=True + --seed=0 --deterministic ``` For more details, you can refer to the **Training** part in the [Training and Test Tutorial](/docs/en/user_guides/4_train_test.md). diff --git a/configs/recognition/tanet/metafile.yml b/configs/recognition/tanet/metafile.yml index 730540ca98..7506a83417 100644 --- a/configs/recognition/tanet/metafile.yml +++ b/configs/recognition/tanet/metafile.yml @@ -10,13 +10,13 @@ Models: Config: configs/recognition/tanet/tanet_imagenet-pretrained-r50_8xb8-dense-1x1x8-100e_kinetics400-rgb.py In Collection: TANet Metadata: - Architecture: TANet + Architecture: ResNet50 Batch Size: 8 Epochs: 100 - FLOPs: 43065983104 - Parameters: 25590320 + FLOPs: 43.0G + Parameters: 25.6M Pretrained: ImageNet - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -28,17 +28,18 @@ Models: Top 5 Accuracy: 92.41 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/tanet/tanet_imagenet-pretrained-r50_8xb8-dense-1x1x8-100e_kinetics400-rgb/tanet_imagenet-pretrained-r50_8xb8-dense-1x1x8-100e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/tanet/tanet_imagenet-pretrained-r50_8xb8-dense-1x1x8-100e_kinetics400-rgb/tanet_imagenet-pretrained-r50_8xb8-dense-1x1x8-100e_kinetics400-rgb_20220919-a34346bc.pth + - Name: tanet_imagenet-pretrained-r50_8xb8-1x1x8-50e_sthv1-rgb Config: configs/recognition/tanet/tanet_imagenet-pretrained-r50_8xb8-1x1x8-50e_sthv1-rgb.py In Collection: TANet Metadata: - Architecture: TANet + Architecture: ResNet50 Batch Size: 8 Epochs: 50 - FLOPs: 32972787840 - Parameters: 25127246 + FLOPs: 43.1G + Parameters: 25.1M Pretrained: ImageNet - Resolution: height 100 + Resolution: 224x224 Training Data: SthV1 Training Resources: 8 GPUs Modality: RGB @@ -52,17 +53,18 @@ Models: Top 5 Accuracy (efficient): 75.75 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/tanet/tanet_imagenet-pretrained-r50_8xb8-1x1x8-50e_sthv1-rgb/tanet_imagenet-pretrained-r50_8xb8-1x1x8-50e_sthv1-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/tanet/tanet_imagenet-pretrained-r50_8xb8-1x1x8-50e_sthv1-rgb/tanet_imagenet-pretrained-r50_8xb8-1x1x8-50e_sthv1-rgb_20220906-de50e4ef.pth + - Name: 
tanet_imagenet-pretrained-r50_8xb6-1x1x16-50e_sthv1-rgb Config: configs/recognition/tanet/tanet_imagenet-pretrained-r50_8xb6-1x1x16-50e_sthv1-rgb.py In Collection: TANet Metadata: - Architecture: TANet - Batch Size: 8 + Architecture: ResNet50 + Batch Size: 6 Epochs: 50 - FLOPs: 65946542336 - Parameters: 25134670 + FLOPs: 86.1G + Parameters: 25.1M Pretrained: ImageNet - Resolution: height 100 + Resolution: 224x224 Training Data: SthV1 Training Resources: 8 GPUs Modality: RGB diff --git a/configs/recognition/tanet/tanet_imagenet-pretrained-r50_8xb6-1x1x16-50e_sthv1-rgb.py b/configs/recognition/tanet/tanet_imagenet-pretrained-r50_8xb6-1x1x16-50e_sthv1-rgb.py index 0c9ed640c6..bad33feae1 100644 --- a/configs/recognition/tanet/tanet_imagenet-pretrained-r50_8xb6-1x1x16-50e_sthv1-rgb.py +++ b/configs/recognition/tanet/tanet_imagenet-pretrained-r50_8xb6-1x1x16-50e_sthv1-rgb.py @@ -17,9 +17,10 @@ ann_file_test = 'data/sthv1/sthv1_val_list_rawframes.txt' sthv1_flip_label_map = {2: 4, 4: 2, 30: 41, 41: 30, 52: 66, 66: 52} +file_client_args = dict(io_backend='disk') train_pipeline = [ dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=16), - dict(type='RawFrameDecode'), + dict(type='RawFrameDecode', **file_client_args), dict(type='Resize', scale=(-1, 256)), dict( type='MultiScaleCrop', @@ -40,7 +41,7 @@ frame_interval=1, num_clips=16, test_mode=True), - dict(type='RawFrameDecode'), + dict(type='RawFrameDecode', **file_client_args), dict(type='Resize', scale=(-1, 256)), dict(type='CenterCrop', crop_size=224), dict(type='FormatShape', input_format='NCHW'), @@ -54,13 +55,13 @@ num_clips=16, twice_sample=True, test_mode=True), - dict(type='RawFrameDecode'), + dict(type='RawFrameDecode', **file_client_args), dict(type='Resize', scale=(-1, 256)), dict(type='ThreeCrop', crop_size=256), dict(type='FormatShape', input_format='NCHW'), dict(type='PackActionInputs') ] - +test_pipeline = val_pipeline train_dataloader = dict( batch_size=6, num_workers=8, @@ -113,3 +114,9 @@ ] default_hooks = dict(checkpoint=dict(max_keep_ckpts=3)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (6 samples per GPU). 
+auto_scale_lr = dict(enable=False, base_batch_size=48) diff --git a/configs/recognition/tanet/tanet_imagenet-pretrained-r50_8xb8-1x1x8-50e_sthv1-rgb.py b/configs/recognition/tanet/tanet_imagenet-pretrained-r50_8xb8-1x1x8-50e_sthv1-rgb.py index 173b0e88c5..2831be7524 100644 --- a/configs/recognition/tanet/tanet_imagenet-pretrained-r50_8xb8-1x1x8-50e_sthv1-rgb.py +++ b/configs/recognition/tanet/tanet_imagenet-pretrained-r50_8xb8-1x1x8-50e_sthv1-rgb.py @@ -15,9 +15,10 @@ ann_file_test = 'data/sthv1/sthv1_val_list_rawframes.txt' sthv1_flip_label_map = {2: 4, 4: 2, 30: 41, 41: 30, 52: 66, 66: 52} +file_client_args = dict(io_backend='disk') train_pipeline = [ dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), - dict(type='RawFrameDecode'), + dict(type='RawFrameDecode', **file_client_args), dict(type='Resize', scale=(-1, 256)), dict( type='MultiScaleCrop', @@ -38,7 +39,7 @@ frame_interval=1, num_clips=8, test_mode=True), - dict(type='RawFrameDecode'), + dict(type='RawFrameDecode', **file_client_args), dict(type='Resize', scale=(-1, 256)), dict(type='CenterCrop', crop_size=224), dict(type='FormatShape', input_format='NCHW'), @@ -52,7 +53,7 @@ num_clips=8, twice_sample=True, test_mode=True), - dict(type='RawFrameDecode'), + dict(type='RawFrameDecode', **file_client_args), dict(type='Resize', scale=(-1, 256)), dict(type='ThreeCrop', crop_size=256), dict(type='FormatShape', input_format='NCHW'), @@ -111,3 +112,9 @@ ] default_hooks = dict(checkpoint=dict(max_keep_ckpts=3)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (8 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=64) diff --git a/configs/recognition/tanet/tanet_imagenet-pretrained-r50_8xb8-dense-1x1x8-100e_kinetics400-rgb.py b/configs/recognition/tanet/tanet_imagenet-pretrained-r50_8xb8-dense-1x1x8-100e_kinetics400-rgb.py index d99f3de90e..3f503090b8 100644 --- a/configs/recognition/tanet/tanet_imagenet-pretrained-r50_8xb8-dense-1x1x8-100e_kinetics400-rgb.py +++ b/configs/recognition/tanet/tanet_imagenet-pretrained-r50_8xb8-dense-1x1x8-100e_kinetics400-rgb.py @@ -11,8 +11,9 @@ ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt' +file_client_args = dict(io_backend='disk') train_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict(type='DenseSampleFrames', clip_len=1, frame_interval=1, num_clips=8), dict(type='DecordDecode'), dict(type='Resize', scale=(-1, 256)), @@ -29,7 +30,7 @@ dict(type='PackActionInputs') ] val_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='DenseSampleFrames', clip_len=1, @@ -43,7 +44,7 @@ dict(type='PackActionInputs') ] test_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='DenseSampleFrames', clip_len=1, @@ -107,3 +108,9 @@ default_hooks = dict( checkpoint=dict(max_keep_ckpts=5), logger=dict(interval=100)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (8 samples per GPU). 
+auto_scale_lr = dict(enable=False, base_batch_size=64) From b6b0197e0486fc1971072f622ef10ecae65026c5 Mon Sep 17 00:00:00 2001 From: wxDai Date: Mon, 26 Dec 2022 11:28:42 +0800 Subject: [PATCH 42/57] [Refactor] Refactor and Enhance 2s-AGCN (#2130) --- configs/_base_/models/agcn.py | 13 - ...-motion-u100-80e_ntu60-xsub-keypoint-2d.py | 67 ++++ ...-motion-u100-80e_ntu60-xsub-keypoint-3d.py | 67 ++++ ...16-bone-u100-80e_ntu60-xsub-keypoint-2d.py | 67 ++++ ...16-bone-u100-80e_ntu60-xsub-keypoint-3d.py | 67 ++++ ...-motion-u100-80e_ntu60-xsub-keypoint-2d.py | 67 ++++ ...-motion-u100-80e_ntu60-xsub-keypoint-3d.py | 67 ++++ ...6-joint-u100-80e_ntu60-xsub-keypoint-2d.py | 104 ++++++ ...6-joint-u100-80e_ntu60-xsub-keypoint-3d.py | 104 ++++++ .../2sagcn_4xb16-80e_ntu60-xsub-bone-3d.py | 41 --- ...2sagcn_4xb16-80e_ntu60-xsub-keypoint-3d.py | 78 ---- configs/skeleton/2s-agcn/README.md | 56 +-- configs/skeleton/2s-agcn/metafile.yml | 152 +++++++- configs/skeleton/stgcn/README.md | 36 +- configs/skeleton/stgcn/metafile.yml | 32 +- mmaction/engine/__init__.py | 1 + mmaction/engine/model/__init__.py | 4 + mmaction/engine/model/weight_init.py | 52 +++ mmaction/models/backbones/__init__.py | 5 +- mmaction/models/backbones/aagcn.py | 236 ++++++++++++ mmaction/models/backbones/agcn.py | 337 ------------------ mmaction/models/backbones/stgcn.py | 25 +- mmaction/models/utils/gcn_utils.py | 152 +++++++- tests/models/backbones/test_aagcn.py | 46 +++ .../models/recognizers/test_recognizer_gcn.py | 8 + 25 files changed, 1324 insertions(+), 560 deletions(-) delete mode 100644 configs/_base_/models/agcn.py create mode 100644 configs/skeleton/2s-agcn/2s-agcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d.py create mode 100644 configs/skeleton/2s-agcn/2s-agcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d.py create mode 100644 configs/skeleton/2s-agcn/2s-agcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d.py create mode 100644 configs/skeleton/2s-agcn/2s-agcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d.py create mode 100644 configs/skeleton/2s-agcn/2s-agcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d.py create mode 100644 configs/skeleton/2s-agcn/2s-agcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d.py create mode 100644 configs/skeleton/2s-agcn/2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py create mode 100644 configs/skeleton/2s-agcn/2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d.py delete mode 100644 configs/skeleton/2s-agcn/2sagcn_4xb16-80e_ntu60-xsub-bone-3d.py delete mode 100644 configs/skeleton/2s-agcn/2sagcn_4xb16-80e_ntu60-xsub-keypoint-3d.py create mode 100644 mmaction/engine/model/__init__.py create mode 100644 mmaction/engine/model/weight_init.py create mode 100644 mmaction/models/backbones/aagcn.py delete mode 100644 mmaction/models/backbones/agcn.py create mode 100644 tests/models/backbones/test_aagcn.py diff --git a/configs/_base_/models/agcn.py b/configs/_base_/models/agcn.py deleted file mode 100644 index b400dd5c86..0000000000 --- a/configs/_base_/models/agcn.py +++ /dev/null @@ -1,13 +0,0 @@ -model = dict( - type='RecognizerGCN', - backbone=dict( - type='AGCN', - in_channels=3, - graph_cfg=dict(layout='ntu-rgb+d', strategy='agcn')), - cls_head=dict( - type='STGCNHead', - num_classes=60, - in_channels=256, - loss_cls=dict(type='CrossEntropyLoss')), - train_cfg=None, - test_cfg=None) diff --git a/configs/skeleton/2s-agcn/2s-agcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d.py b/configs/skeleton/2s-agcn/2s-agcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d.py new file 
mode 100644 index 0000000000..6e5a23a820 --- /dev/null +++ b/configs/skeleton/2s-agcn/2s-agcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d.py @@ -0,0 +1,67 @@ +_base_ = '2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py' + +dataset_type = 'PoseDataset' +ann_file = 'data/skeleton/ntu60_2d.pkl' +train_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['bm']), + dict(type='UniformSampleFrames', clip_len=100), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['bm']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['bm']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=10, + test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=train_pipeline, + split='xsub_train'))) +val_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=val_pipeline, + split='xsub_val', + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=test_pipeline, + split='xsub_val', + test_mode=True)) diff --git a/configs/skeleton/2s-agcn/2s-agcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d.py b/configs/skeleton/2s-agcn/2s-agcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d.py new file mode 100644 index 0000000000..cf9ba8008e --- /dev/null +++ b/configs/skeleton/2s-agcn/2s-agcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d.py @@ -0,0 +1,67 @@ +_base_ = '2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d.py' + +dataset_type = 'PoseDataset' +ann_file = 'data/skeleton/ntu60_3d.pkl' +train_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['bm']), + dict(type='UniformSampleFrames', clip_len=100), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['bm']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['bm']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=10, + test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=5, + 
dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=train_pipeline, + split='xsub_train'))) +val_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=val_pipeline, + split='xsub_val', + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=test_pipeline, + split='xsub_val', + test_mode=True)) diff --git a/configs/skeleton/2s-agcn/2s-agcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d.py b/configs/skeleton/2s-agcn/2s-agcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d.py new file mode 100644 index 0000000000..3afc1f00f9 --- /dev/null +++ b/configs/skeleton/2s-agcn/2s-agcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d.py @@ -0,0 +1,67 @@ +_base_ = '2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py' + +dataset_type = 'PoseDataset' +ann_file = 'data/skeleton/ntu60_2d.pkl' +train_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['b']), + dict(type='UniformSampleFrames', clip_len=100), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['b']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['b']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=10, + test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=train_pipeline, + split='xsub_train'))) +val_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=val_pipeline, + split='xsub_val', + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=test_pipeline, + split='xsub_val', + test_mode=True)) diff --git a/configs/skeleton/2s-agcn/2s-agcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d.py b/configs/skeleton/2s-agcn/2s-agcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d.py new file mode 100644 index 0000000000..a5d9200273 --- /dev/null +++ b/configs/skeleton/2s-agcn/2s-agcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d.py @@ -0,0 +1,67 @@ +_base_ = '2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d.py' + +dataset_type = 'PoseDataset' +ann_file = 'data/skeleton/ntu60_3d.pkl' +train_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['b']), + dict(type='UniformSampleFrames', clip_len=100), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +val_pipeline = [ 
+ dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['b']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['b']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=10, + test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=train_pipeline, + split='xsub_train'))) +val_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=val_pipeline, + split='xsub_val', + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=test_pipeline, + split='xsub_val', + test_mode=True)) diff --git a/configs/skeleton/2s-agcn/2s-agcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d.py b/configs/skeleton/2s-agcn/2s-agcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d.py new file mode 100644 index 0000000000..d3e5e1799f --- /dev/null +++ b/configs/skeleton/2s-agcn/2s-agcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d.py @@ -0,0 +1,67 @@ +_base_ = '2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py' + +dataset_type = 'PoseDataset' +ann_file = 'data/skeleton/ntu60_2d.pkl' +train_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['jm']), + dict(type='UniformSampleFrames', clip_len=100), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['jm']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['jm']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=10, + test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=train_pipeline, + split='xsub_train'))) +val_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=val_pipeline, + split='xsub_val', + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=test_pipeline, 
+ split='xsub_val', + test_mode=True)) diff --git a/configs/skeleton/2s-agcn/2s-agcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d.py b/configs/skeleton/2s-agcn/2s-agcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d.py new file mode 100644 index 0000000000..43eeb47408 --- /dev/null +++ b/configs/skeleton/2s-agcn/2s-agcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d.py @@ -0,0 +1,67 @@ +_base_ = '2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d.py' + +dataset_type = 'PoseDataset' +ann_file = 'data/skeleton/ntu60_3d.pkl' +train_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['jm']), + dict(type='UniformSampleFrames', clip_len=100), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['jm']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['jm']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=10, + test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=train_pipeline, + split='xsub_train'))) +val_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=val_pipeline, + split='xsub_val', + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=test_pipeline, + split='xsub_val', + test_mode=True)) diff --git a/configs/skeleton/2s-agcn/2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py b/configs/skeleton/2s-agcn/2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py new file mode 100644 index 0000000000..ea46de5e30 --- /dev/null +++ b/configs/skeleton/2s-agcn/2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py @@ -0,0 +1,104 @@ +_base_ = '../../_base_/default_runtime.py' + +model = dict( + type='RecognizerGCN', + backbone=dict( + type='AAGCN', + graph_cfg=dict(layout='coco', mode='spatial'), + gcn_attention=False), # degenerate AAGCN to AGCN + cls_head=dict(type='GCNHead', num_classes=60, in_channels=256)) + +dataset_type = 'PoseDataset' +ann_file = 'data/skeleton/ntu60_2d.pkl' +train_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['j']), + dict(type='UniformSampleFrames', clip_len=100), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['j']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='PreNormalize2D'), + 
dict(type='GenSkeFeat', dataset='coco', feats=['j']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=10, + test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=train_pipeline, + split='xsub_train'))) +val_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=val_pipeline, + split='xsub_val', + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=test_pipeline, + split='xsub_val', + test_mode=True)) + +val_evaluator = [dict(type='AccMetric')] +test_evaluator = val_evaluator + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +param_scheduler = [ + dict( + type='CosineAnnealingLR', + eta_min=0, + T_max=16, + by_epoch=True, + convert_to_iter_based=True) +] + +optim_wrapper = dict( + optimizer=dict( + type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True)) + +default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (16 samples per GPU). 
+auto_scale_lr = dict(enable=False, base_batch_size=128) diff --git a/configs/skeleton/2s-agcn/2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d.py b/configs/skeleton/2s-agcn/2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d.py new file mode 100644 index 0000000000..a890ff5c1f --- /dev/null +++ b/configs/skeleton/2s-agcn/2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d.py @@ -0,0 +1,104 @@ +_base_ = '../../_base_/default_runtime.py' + +model = dict( + type='RecognizerGCN', + backbone=dict( + type='AAGCN', + graph_cfg=dict(layout='nturgb+d', mode='spatial'), + gcn_attention=False), # degenerate AAGCN to AGCN + cls_head=dict(type='GCNHead', num_classes=60, in_channels=256)) + +dataset_type = 'PoseDataset' +ann_file = 'data/skeleton/ntu60_3d.pkl' +train_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']), + dict(type='UniformSampleFrames', clip_len=100), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=10, + test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=train_pipeline, + split='xsub_train'))) +val_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=val_pipeline, + split='xsub_val', + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=test_pipeline, + split='xsub_val', + test_mode=True)) + +val_evaluator = [dict(type='AccMetric')] +test_evaluator = val_evaluator + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +param_scheduler = [ + dict( + type='CosineAnnealingLR', + eta_min=0, + T_max=16, + by_epoch=True, + convert_to_iter_based=True) +] + +optim_wrapper = dict( + optimizer=dict( + type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True)) + +default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (16 samples per GPU). 
+auto_scale_lr = dict(enable=False, base_batch_size=128) diff --git a/configs/skeleton/2s-agcn/2sagcn_4xb16-80e_ntu60-xsub-bone-3d.py b/configs/skeleton/2s-agcn/2sagcn_4xb16-80e_ntu60-xsub-bone-3d.py deleted file mode 100644 index e1d8ccc7df..0000000000 --- a/configs/skeleton/2s-agcn/2sagcn_4xb16-80e_ntu60-xsub-bone-3d.py +++ /dev/null @@ -1,41 +0,0 @@ -_base_ = '2sagcn_4xb16-80e_ntu60-xsub-keypoint-3d.py' - -dataset_type = 'PoseDataset' -ann_file_train = 'data/ntu/nturgb+d_skeletons_60_3d/xsub/train.pkl' -ann_file_val = 'data/ntu/nturgb+d_skeletons_60_3d/xsub/val.pkl' -train_pipeline = [ - dict(type='PaddingWithLoop', clip_len=300), - dict(type='PoseDecode'), - dict(type='JointToBone'), - dict(type='FormatGCNInput', input_format='NCTVM'), - dict(type='PackActionInputs') -] -test_pipeline = [ - dict(type='PaddingWithLoop', clip_len=300), - dict(type='PoseDecode'), - dict(type='JointToBone'), - dict(type='FormatGCNInput', input_format='NCTVM'), - dict(type='PackActionInputs') -] - -train_dataloader = dict( - batch_size=16, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, ann_file=ann_file_train, pipeline=train_pipeline)) -val_dataloader = dict( - batch_size=16, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=False), - dataset=dict( - type=dataset_type, ann_file=ann_file_val, pipeline=train_pipeline)) -test_dataloader = dict( - batch_size=1, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=False), - dataset=dict( - type=dataset_type, ann_file=ann_file_val, pipeline=train_pipeline)) diff --git a/configs/skeleton/2s-agcn/2sagcn_4xb16-80e_ntu60-xsub-keypoint-3d.py b/configs/skeleton/2s-agcn/2sagcn_4xb16-80e_ntu60-xsub-keypoint-3d.py deleted file mode 100644 index 329268412c..0000000000 --- a/configs/skeleton/2s-agcn/2sagcn_4xb16-80e_ntu60-xsub-keypoint-3d.py +++ /dev/null @@ -1,78 +0,0 @@ -_base_ = ['../../_base_/models/agcn.py', '../../_base_/default_runtime.py'] - -dataset_type = 'PoseDataset' -ann_file_train = 'data/ntu/nturgb+d_skeletons_60_3d/xsub/train.pkl' -ann_file_val = 'data/ntu/nturgb+d_skeletons_60_3d/xsub/val.pkl' -train_pipeline = [ - dict(type='PaddingWithLoop', clip_len=300), - dict(type='PoseDecode'), - dict(type='FormatGCNInput', input_format='NCTVM'), - dict(type='PackActionInputs') -] -test_pipeline = [ - dict(type='PaddingWithLoop', clip_len=300), - dict(type='PoseDecode'), - dict(type='FormatGCNInput', input_format='NCTVM'), - dict(type='PackActionInputs') -] - -train_dataloader = dict( - batch_size=16, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type=dataset_type, ann_file=ann_file_train, pipeline=train_pipeline)) -val_dataloader = dict( - batch_size=16, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=False), - dataset=dict( - type=dataset_type, - ann_file=ann_file_val, - pipeline=test_pipeline, - test_mode=True)) -test_dataloader = dict( - batch_size=1, - num_workers=2, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=False), - dataset=dict( - type=dataset_type, - ann_file=ann_file_val, - pipeline=test_pipeline, - test_mode=True)) - -val_evaluator = dict(type='AccMetric') -test_evaluator = val_evaluator - -train_cfg = dict( - type='EpochBasedTrainLoop', max_epochs=80, val_begin=1, val_interval=2) -val_cfg = dict(type='ValLoop') -test_cfg = dict(type='TestLoop') - 
-param_scheduler = [ - dict( - type='MultiStepLR', - begin=0, - end=80, - by_epoch=True, - milestones=[30, 40], - gamma=0.1) -] - -optim_wrapper = dict( - optimizer=dict( - type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001, nesterov=True)) - -default_hooks = dict( - checkpoint=dict(interval=2, max_keep_ckpts=5), logger=dict(interval=100)) - -custom_hooks = [dict(type='SyncBuffersHook')] - -# Default setting for scaling LR automatically -# - `enable` means enable scaling LR automatically -# or not by default. -# - `base_batch_size` = (4 GPUs) x (16 samples per GPU). -auto_scale_lr = dict(enable=False, base_batch_size=64) diff --git a/configs/skeleton/2s-agcn/README.md b/configs/skeleton/2s-agcn/README.md index aff452b104..c61b6fe4e3 100644 --- a/configs/skeleton/2s-agcn/README.md +++ b/configs/skeleton/2s-agcn/README.md @@ -18,12 +18,30 @@ In skeleton-based action recognition, graph convolutional networks (GCNs), which ## Results and Models -### NTU60_XSub - -| type | gpus | backbone | pretrain | top1 acc | config | ckpt | log | -| :---: | :--: | :------: | :------: | :------: | :-------------------------------------------------------: | :-----------------------------------------------------: | :-----------------------------------------------------: | -| joint | 4 | AGCN | None | 85.92 | [config](/configs/skeleton/2s-agcn/2sagcn_4xb16-80e_ntu60-xsub-keypoint-3d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2sagcn_4xb16-80e_ntu60-xsub-keypoint-3d/2sagcn_4xb16-80e_ntu60-xsub-keypoint-3d_20220918-3108f53f.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2sagcn_4xb16-80e_ntu60-xsub-keypoint-3d/2sagcn_4xb16-80e_ntu60-xsub-keypoint-3d.log) | -| bone | 4 | AGCN | None | 87.02 | [config](/configs/skeleton/2s-agcn/2sagcn_4xb16-80e_ntu60-xsub-bone-3d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2sagcn_4xb16-80e_ntu60-xsub-bone-3d/2sagcn_4xb16-80e_ntu60-xsub-bone-3d_20220918-46a76b9b.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2sagcn_4xb16-80e_ntu60-xsub-bone-3d/2sagcn_4xb16-80e_ntu60-xsub-bone-3d.log) | +### NTU60_XSub_2D + +| frame sampling strategy | modality | gpus | backbone | top1 acc | testing protocol | FLOPs | params | config | ckpt | log | +| :---------------------: | :----------: | :--: | :------: | :------: | :--------------: | :---: | :----: | :---------------------------------------: | :-------------------------------------: | :------------------------------------: | +| uniform 100 | joint | 8 | AGCN | 88.60 | 10 clips | 4.4G | 3.5M | [config](/configs/skeleton/2s-agcn/2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d/2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d_20221222-4c0ed77e.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d/2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.log) | +| uniform 100 | bone | 8 | AGCN | 91.59 | 10 clips | 4.4G | 3.5M | [config](/configs/skeleton/2s-agcn/2s-agcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2s-agcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d/2s-agcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d_20221222-293878b5.pth) | 
[log](https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2s-agcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d/2s-agcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d.log) | +| uniform 100 | joint-motion | 8 | AGCN | 88.02 | 10 clips | 4.4G | 3.5M | [config](/configs/skeleton/2s-agcn/2s-agcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2s-agcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d/2s-agcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d_20221222-0c86e3a1.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2s-agcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d/2s-agcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d.log) | +| uniform 100 | bone-motion | 8 | AGCN | 88.82 | 10 clips | 4.4G | 3.5M | [config](/configs/skeleton/2s-agcn/2s-agcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2s-agcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d/2s-agcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d_20221222-87996f0d.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2s-agcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d/2s-agcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d.log) | +| | two-stream | | | 91.95 | | | | | | | +| | four-stream | | | 92.34 | | | | | | | + +### NTU60_XSub_3D + +| frame sampling strategy | modality | gpus | backbone | top1 acc | testing protocol | FLOPs | params | config | ckpt | log | +| :---------------------: | :----------: | :--: | :------: | :------: | :--------------: | :---: | :----: | :---------------------------------------: | :-------------------------------------: | :------------------------------------: | +| uniform 100 | joint | 8 | AGCN | 88.26 | 10 clips | 6.5G | 3.5M | [config](/configs/skeleton/2s-agcn/2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d/2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d_20221222-24dabf78.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d/2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d.log) | +| uniform 100 | bone | 8 | AGCN | 89.22 | 10 clips | 6.5G | 3.5M | [config](/configs/skeleton/2s-agcn/2s-agcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2s-agcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d/2s-agcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d_20221222-abe70a7f.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2s-agcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d/2s-agcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d.log) | +| uniform 100 | joint-motion | 8 | AGCN | 86.73 | 10 clips | 6.5G | 3.5M | [config](/configs/skeleton/2s-agcn/2s-agcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2s-agcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d/2s-agcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d_20221222-923cd3c3.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2s-agcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d/2s-agcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d.log) | +| uniform 100 | bone-motion | 8 | AGCN | 86.41 | 10 clips | 6.5G | 3.5M | 
[config](/configs/skeleton/2s-agcn/2s-agcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2s-agcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d/2s-agcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d_20221222-3d8f6f43.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2s-agcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d/2s-agcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d.log) | +| | two-stream | | | 90.27 | | | | | | | +| | four-stream | | | 90.89 | | | | | | | + +1. The **gpus** indicates the number of gpus we used to get the checkpoint. If you want to use a different number of gpus or videos per gpu, the best way is to set `--auto-scale-lr` when calling `tools/train.py`, this parameter will auto-scale the learning rate according to the actual batch size, and the original batch size. +2. For two-stream fusion, we use **joint : bone = 1 : 1**. For four-stream fusion, we use **joint : joint-motion : bone : bone-motion = 2 : 1 : 2 : 1**. For more details about multi-stream fusion, please refer to this [tutorial](/docs/en/user_guides/useful_tools.md#multi-stream-fusion). ## Train @@ -33,18 +51,11 @@ You can use the following command to train a model. python tools/train.py ${CONFIG_FILE} [optional arguments] ``` -Example: train AGCN model on **joint data** of NTU60 dataset in a deterministic option with periodic validation. - -```shell -python tools/train.py configs/skeleton/2s-agcn/2sagcn_4xb16-80e_ntu60-xsub-keypoint-3d.py \ - --cfg-options randomness.seed=0 randomness.deterministic=True -``` - -Example: train AGCN model on **bone data** of NTU60 dataset in a deterministic option with periodic validation. +Example: train STGCN model on NTU60-2D dataset in a deterministic option with periodic validation. ```shell -python tools/train.py configs/skeleton/2s-agcn/2sagcn_4xb16-80e_ntu60-xsub-bone-3d.py \ - --cfg-options randomness.seed=0 randomness.deterministic=True +python tools/train.py configs/skeleton/2s-agcn/2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py \ + --seed 0 --deterministic ``` For more details, you can refer to the **Training** part in the [Training and Test Tutorial](/docs/en/user_guides/4_train_test.md). @@ -57,18 +68,11 @@ You can use the following command to test a model. python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments] ``` -Example: test AGCN model on **joint data** of NTU60 dataset and dump the result to a pickle file. - -```shell -python tools/test.py configs/skeleton/2s-agcn/2sagcn_4xb16-80e_ntu60-xsub-keypoint-3d.py \ - checkpoints/SOME_CHECKPOINT.pth --dump joint_result.pkl -``` - -Example: test AGCN model on **bone data** of NTU60 dataset and dump the result to a pickle file. +Example: test AGCN model on NTU60-2D dataset and dump the result to a pickle file. ```shell -python tools/test.py configs/skeleton/2s-agcn/2sagcn_4xb16-80e_ntu60-xsub-bone-3d.py \ - checkpoints/SOME_CHECKPOINT.pth --dump bone_result.pkl +python tools/test.py configs/skeleton/2s-agcn/2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py \ + checkpoints/SOME_CHECKPOINT.pth --dump result.pkl ``` For more details, you can refer to the **Test** part in the [Training and Test Tutorial](/docs/en/user_guides/4_train_test.md). 
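The two-stream and four-stream rows in the tables above come from late fusion of the per-stream prediction scores (joint : bone = 1 : 1 for two streams, joint : joint-motion : bone : bone-motion = 2 : 1 : 2 : 1 for four streams), for example using the pickle files produced by the `--dump` option in the Test example. The sketch below illustrates that weighted score fusion; the file names and the `pred_score` / `gt_label` keys are illustrative assumptions about the dumped result format, not a guaranteed API.

```python
# Minimal late-fusion sketch for the multi-stream results reported above.
# Assumptions (not part of the patch): each stream was tested with
# `tools/test.py ... --dump <stream>.pkl`, the dumped file is a pickled list
# with one entry per sample, and each entry exposes a class-score vector
# ('pred_score') and a ground-truth label ('gt_label'); adapt the key names
# to whatever your dump actually contains.
import pickle

import numpy as np


def load_scores(path):
    """Return (num_samples, num_classes) scores and (num_samples,) labels."""
    with open(path, 'rb') as f:
        results = pickle.load(f)
    scores = np.stack([np.asarray(res['pred_score']) for res in results])
    labels = np.array([np.asarray(res['gt_label']).item() for res in results])
    return scores, labels


# four-stream fusion: joint : joint-motion : bone : bone-motion = 2 : 1 : 2 : 1
stream_weights = {
    'joint.pkl': 2,
    'joint_motion.pkl': 1,
    'bone.pkl': 2,
    'bone_motion.pkl': 1,
}

fused, labels = None, None
for path, weight in stream_weights.items():
    scores, stream_labels = load_scores(path)
    # all streams are assumed to be tested on the same split in the same order
    labels = stream_labels if labels is None else labels
    fused = weight * scores if fused is None else fused + weight * scores

top1 = float((fused.argmax(axis=1) == labels).mean())
print(f'four-stream top-1 accuracy: {top1:.4f}')
```

For the two-stream numbers, the same loop with `{'joint.pkl': 1, 'bone.pkl': 1}` applies; the multi-stream fusion tutorial linked in the README notes covers the recommended workflow in detail.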
diff --git a/configs/skeleton/2s-agcn/metafile.yml b/configs/skeleton/2s-agcn/metafile.yml index 033c00d14c..c0e77f1094 100644 --- a/configs/skeleton/2s-agcn/metafile.yml +++ b/configs/skeleton/2s-agcn/metafile.yml @@ -6,38 +6,154 @@ Collections: Title: 'Two-Stream Adaptive Graph Convolutional Networks for Skeleton-Based Action Recognition' Models: - - Name: 2sagcn_4xb16-80e_ntu60-xsub-keypoint-3d - Config: configs/skeleton/2s-agcn/2sagcn_4xb16-80e_ntu60-xsub-keypoint-3d.py + - Name: 2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d + Config: configs/skeleton/2s-agcn/2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py In Collection: AGCN Metadata: Architecture: AGCN Batch Size: 16 Epochs: 80 - Parameters: 3472176 - Training Data: NTU60-XSub - Training Resources: 4 GPUS + FLOPs: 4.4G + Parameters: 3.5M + Training Data: NTU60-XSub-2D + Training Resources: 8 GPUs Results: - Dataset: NTU60-XSub + Dataset: NTU60-XSub-2D Task: Skeleton-based Action Recognition Metrics: - Top 1 Accuracy: 85.92 - Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2sagcn_4xb16-80e_ntu60-xsub-keypoint-3d/2sagcn_4xb16-80e_ntu60-xsub-keypoint-3d.log - Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2sagcn_4xb16-80e_ntu60-xsub-keypoint-3d/2sagcn_4xb16-80e_ntu60-xsub-keypoint-3d_20220918-3108f53f.pth + Top 1 Accuracy: 88.60 + Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d/2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.log + Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d/2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d_20221222-4c0ed77e.pth - - Name: 2sagcn_4xb16-80e_ntu60-xsub-bone-3d - Config: configs/skeleton/2s-agcn/2sagcn_4xb16-80e_ntu60-xsub-bone-3d.py + - Name: 2s-agcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d + Config: configs/skeleton/2s-agcn/2s-agcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d.py In Collection: AGCN Metadata: Architecture: AGCN Batch Size: 16 Epochs: 80 - Parameters: 3472176 - Training Data: NTU60-XSub - Training Resources: 4 GPUS + FLOPs: 4.4G + Parameters: 3.5M + Training Data: NTU60-XSub-2D + Training Resources: 8 GPUs Results: - Dataset: NTU60-XSub + Dataset: NTU60-XSub-2D Task: Skeleton-based Action Recognition Metrics: - Top 1 Accuracy: 87.02 - Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2sagcn_4xb16-80e_ntu60-xsub-bone-3d/2sagcn_4xb16-80e_ntu60-xsub-bone-3d.log - Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2sagcn_4xb16-80e_ntu60-xsub-bone-3d/2sagcn_4xb16-80e_ntu60-xsub-bone-3d_20220918-46a76b9b.pth + Top 1 Accuracy: 91.59 + Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2s-agcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d/2s-agcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d.log + Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2s-agcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d/2s-agcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d_20221222-293878b5.pth + + - Name: 2s-agcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d + Config: configs/skeleton/2s-agcn/2s-agcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d.py + In Collection: AGCN + Metadata: + Architecture: AGCN + Batch Size: 16 + Epochs: 80 + FLOPs: 4.4G + Parameters: 3.5M + Training Data: NTU60-XSub-2D + Training Resources: 8 GPUs + Results: + Dataset: NTU60-XSub-2D + Task: Skeleton-based Action Recognition + 
Metrics: + Top 1 Accuracy: 88.02 + Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2s-agcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d/2s-agcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d.log + Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2s-agcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d/2s-agcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d_20221222-0c86e3a1.pth + + - Name: 2s-agcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d + Config: configs/skeleton/2s-agcn/2s-agcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d.py + In Collection: AGCN + Metadata: + Architecture: AGCN + Batch Size: 16 + Epochs: 80 + FLOPs: 4.4G + Parameters: 3.5M + Training Data: NTU60-XSub-2D + Training Resources: 8 GPUs + Results: + Dataset: NTU60-XSub-2D + Task: Skeleton-based Action Recognition + Metrics: + Top 1 Accuracy: 88.82 + Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2s-agcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d/2s-agcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d.log + Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2s-agcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d/2s-agcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d_20221222-87996f0d.pth + + - Name: 2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d + Config: configs/skeleton/2s-agcn/2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d.py + In Collection: AGCN + Metadata: + Architecture: AGCN + Batch Size: 16 + Epochs: 80 + FLOPs: 6.5G + Parameters: 3.5M + Training Data: NTU60-XSub-3D + Training Resources: 8 GPUs + Results: + Dataset: NTU60-XSub-3D + Task: Skeleton-based Action Recognition + Metrics: + Top 1 Accuracy: 88.26 + Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d/2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d.log + Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d/2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d_20221222-24dabf78.pth + + - Name: 2s-agcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d + Config: configs/skeleton/2s-agcn/2s-agcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d.py + In Collection: AGCN + Metadata: + Architecture: AGCN + Batch Size: 16 + Epochs: 80 + FLOPs: 6.5G + Parameters: 3.5M + Training Data: NTU60-XSub-3D + Training Resources: 8 GPUs + Results: + Dataset: NTU60-XSub-3D + Task: Skeleton-based Action Recognition + Metrics: + Top 1 Accuracy: 89.22 + Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2s-agcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d/2s-agcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d.log + Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2s-agcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d/2s-agcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d_20221222-abe70a7f.pth + + - Name: 2s-agcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d + Config: configs/skeleton/2s-agcn/2s-agcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d.py + In Collection: AGCN + Metadata: + Architecture: AGCN + Batch Size: 16 + Epochs: 80 + FLOPs: 6.5G + Parameters: 3.5M + Training Data: NTU60-XSub-3D + Training Resources: 8 GPUs + Results: + Dataset: NTU60-XSub-3D + Task: Skeleton-based Action Recognition + Metrics: + Top 1 Accuracy: 86.73 + Training Log: 
https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2s-agcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d/2s-agcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d.log + Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2s-agcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d/2s-agcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d_20221222-923cd3c3.pth + + - Name: 2s-agcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d + Config: configs/skeleton/2s-agcn/2s-agcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d.py + In Collection: AGCN + Metadata: + Architecture: AGCN + Batch Size: 16 + Epochs: 80 + FLOPs: 6.5G + Parameters: 3.5M + Training Data: NTU60-XSub-3D + Training Resources: 8 GPUs + Results: + Dataset: NTU60-XSub-3D + Task: Skeleton-based Action Recognition + Metrics: + Top 1 Accuracy: 86.41 + Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2s-agcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d/2s-agcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d.log + Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/2s-agcn/2s-agcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d/2s-agcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d_20221222-3d8f6f43.pth diff --git a/configs/skeleton/stgcn/README.md b/configs/skeleton/stgcn/README.md index 77d89cff88..dee9f46dfb 100644 --- a/configs/skeleton/stgcn/README.md +++ b/configs/skeleton/stgcn/README.md @@ -22,10 +22,10 @@ Dynamics of human body skeletons convey significant information for human action | frame sampling strategy | modality | gpus | backbone | top1 acc | testing protocol | FLOPs | params | config | ckpt | log | | :---------------------: | :----------: | :--: | :------: | :------: | :--------------: | :---: | :----: | :---------------------------------------: | :-------------------------------------: | :------------------------------------: | -| uniform 100 | joint | 8 | STGCN | 88.95 | 10 clips | 38.2G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d_20221129-484a394a.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.log) | -| uniform 100 | bone | 8 | STGCN | 91.69 | 10 clips | 38.2G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d_20221129-c4b44488.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d.log) | -| uniform 100 | joint-motion | 8 | STGCN | 86.90 | 10 clips | 38.2G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d_20221129-f18eb408.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d.log) | -| uniform 100 | 
bone-motion | 8 | STGCN | 87.86 | 10 clips | 38.2G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d_20221129-99c60e2d.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d.log) | +| uniform 100 | joint | 8 | STGCN | 88.95 | 10 clips | 3.8G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d_20221129-484a394a.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.log) | +| uniform 100 | bone | 8 | STGCN | 91.69 | 10 clips | 3.8G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d_20221129-c4b44488.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d.log) | +| uniform 100 | joint-motion | 8 | STGCN | 86.90 | 10 clips | 3.8G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d_20221129-f18eb408.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d.log) | +| uniform 100 | bone-motion | 8 | STGCN | 87.86 | 10 clips | 3.8G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d_20221129-99c60e2d.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d.log) | | | two-stream | | | 92.12 | | | | | | | | | four-stream | | | 92.34 | | | | | | | @@ -33,10 +33,10 @@ Dynamics of human body skeletons convey significant information for human action | frame sampling strategy | modality | gpus | backbone | top1 acc | testing protocol | FLOPs | params | config | ckpt | log | | :---------------------: | :----------: | :--: | :------: | :------: | :--------------: | :---: | :----: | :---------------------------------------: | :-------------------------------------: | :------------------------------------: | -| uniform 100 | joint | 8 | STGCN | 88.11 | 10 clips | 57.1G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d.py) | 
[ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d_20221129-850308e1.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d.log) | -| uniform 100 | bone | 8 | STGCN | 88.76 | 10 clips | 57.1G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d_20221129-9c8d2970.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d.log) | -| uniform 100 | joint-motion | 8 | STGCN | 86.06 | 10 clips | 57.1G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d_20221129-927648ea.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d.log) | -| uniform 100 | bone-motion | 8 | STGCN | 85.49 | 10 clips | 57.1G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d_20221129-593162ca.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d.log) | +| uniform 100 | joint | 8 | STGCN | 88.11 | 10 clips | 5.7G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d_20221129-850308e1.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d.log) | +| uniform 100 | bone | 8 | STGCN | 88.76 | 10 clips | 5.7G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d_20221129-9c8d2970.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d/stgcn_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d.log) | +| uniform 100 | joint-motion | 8 | STGCN | 86.06 | 10 clips | 5.7G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d_20221129-927648ea.pth) | 
[log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d/stgcn_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d.log) | +| uniform 100 | bone-motion | 8 | STGCN | 85.49 | 10 clips | 5.7G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d_20221129-593162ca.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d/stgcn_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d.log) | | | two-stream | | | 90.14 | | | | | | | | | four-stream | | | 90.39 | | | | | | | @@ -44,10 +44,10 @@ Dynamics of human body skeletons convey significant information for human action | frame sampling strategy | modality | gpus | backbone | top1 acc | testing protocol | FLOPs | params | config | ckpt | log | | :---------------------: | :----------: | :--: | :------: | :------: | :--------------: | :---: | :----: | :---------------------------------------: | :-------------------------------------: | :------------------------------------: | -| uniform 100 | joint | 8 | STGCN | 83.19 | 10 clips | 38.2G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-2d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-2d/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-2d_20221129-612416c6.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-2d/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-2d.log) | -| uniform 100 | bone | 8 | STGCN | 83.36 | 10 clips | 38.2G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-2d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-2d/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-2d_20221129-131e63c3.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-2d/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-2d.log) | -| uniform 100 | joint-motion | 8 | STGCN | 78.87 | 10 clips | 38.2G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-2d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-2d/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-2d_20221129-7cb38ec2.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-2d/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-2d.log) | -| uniform 100 | bone-motion | 8 | STGCN | 79.55 | 10 clips | 38.2G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-2d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-2d/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-2d_20221129-f5b19892.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-2d/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-2d.log) | +| uniform 100 | joint | 8 | STGCN | 83.19 | 10 clips | 3.8G | 3.1M | 
[config](/configs/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-2d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-2d/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-2d_20221129-612416c6.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-2d/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-2d.log) | +| uniform 100 | bone | 8 | STGCN | 83.36 | 10 clips | 3.8G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-2d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-2d/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-2d_20221129-131e63c3.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-2d/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-2d.log) | +| uniform 100 | joint-motion | 8 | STGCN | 78.87 | 10 clips | 3.8G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-2d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-2d/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-2d_20221129-7cb38ec2.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-2d/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-2d.log) | +| uniform 100 | bone-motion | 8 | STGCN | 79.55 | 10 clips | 3.8G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-2d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-2d/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-2d_20221129-f5b19892.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-2d/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-2d.log) | | | two-stream | | | 84.84 | | | | | | | | | four-stream | | | 85.23 | | | | | | | @@ -55,10 +55,10 @@ Dynamics of human body skeletons convey significant information for human action | frame sampling strategy | modality | gpus | backbone | top1 acc | testing protocol | FLOPs | params | config | ckpt | log | | :---------------------: | :----------: | :--: | :------: | :------: | :--------------: | :---: | :----: | :---------------------------------------: | :-------------------------------------: | :------------------------------------: | -| uniform 100 | joint | 8 | STGCN | 82.15 | 10 clips | 57.1G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-3d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-3d/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-3d_20221129-0484f579.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-3d/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-3d.log) | -| uniform 100 | bone | 8 | STGCN | 84.28 | 10 clips | 57.1G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-3d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-3d/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-3d_20221129-bc007510.pth) | 
[log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-3d/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-3d.log) | -| uniform 100 | joint-motion | 8 | STGCN | 78.93 | 10 clips | 57.1G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-3d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-3d/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-3d_20221129-5d54f525.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-3d/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-3d.log) | -| uniform 100 | bone-motion | 8 | STGCN | 80.02 | 10 clips | 57.1G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-3d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-3d/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-3d_20221129-3cb0e4e1.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-3d/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-3d.log) | +| uniform 100 | joint | 8 | STGCN | 82.15 | 10 clips | 5.7G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-3d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-3d/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-3d_20221129-0484f579.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-3d/stgcn_8xb16-joint-u100-80e_ntu120-xsub-keypoint-3d.log) | +| uniform 100 | bone | 8 | STGCN | 84.28 | 10 clips | 5.7G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-3d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-3d/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-3d_20221129-bc007510.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-3d/stgcn_8xb16-bone-u100-80e_ntu120-xsub-keypoint-3d.log) | +| uniform 100 | joint-motion | 8 | STGCN | 78.93 | 10 clips | 5.7G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-3d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-3d/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-3d_20221129-5d54f525.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-3d/stgcn_8xb16-joint-motion-u100-80e_ntu120-xsub-keypoint-3d.log) | +| uniform 100 | bone-motion | 8 | STGCN | 80.02 | 10 clips | 5.7G | 3.1M | [config](/configs/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-3d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-3d/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-3d_20221129-3cb0e4e1.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcn/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-3d/stgcn_8xb16-bone-motion-u100-80e_ntu120-xsub-keypoint-3d.log) | | | two-stream | | | 85.68 | | | | | | | | | 
four-stream | | | 86.19 | | | | | | | @@ -73,7 +73,7 @@ You can use the following command to train a model. python tools/train.py ${CONFIG_FILE} [optional arguments] ``` -Example: train STGCN model on NTU60 dataset in a deterministic option with periodic validation. +Example: train STGCN model on NTU60-2D dataset in a deterministic option with periodic validation. ```shell python tools/train.py configs/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py \ @@ -90,7 +90,7 @@ You can use the following command to test a model. python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments] ``` -Example: test STGCN model on NTU60 dataset and dump the result to a pickle file. +Example: test STGCN model on NTU60-2D dataset and dump the result to a pickle file. ```shell python tools/test.py configs/skeleton/stgcn/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py \ diff --git a/configs/skeleton/stgcn/metafile.yml b/configs/skeleton/stgcn/metafile.yml index 0480fd91b7..5b8a283833 100644 --- a/configs/skeleton/stgcn/metafile.yml +++ b/configs/skeleton/stgcn/metafile.yml @@ -13,7 +13,7 @@ Models: Architecture: STGCN Batch Size: 16 Epochs: 80 - FLOPs: 38.2G + FLOPs: 3.8G Parameters: 3.1M Training Data: NTU60-XSub-2D Training Resources: 8 GPUs @@ -32,7 +32,7 @@ Models: Architecture: STGCN Batch Size: 16 Epochs: 80 - FLOPs: 38.2G + FLOPs: 3.8G Parameters: 3.1M Training Data: NTU60-XSub-2D Training Resources: 8 GPUs @@ -51,7 +51,7 @@ Models: Architecture: STGCN Batch Size: 16 Epochs: 80 - FLOPs: 38.2G + FLOPs: 3.8G Parameters: 3.1M Training Data: NTU60-XSub-2D Training Resources: 8 GPUs @@ -70,7 +70,7 @@ Models: Architecture: STGCN Batch Size: 16 Epochs: 80 - FLOPs: 38.2G + FLOPs: 3.8G Parameters: 3.1M Training Data: NTU60-XSub-2D Training Resources: 8 GPUs @@ -89,7 +89,7 @@ Models: Architecture: STGCN Batch Size: 16 Epochs: 80 - FLOPs: 57.1G + FLOPs: 5.7G Parameters: 3.1M Training Data: NTU60-XSub-3D Training Resources: 8 GPUs @@ -108,7 +108,7 @@ Models: Architecture: STGCN Batch Size: 16 Epochs: 80 - FLOPs: 57.1G + FLOPs: 5.7G Parameters: 3.1M Training Data: NTU60-XSub-3D Training Resources: 8 GPUs @@ -127,7 +127,7 @@ Models: Architecture: STGCN Batch Size: 16 Epochs: 80 - FLOPs: 57.1G + FLOPs: 5.7G Parameters: 3.1M Training Data: NTU60-XSub-3D Training Resources: 8 GPUs @@ -146,7 +146,7 @@ Models: Architecture: STGCN Batch Size: 16 Epochs: 80 - FLOPs: 57.1G + FLOPs: 5.7G Parameters: 3.1M Training Data: NTU60-XSub-3D Training Resources: 8 GPUs @@ -165,7 +165,7 @@ Models: Architecture: STGCN Batch Size: 16 Epochs: 80 - FLOPs: 38.2G + FLOPs: 3.8G Parameters: 3.1M Training Data: NTU120-XSub-2D Training Resources: 8 GPUs @@ -184,7 +184,7 @@ Models: Architecture: STGCN Batch Size: 16 Epochs: 80 - FLOPs: 38.2G + FLOPs: 3.8G Parameters: 3.1M Training Data: NTU120-XSub-2D Training Resources: 8 GPUs @@ -203,7 +203,7 @@ Models: Architecture: STGCN Batch Size: 16 Epochs: 80 - FLOPs: 38.2G + FLOPs: 3.8G Parameters: 3.1M Training Data: NTU120-XSub-2D Training Resources: 8 GPUs @@ -222,7 +222,7 @@ Models: Architecture: STGCN Batch Size: 16 Epochs: 80 - FLOPs: 38.2G + FLOPs: 3.8G Parameters: 3.1M Training Data: NTU120-XSub-2D Training Resources: 8 GPUs @@ -241,7 +241,7 @@ Models: Architecture: STGCN Batch Size: 16 Epochs: 80 - FLOPs: 57.1G + FLOPs: 5.7G Parameters: 3.1M Training Data: NTU120-XSub-3D Training Resources: 8 GPUs @@ -260,7 +260,7 @@ Models: Architecture: STGCN Batch Size: 16 Epochs: 80 - FLOPs: 57.1G + FLOPs: 5.7G Parameters: 3.1M Training Data: NTU120-XSub-3D Training 
Resources: 8 GPUs @@ -279,7 +279,7 @@ Models: Architecture: STGCN Batch Size: 16 Epochs: 80 - FLOPs: 57.1G + FLOPs: 5.7G Parameters: 3.1M Training Data: NTU120-XSub-3D Training Resources: 8 GPUs @@ -298,7 +298,7 @@ Models: Architecture: STGCN Batch Size: 16 Epochs: 80 - FLOPs: 57.1G + FLOPs: 5.7G Parameters: 3.1M Training Data: NTU120-XSub-3D Training Resources: 8 GPUs diff --git a/mmaction/engine/__init__.py b/mmaction/engine/__init__.py index e04835b27e..4d45a40230 100644 --- a/mmaction/engine/__init__.py +++ b/mmaction/engine/__init__.py @@ -1,3 +1,4 @@ # Copyright (c) OpenMMLab. All rights reserved. from .hooks import * # noqa: F401, F403 +from .model import * # noqa: F401, F403 from .optimizers import * # noqa: F401, F403 diff --git a/mmaction/engine/model/__init__.py b/mmaction/engine/model/__init__.py new file mode 100644 index 0000000000..c3d5a08672 --- /dev/null +++ b/mmaction/engine/model/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .weight_init import ConvBranchInit + +__all__ = ['ConvBranchInit'] diff --git a/mmaction/engine/model/weight_init.py b/mmaction/engine/model/weight_init.py new file mode 100644 index 0000000000..5c4f4bad02 --- /dev/null +++ b/mmaction/engine/model/weight_init.py @@ -0,0 +1,52 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import math + +import torch.nn as nn +from mmengine.model import BaseInit, update_init_info + +from mmaction.registry import WEIGHT_INITIALIZERS + + +def conv_branch_init(conv: nn.Module, branches: int) -> None: + """Perform initialization for a conv branch. + + Args: + conv (nn.Module): The conv module of a branch. + branches (int): The number of branches. + """ + + weight = conv.weight + n = weight.size(0) + k1 = weight.size(1) + k2 = weight.size(2) + nn.init.normal_(weight, 0, math.sqrt(2. / (n * k1 * k2 * branches))) + nn.init.constant_(conv.bias, 0) + + +@WEIGHT_INITIALIZERS.register_module('ConvBranch') +class ConvBranchInit(BaseInit): + """Initialize the module parameters of different branches. + + Args: + name (str): The name of the target module. + """ + + def __init__(self, name: str, **kwargs) -> None: + super(ConvBranchInit, self).__init__(**kwargs) + self.name = name + + def __call__(self, module) -> None: + assert hasattr(module, self.name) + + # Take a short cut to get the target module + module = getattr(module, self.name) + num_subset = len(module) + for conv in module: + conv_branch_init(conv, num_subset) + + if hasattr(module, '_params_init_info'): + update_init_info(module, init_info=self._get_init_info()) + + def _get_init_info(self) -> str: + info = f'{self.__class__.__name__}' + return info diff --git a/mmaction/models/backbones/__init__.py b/mmaction/models/backbones/__init__.py index 6a2c7b526a..741d652e01 100644 --- a/mmaction/models/backbones/__init__.py +++ b/mmaction/models/backbones/__init__.py @@ -1,5 +1,6 @@ # Copyright (c) OpenMMLab. All rights reserved. 
-from .agcn import AGCN +# from .aagcn import AAGCN +from .aagcn import AAGCN from .c2d import C2D from .c3d import C3D from .mobilenet_v2 import MobileNetV2 @@ -25,6 +26,6 @@ 'C2D', 'C3D', 'ResNet', 'ResNet3d', 'ResNetTSM', 'ResNet2Plus1d', 'ResNet3dSlowFast', 'ResNet3dSlowOnly', 'ResNet3dCSN', 'ResNetTIN', 'X3D', 'ResNet3dLayer', 'MobileNetV2TSM', 'MobileNetV2', 'TANet', 'TimeSformer', - 'STGCN', 'AGCN', 'ResNetAudio', 'SwinTransformer3D', 'VisionTransformer', + 'STGCN', 'AAGCN', 'ResNetAudio', 'SwinTransformer3D', 'VisionTransformer', 'MViT' ] diff --git a/mmaction/models/backbones/aagcn.py b/mmaction/models/backbones/aagcn.py new file mode 100644 index 0000000000..dc4a99029e --- /dev/null +++ b/mmaction/models/backbones/aagcn.py @@ -0,0 +1,236 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import copy as cp +from typing import Dict, List, Optional, Union + +import torch +import torch.nn as nn +from mmengine.model import BaseModule, ModuleList + +from mmaction.registry import MODELS +from ..utils import Graph, unit_aagcn, unit_tcn + + +class AAGCNBlock(BaseModule): + """The basic block of AAGCN. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + A (torch.Tensor): The adjacency matrix defined in the graph + with shape of `(num_subsets, num_nodes, num_nodes)`. + stride (int): Stride of the temporal convolution. Defaults to 1. + residual (bool): Whether to use residual connection. Defaults to True. + init_cfg (dict or list[dict], optional): Config to control + the initialization. Defaults to None. + """ + + def __init__(self, + in_channels: int, + out_channels: int, + A: torch.Tensor, + stride: int = 1, + residual: bool = True, + init_cfg: Optional[Union[Dict, List[Dict]]] = None, + **kwargs) -> None: + super().__init__(init_cfg=init_cfg) + + gcn_kwargs = {k[4:]: v for k, v in kwargs.items() if k[:4] == 'gcn_'} + tcn_kwargs = {k[4:]: v for k, v in kwargs.items() if k[:4] == 'tcn_'} + kwargs = { + k: v + for k, v in kwargs.items() if k[:4] not in ['gcn_', 'tcn_'] + } + assert len(kwargs) == 0, f'Invalid arguments: {kwargs}' + + tcn_type = tcn_kwargs.pop('type', 'unit_tcn') + assert tcn_type in ['unit_tcn', 'mstcn'] + gcn_type = gcn_kwargs.pop('type', 'unit_aagcn') + assert gcn_type in ['unit_aagcn'] + + self.gcn = unit_aagcn(in_channels, out_channels, A, **gcn_kwargs) + + if tcn_type == 'unit_tcn': + self.tcn = unit_tcn( + out_channels, out_channels, 9, stride=stride, **tcn_kwargs) + + self.relu = nn.ReLU() + + if not residual: + self.residual = lambda x: 0 + elif (in_channels == out_channels) and (stride == 1): + self.residual = lambda x: x + else: + self.residual = unit_tcn( + in_channels, out_channels, kernel_size=1, stride=stride) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Defines the computation performed at every call.""" + return self.relu(self.tcn(self.gcn(x)) + self.residual(x)) + + +@MODELS.register_module() +class AAGCN(BaseModule): + """AAGCN backbone, the attention-enhanced version of 2s-AGCN. + + Skeleton-Based Action Recognition with Multi-Stream + Adaptive Graph Convolutional Networks. + More details can be found in the `paper + `__ . + + Two-Stream Adaptive Graph Convolutional Networks for + Skeleton-Based Action Recognition. + More details can be found in the `paper + `__ . + + Args: + graph_cfg (dict): Config for building the graph. + in_channels (int): Number of input channels. Defaults to 3. + base_channels (int): Number of base channels. Defaults to 64. 
+ data_bn_type (str): Type of the data bn layer. Defaults to ``'MVC'``. + num_person (int): Maximum number of people. Only used when + data_bn_type == 'MVC'. Defaults to 2. + num_stages (int): Total number of stages. Defaults to 10. + inflate_stages (list[int]): Stages to inflate the number of channels. + Defaults to ``[5, 8]``. + down_stages (list[int]): Stages to perform downsampling in + the time dimension. Defaults to ``[5, 8]``. + init_cfg (dict or list[dict], optional): Config to control + the initialization. Defaults to None. + + Examples: + >>> import torch + >>> from mmaction.models import AAGCN + >>> from mmaction.utils import register_all_modules + >>> + >>> register_all_modules() + >>> mode = 'stgcn_spatial' + >>> batch_size, num_person, num_frames = 2, 2, 150 + >>> + >>> # openpose-18 layout + >>> num_joints = 18 + >>> model = AAGCN(graph_cfg=dict(layout='openpose', mode=mode)) + >>> model.init_weights() + >>> inputs = torch.randn(batch_size, num_person, + ... num_frames, num_joints, 3) + >>> output = model(inputs) + >>> print(output.shape) + >>> + >>> # nturgb+d layout + >>> num_joints = 25 + >>> model = AAGCN(graph_cfg=dict(layout='nturgb+d', mode=mode)) + >>> model.init_weights() + >>> inputs = torch.randn(batch_size, num_person, + ... num_frames, num_joints, 3) + >>> output = model(inputs) + >>> print(output.shape) + >>> + >>> # coco layout + >>> num_joints = 17 + >>> model = AAGCN(graph_cfg=dict(layout='coco', mode=mode)) + >>> model.init_weights() + >>> inputs = torch.randn(batch_size, num_person, + ... num_frames, num_joints, 3) + >>> output = model(inputs) + >>> print(output.shape) + >>> + >>> # custom settings + >>> # disable the attention module to degenerate AAGCN to AGCN + >>> model = AAGCN(graph_cfg=dict(layout='coco', mode=mode), + ... 
gcn_attention=False) + >>> model.init_weights() + >>> output = model(inputs) + >>> print(output.shape) + torch.Size([2, 2, 256, 38, 18]) + torch.Size([2, 2, 256, 38, 25]) + torch.Size([2, 2, 256, 38, 17]) + torch.Size([2, 2, 256, 38, 17]) + """ + + def __init__(self, + graph_cfg: Dict, + in_channels: int = 3, + base_channels: int = 64, + data_bn_type: str = 'MVC', + num_person: int = 2, + num_stages: int = 10, + inflate_stages: List[int] = [5, 8], + down_stages: List[int] = [5, 8], + init_cfg: Optional[Union[Dict, List[Dict]]] = None, + **kwargs) -> None: + super().__init__(init_cfg=init_cfg) + + self.graph = Graph(**graph_cfg) + A = torch.tensor( + self.graph.A, dtype=torch.float32, requires_grad=False) + self.register_buffer('A', A) + + assert data_bn_type in ['MVC', 'VC', None] + self.data_bn_type = data_bn_type + self.in_channels = in_channels + self.base_channels = base_channels + self.num_person = num_person + self.num_stages = num_stages + self.inflate_stages = inflate_stages + self.down_stages = down_stages + + if self.data_bn_type == 'MVC': + self.data_bn = nn.BatchNorm1d(num_person * in_channels * A.size(1)) + elif self.data_bn_type == 'VC': + self.data_bn = nn.BatchNorm1d(in_channels * A.size(1)) + else: + self.data_bn = nn.Identity() + + lw_kwargs = [cp.deepcopy(kwargs) for i in range(num_stages)] + for k, v in kwargs.items(): + if isinstance(v, tuple) and len(v) == num_stages: + for i in range(num_stages): + lw_kwargs[i][k] = v[i] + lw_kwargs[0].pop('tcn_dropout', None) + + modules = [] + if self.in_channels != self.base_channels: + modules = [ + AAGCNBlock( + in_channels, + base_channels, + A.clone(), + 1, + residual=False, + **lw_kwargs[0]) + ] + + for i in range(2, num_stages + 1): + in_channels = base_channels + out_channels = base_channels * (1 + (i in inflate_stages)) + stride = 1 + (i in down_stages) + modules.append( + AAGCNBlock( + base_channels, + out_channels, + A.clone(), + stride=stride, + **lw_kwargs[i - 1])) + base_channels = out_channels + + if self.in_channels == self.base_channels: + self.num_stages -= 1 + + self.gcn = ModuleList(modules) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Defines the computation performed at every call.""" + N, M, T, V, C = x.size() + x = x.permute(0, 1, 3, 4, 2).contiguous() + if self.data_bn_type == 'MVC': + x = self.data_bn(x.view(N, M * V * C, T)) + else: + x = self.data_bn(x.view(N * M, V * C, T)) + + x = x.view(N, M, V, C, T).permute(0, 1, 3, 4, + 2).contiguous().view(N * M, C, T, V) + + for i in range(self.num_stages): + x = self.gcn[i](x) + + x = x.reshape((N, M) + x.shape[1:]) + return x diff --git a/mmaction/models/backbones/agcn.py b/mmaction/models/backbones/agcn.py deleted file mode 100644 index 9650d54565..0000000000 --- a/mmaction/models/backbones/agcn.py +++ /dev/null @@ -1,337 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import math - -import torch -import torch.nn as nn -from mmengine.logging import MMLogger -from mmengine.model.weight_init import constant_init, kaiming_init, normal_init -from mmengine.runner import load_checkpoint - -from mmaction.registry import MODELS -from ..utils import Graph - - -def conv_branch_init(conv, branches): - weight = conv.weight - n = weight.size(0) - k1 = weight.size(1) - k2 = weight.size(2) - normal_init(weight, mean=0, std=math.sqrt(2. 
/ (n * k1 * k2 * branches))) - constant_init(conv.bias, 0) - - -def conv_init(conv): - kaiming_init(conv.weight) - constant_init(conv.bias, 0) - - -def bn_init(bn, scale): - constant_init(bn.weight, scale) - constant_init(bn.bias, 0) - - -def zero(x): - """return zero.""" - return 0 - - -def identity(x): - """return input itself.""" - return x - - -class AGCNBlock(nn.Module): - """Applies spatial graph convolution and temporal convolution over an - input graph sequence. - - Args: - in_channels (int): Number of channels in the input sequence data - out_channels (int): Number of channels produced by the convolution - kernel_size (tuple): Size of the temporal convolving kernel and - graph convolving kernel - stride (int, optional): Stride of the temporal convolution. Default: 1 - adj_len (int, optional): The length of the adjacency matrix. - Default: 17 - dropout (int, optional): Dropout rate of the final output. Default: 0 - residual (bool, optional): If ``True``, applies a residual mechanism. - Default: ``True`` - - Shape: - - Input[0]: Input graph sequence in :math:`(N, in_channels, T_{in}, V)` - format - - Input[1]: Input graph adjacency matrix in :math:`(K, V, V)` format - - Output[0]: Outpu graph sequence in :math:`(N, out_channels, T_{out}, - V)` format - - Output[1]: Graph adjacency matrix for output data in :math:`(K, V, - V)` format - - where - :math:`N` is a batch size, - :math:`K` is the spatial kernel size, as :math:`K == kernel_size[1] - `, - :math:`T_{in}/T_{out}` is a length of input/output sequence, - :math:`V` is the number of graph nodes. - """ - - def __init__(self, - in_channels, - out_channels, - kernel_size, - stride=1, - adj_len=17, - dropout=0, - residual=True): - super().__init__() - - assert len(kernel_size) == 2 - assert kernel_size[0] % 2 == 1 - padding = ((kernel_size[0] - 1) // 2, 0) - - self.gcn = ConvTemporalGraphical( - in_channels, out_channels, kernel_size[1], adj_len=adj_len) - self.tcn = nn.Sequential( - nn.Conv2d(out_channels, out_channels, (kernel_size[0], 1), - (stride, 1), padding), nn.BatchNorm2d(out_channels)) - - # tcn init - for m in self.tcn.modules(): - if isinstance(m, nn.Conv2d): - conv_init(m) - elif isinstance(m, nn.BatchNorm2d): - bn_init(m, 1) - - if not residual: - self.residual = zero - - elif (in_channels == out_channels) and (stride == 1): - self.residual = identity - - else: - self.residual = nn.Sequential( - nn.Conv2d( - in_channels, - out_channels, - kernel_size=1, - stride=(stride, 1)), nn.BatchNorm2d(out_channels)) - - self.relu = nn.ReLU(inplace=True) - - def forward(self, x, adj_mat): - """Defines the computation performed at every call.""" - res = self.residual(x) - x, adj_mat = self.gcn(x, adj_mat) - - x = self.tcn(x) + res - - return self.relu(x), adj_mat - - -class ConvTemporalGraphical(nn.Module): - """The basic module for applying a graph convolution. - - Args: - in_channels (int): Number of channels in the input sequence data - out_channels (int): Number of channels produced by the convolution - kernel_size (int): Size of the graph convolving kernel - t_kernel_size (int): Size of the temporal convolving kernel - t_stride (int, optional): Stride of the temporal convolution. - Default: 1 - t_padding (int, optional): Temporal zero-padding added to both sides - of the input. Default: 0 - t_dilation (int, optional): Spacing between temporal kernel elements. - Default: 1 - adj_len (int, optional): The length of the adjacency matrix. - Default: 17 - bias (bool, optional): If ``True``, adds a learnable bias to the - output. 
Default: ``True`` - - Shape: - - Input[0]: Input graph sequence in :math:`(N, in_channels, T_{in}, V)` - format - - Input[1]: Input graph adjacency matrix in :math:`(K, V, V)` format - - Output[0]: Output graph sequence in :math:`(N, out_channels, T_{out} - , V)` format - - Output[1]: Graph adjacency matrix for output data in :math:`(K, V, V) - ` format - - where - :math:`N` is a batch size, - :math:`K` is the spatial kernel size, as :math:`K == kernel_size[1] - `, - :math:`T_{in}/T_{out}` is a length of input/output sequence, - :math:`V` is the number of graph nodes. - """ - - def __init__(self, - in_channels, - out_channels, - kernel_size, - t_kernel_size=1, - t_stride=1, - t_padding=0, - t_dilation=1, - adj_len=17, - bias=True): - super().__init__() - - self.kernel_size = kernel_size - - self.PA = nn.Parameter(torch.FloatTensor(3, adj_len, adj_len)) - torch.nn.init.constant_(self.PA, 1e-6) - - self.num_subset = 3 - inter_channels = out_channels // 4 - self.inter_c = inter_channels - self.conv_a = nn.ModuleList() - self.conv_b = nn.ModuleList() - self.conv_d = nn.ModuleList() - for i in range(self.num_subset): - self.conv_a.append(nn.Conv2d(in_channels, inter_channels, 1)) - self.conv_b.append(nn.Conv2d(in_channels, inter_channels, 1)) - self.conv_d.append(nn.Conv2d(in_channels, out_channels, 1)) - - if in_channels != out_channels: - self.down = nn.Sequential( - nn.Conv2d(in_channels, out_channels, 1), - nn.BatchNorm2d(out_channels)) - else: - self.down = lambda x: x - - self.bn = nn.BatchNorm2d(out_channels) - self.soft = nn.Softmax(-2) - self.relu = nn.ReLU() - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - conv_init(m) - elif isinstance(m, nn.BatchNorm2d): - bn_init(m, 1) - bn_init(self.bn, 1e-6) - for i in range(self.num_subset): - conv_branch_init(self.conv_d[i], self.num_subset) - - def forward(self, x, adj_mat): - """Defines the computation performed at every call.""" - assert adj_mat.size(0) == self.kernel_size - - N, C, T, V = x.size() - A = adj_mat + self.PA - - y = None - for i in range(self.num_subset): - A1 = self.conv_a[i](x).permute(0, 3, 1, 2).contiguous().view( - N, V, self.inter_c * T) - A2 = self.conv_b[i](x).view(N, self.inter_c * T, V) - A1 = self.soft(torch.matmul(A1, A2) / A1.size(-1)) # N V V - A1 = A1 + A[i] - A2 = x.view(N, C * T, V) - z = self.conv_d[i](torch.matmul(A2, A1).view(N, C, T, V)) - y = z + y if y is not None else z - y = self.bn(y) - y += self.down(x) - - return self.relu(y), adj_mat - - -@MODELS.register_module() -class AGCN(nn.Module): - """Backbone of Two-Stream Adaptive Graph Convolutional Networks for - Skeleton-Based Action Recognition. - - Args: - in_channels (int): Number of channels in the input data. - graph_cfg (dict): The arguments for building the graph. - data_bn (bool): If 'True', adds data normalization to the inputs. - Default: True. - pretrained (str | None): Name of pretrained model. - **kwargs (optional): Other parameters for graph convolution units. - - Shape: - - Input: :math:`(N, in_channels, T_{in}, V_{in}, M_{in})` - - Output: :math:`(N, num_class)` where - :math:`N` is a batch size, - :math:`T_{in}` is a length of input sequence, - :math:`V_{in}` is the number of graph nodes, - :math:`M_{in}` is the number of instance in a frame. 
- """ - - def __init__(self, - in_channels, - graph_cfg, - data_bn=True, - pretrained=None, - **kwargs): - super().__init__() - - # load graph - self.graph = Graph(**graph_cfg) - A = torch.tensor( - self.graph.A, dtype=torch.float32, requires_grad=False) - self.register_buffer('A', A) - - # build networks - spatial_kernel_size = A.size(0) - temporal_kernel_size = 9 - kernel_size = (temporal_kernel_size, spatial_kernel_size) - self.data_bn = nn.BatchNorm1d(in_channels * - A.size(1)) if data_bn else identity - - kwargs0 = {k: v for k, v in kwargs.items() if k != 'dropout'} - self.agcn_networks = nn.ModuleList(( - AGCNBlock( - in_channels, - 64, - kernel_size, - 1, - adj_len=A.size(1), - residual=False, - **kwargs0), - AGCNBlock(64, 64, kernel_size, 1, adj_len=A.size(1), **kwargs), - AGCNBlock(64, 64, kernel_size, 1, adj_len=A.size(1), **kwargs), - AGCNBlock(64, 64, kernel_size, 1, adj_len=A.size(1), **kwargs), - AGCNBlock(64, 128, kernel_size, 2, adj_len=A.size(1), **kwargs), - AGCNBlock(128, 128, kernel_size, 1, adj_len=A.size(1), **kwargs), - AGCNBlock(128, 128, kernel_size, 1, adj_len=A.size(1), **kwargs), - AGCNBlock(128, 256, kernel_size, 2, adj_len=A.size(1), **kwargs), - AGCNBlock(256, 256, kernel_size, 1, adj_len=A.size(1), **kwargs), - AGCNBlock(256, 256, kernel_size, 1, adj_len=A.size(1), **kwargs), - )) - - self.pretrained = pretrained - - def init_weights(self): - """Initiate the parameters either from existing checkpoint or from - scratch.""" - if isinstance(self.pretrained, str): - logger = MMLogger.get_current_instance() - logger.info(f'load model from: {self.pretrained}') - - load_checkpoint(self, self.pretrained, strict=False, logger=logger) - - elif self.pretrained is None: - pass - else: - raise TypeError('pretrained must be a str or None') - - def forward(self, x): - """Defines the computation performed at every call. - Args: - x (torch.Tensor): The input data. - - Returns: - torch.Tensor: The output of the module. - """ - # data normalization - x = x.float() - n, c, t, v, m = x.size() - x = x.permute(0, 4, 3, 1, 2).contiguous() # N M V C T - x = x.view(n * m, v * c, t) - x = self.data_bn(x) - x = x.view(n, m, v, c, t) - x = x.permute(0, 1, 3, 4, 2).contiguous() - x = x.view(n * m, c, t, v) - - for gcn in self.agcn_networks: - x, _ = gcn(x, self.A) - - return x diff --git a/mmaction/models/backbones/stgcn.py b/mmaction/models/backbones/stgcn.py index 9fb2469674..9900a49648 100644 --- a/mmaction/models/backbones/stgcn.py +++ b/mmaction/models/backbones/stgcn.py @@ -13,7 +13,7 @@ class STGCNBlock(BaseModule): - """The basic block of ST-GCN. + """The basic block of STGCN. Args: in_channels (int): Number of input channels. @@ -22,6 +22,8 @@ class STGCNBlock(BaseModule): with shape of `(num_subsets, num_nodes, num_nodes)`. stride (int): Stride of the temporal convolution. Defaults to 1. residual (bool): Whether to use residual connection. Defaults to True. + init_cfg (dict or list[dict], optional): Config to control + the initialization. Defaults to None. 
""" def __init__(self, @@ -30,8 +32,9 @@ def __init__(self, A: torch.Tensor, stride: int = 1, residual: bool = True, + init_cfg: Optional[Union[Dict, List[Dict]]] = None, **kwargs) -> None: - super().__init__() + super().__init__(init_cfg=init_cfg) gcn_kwargs = {k[4:]: v for k, v in kwargs.items() if k[:4] == 'gcn_'} tcn_kwargs = {k[4:]: v for k, v in kwargs.items() if k[:4] == 'tcn_'} @@ -71,10 +74,12 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: @MODELS.register_module() class STGCN(BaseModule): - """ STGCN - A PyTorch implement of : `Spatial Temporal Graph Convolutional - Networks for Skeleton-Based Action Recognition` - - https://arxiv.org/abs/1801.07455 + """STGCN backbone. + + Spatial Temporal Graph Convolutional + Networks for Skeleton-Based Action Recognition. + More details can be found in the `paper + `__ . Args: graph_cfg (dict): Config for building the graph. @@ -107,7 +112,7 @@ class STGCN(BaseModule): >>> model = STGCN(graph_cfg=dict(layout='openpose', mode=mode)) >>> model.init_weights() >>> inputs = torch.randn(batch_size, num_person, - >>> num_frames, num_joints, 3) + ... num_frames, num_joints, 3) >>> output = model(inputs) >>> print(output.shape) >>> @@ -116,7 +121,7 @@ class STGCN(BaseModule): >>> model = STGCN(graph_cfg=dict(layout='nturgb+d', mode=mode)) >>> model.init_weights() >>> inputs = torch.randn(batch_size, num_person, - >>> num_frames, num_joints, 3) + ... num_frames, num_joints, 3) >>> output = model(inputs) >>> print(output.shape) >>> @@ -125,7 +130,7 @@ class STGCN(BaseModule): >>> model = STGCN(graph_cfg=dict(layout='coco', mode=mode)) >>> model.init_weights() >>> inputs = torch.randn(batch_size, num_person, - >>> num_frames, num_joints, 3) + ... num_frames, num_joints, 3) >>> output = model(inputs) >>> print(output.shape) >>> @@ -133,7 +138,7 @@ class STGCN(BaseModule): >>> # add additional residual connection for the first four gcns >>> stage_cfgs = {'gcn_with_res': [True] * 4 + [False] * 6} >>> model = STGCN(graph_cfg=dict(layout='coco', mode=mode), - >>> num_stages=10, stage_cfgs=stage_cfgs) + ... num_stages=10, stage_cfgs=stage_cfgs) >>> model.init_weights() >>> output = model(inputs) >>> print(output.shape) diff --git a/mmaction/models/utils/gcn_utils.py b/mmaction/models/utils/gcn_utils.py index 1e83d03da3..1ce2978e58 100644 --- a/mmaction/models/utils/gcn_utils.py +++ b/mmaction/models/utils/gcn_utils.py @@ -1,10 +1,11 @@ # Copyright (c) OpenMMLab. All rights reserved. +import copy as cp from typing import Dict, List, Optional, Union import torch import torch.nn as nn from mmcv.cnn import build_activation_layer, build_norm_layer -from mmengine.model import BaseModule, Sequential +from mmengine.model import BaseModule, ModuleList, Sequential class unit_gcn(BaseModule): @@ -103,6 +104,155 @@ def forward(self, x: torch.Tensor) -> torch.Tensor: return self.act(self.bn(x) + res) +class unit_aagcn(BaseModule): + """The graph convolution unit of AAGCN. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + A (torch.Tensor): The adjacency matrix defined in the graph + with shape of `(num_subsets, num_joints, num_joints)`. + coff_embedding (int): The coefficient for downscaling the embedding + dimension. Defaults to 4. + adaptive (bool): Whether to use adaptive graph convolutional layer. + Defaults to True. + attention (bool): Whether to use the STC-attention module. + Defaults to True. + init_cfg (dict or list[dict]): Initialization config dict. 
Defaults to + ``[ + dict(type='Constant', layer='BatchNorm2d', val=1, + override=dict(type='Constant', name='bn', val=1e-6)), + dict(type='Kaiming', layer='Conv2d', mode='fan_out'), + dict(type='ConvBranch', name='conv_d') + ]``. + """ + + def __init__( + self, + in_channels: int, + out_channels: int, + A: torch.Tensor, + coff_embedding: int = 4, + adaptive: bool = True, + attention: bool = True, + init_cfg: Optional[Union[Dict, List[Dict]]] = [ + dict( + type='Constant', + layer='BatchNorm2d', + val=1, + override=dict(type='Constant', name='bn', val=1e-6)), + dict(type='Kaiming', layer='Conv2d', mode='fan_out'), + dict(type='ConvBranch', name='conv_d') + ] + ) -> None: + + if attention: + attention_init_cfg = [ + dict( + type='Constant', + layer='Conv1d', + val=0, + override=dict(type='Xavier', name='conv_sa')), + dict( + type='Kaiming', + layer='Linear', + mode='fan_in', + override=dict(type='Constant', val=0, name='fc2c')) + ] + init_cfg = cp.copy(init_cfg) + init_cfg.extend(attention_init_cfg) + + super(unit_aagcn, self).__init__(init_cfg=init_cfg) + inter_channels = out_channels // coff_embedding + self.inter_c = inter_channels + self.out_c = out_channels + self.in_c = in_channels + self.num_subset = A.shape[0] + self.adaptive = adaptive + self.attention = attention + + num_joints = A.shape[-1] + + self.conv_d = ModuleList() + for i in range(self.num_subset): + self.conv_d.append(nn.Conv2d(in_channels, out_channels, 1)) + + if self.adaptive: + self.A = nn.Parameter(A) + + self.alpha = nn.Parameter(torch.zeros(1)) + self.conv_a = ModuleList() + self.conv_b = ModuleList() + for i in range(self.num_subset): + self.conv_a.append(nn.Conv2d(in_channels, inter_channels, 1)) + self.conv_b.append(nn.Conv2d(in_channels, inter_channels, 1)) + else: + self.register_buffer('A', A) + + if self.attention: + self.conv_ta = nn.Conv1d(out_channels, 1, 9, padding=4) + # s attention + ker_joint = num_joints if num_joints % 2 else num_joints - 1 + pad = (ker_joint - 1) // 2 + self.conv_sa = nn.Conv1d(out_channels, 1, ker_joint, padding=pad) + # channel attention + rr = 2 + self.fc1c = nn.Linear(out_channels, out_channels // rr) + self.fc2c = nn.Linear(out_channels // rr, out_channels) + + self.down = lambda x: x + if in_channels != out_channels: + self.down = nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1), + nn.BatchNorm2d(out_channels)) + + self.bn = nn.BatchNorm2d(out_channels) + self.tan = nn.Tanh() + self.sigmoid = nn.Sigmoid() + self.relu = nn.ReLU(inplace=True) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Defines the computation performed at every call.""" + N, C, T, V = x.size() + + y = None + if self.adaptive: + for i in range(self.num_subset): + A1 = self.conv_a[i](x).permute(0, 3, 1, 2).contiguous().view( + N, V, self.inter_c * T) + A2 = self.conv_b[i](x).view(N, self.inter_c * T, V) + A1 = self.tan(torch.matmul(A1, A2) / A1.size(-1)) # N V V + A1 = self.A[i] + A1 * self.alpha + A2 = x.view(N, C * T, V) + z = self.conv_d[i](torch.matmul(A2, A1).view(N, C, T, V)) + y = z + y if y is not None else z + else: + for i in range(self.num_subset): + A1 = self.A[i] + A2 = x.view(N, C * T, V) + z = self.conv_d[i](torch.matmul(A2, A1).view(N, C, T, V)) + y = z + y if y is not None else z + + y = self.relu(self.bn(y) + self.down(x)) + + if self.attention: + # spatial attention first + se = y.mean(-2) # N C V + se1 = self.sigmoid(self.conv_sa(se)) # N 1 V + y = y * se1.unsqueeze(-2) + y + # then temporal attention + se = y.mean(-1) # N C T + se1 = self.sigmoid(self.conv_ta(se)) # N 
1 T + y = y * se1.unsqueeze(-1) + y + # then spatial temporal attention ?? + se = y.mean(-1).mean(-1) # N C + se1 = self.relu(self.fc1c(se)) + se2 = self.sigmoid(self.fc2c(se1)) # N C + y = y * se2.unsqueeze(-1).unsqueeze(-1) + y + # A little bit weird + return y + + class unit_tcn(BaseModule): """The basic unit of temporal convolutional network. diff --git a/tests/models/backbones/test_aagcn.py b/tests/models/backbones/test_aagcn.py new file mode 100644 index 0000000000..a9b998f683 --- /dev/null +++ b/tests/models/backbones/test_aagcn.py @@ -0,0 +1,46 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import torch + +from mmaction.models import AAGCN +from mmaction.utils import register_all_modules + + +def test_aagcn_backbone(): + """Test AAGCN backbone.""" + + register_all_modules() + + mode = 'spatial' + batch_size, num_person, num_frames = 2, 2, 150 + + # openpose-18 layout + num_joints = 18 + model = AAGCN(graph_cfg=dict(layout='openpose', mode=mode)) + model.init_weights() + inputs = torch.randn(batch_size, num_person, num_frames, num_joints, 3) + output = model(inputs) + assert output.shape == torch.Size([2, 2, 256, 38, 18]) + + # nturgb+d layout + num_joints = 25 + model = AAGCN(graph_cfg=dict(layout='nturgb+d', mode=mode)) + model.init_weights() + inputs = torch.randn(batch_size, num_person, num_frames, num_joints, 3) + output = model(inputs) + assert output.shape == torch.Size([2, 2, 256, 38, 25]) + + # coco layout + num_joints = 17 + model = AAGCN(graph_cfg=dict(layout='coco', mode=mode)) + model.init_weights() + inputs = torch.randn(batch_size, num_person, num_frames, num_joints, 3) + output = model(inputs) + assert output.shape == torch.Size([2, 2, 256, 38, 17]) + + # custom settings + # disable the attention module to degenerate AAGCN to AGCN + model = AAGCN( + graph_cfg=dict(layout='coco', mode=mode), gcn_attention=False) + model.init_weights() + output = model(inputs) + assert output.shape == torch.Size([2, 2, 256, 38, 17]) diff --git a/tests/models/recognizers/test_recognizer_gcn.py b/tests/models/recognizers/test_recognizer_gcn.py index dc52de3926..673bc45935 100644 --- a/tests/models/recognizers/test_recognizer_gcn.py +++ b/tests/models/recognizers/test_recognizer_gcn.py @@ -53,3 +53,11 @@ def test_stgcn(): 'stgcn/stgcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py') input_shape = (1, 2, 30, 17, 3) # N M T V C train_test_step(config, input_shape=input_shape) + + +def test_agcn(): + register_all_modules() + config = get_skeletongcn_cfg( + '2s-agcn/2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py') + input_shape = (1, 2, 30, 17, 3) # N M T V C + train_test_step(config, input_shape=input_shape) From 35aa99a77c6e7968a38238c635a9c037c7c5f6b1 Mon Sep 17 00:00:00 2001 From: Kai Hu Date: Sun, 25 Dec 2022 22:34:30 -0500 Subject: [PATCH 43/57] [Doc] Update TSN models' README & metafile (#2122) --- configs/recognition/tsn/README.md | 50 ++++------ ...-32x4d_8xb32-1x1x3-100e_kinetics400-rgb.py | 4 +- configs/recognition/tsn/metafile.yml | 99 ++++++++++--------- ...d-r101_8xb32-1x1x8-100e_kinetics400-rgb.py | 2 +- ...etrained-r50_8xb32-1x1x16-50e_sthv2-rgb.py | 12 ++- ...ed-r50_8xb32-1x1x3-100e_kinetics400-rgb.py | 14 ++- ...ed-r50_8xb32-1x1x5-100e_kinetics400-rgb.py | 6 +- ...ed-r50_8xb32-1x1x8-100e_kinetics400-rgb.py | 6 +- ...retrained-r50_8xb32-1x1x8-50e_sthv2-rgb.py | 14 ++- ..._8xb32-dense-1x1x5-100e_kinetics400-rgb.py | 8 +- 10 files changed, 116 insertions(+), 99 deletions(-) diff --git a/configs/recognition/tsn/README.md b/configs/recognition/tsn/README.md index 
d7fdc81276..d34d1ab433 100644 --- a/configs/recognition/tsn/README.md +++ b/configs/recognition/tsn/README.md @@ -20,20 +20,20 @@ Deep convolutional networks have achieved great success for visual recognition i ### Kinetics-400 -| frame sampling strategy | scheduler | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | config | ckpt | log | -| :---------------------: | :-------: | :------------: | :--: | :-------: | :------: | :------: | :------: | :----------------: | :--------------------------------: | :-------------------------------: | :------------------------------: | -| 1x1x3 | MultiStep | short-side 320 | 8 | ResNet50 | ImageNet | 72.77 | 90.66 | 25 clips x 10 crop | [config](/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb_20220906-cd10898e.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb.log) | -| 1x1x5 | MultiStep | short-side 320 | 8 | ResNet50 | ImageNet | 73.73 | 91.15 | 25 clips x 10 crop | [config](/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x5-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x5-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x5-100e_kinetics400-rgb_20220906-65d68713.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x5-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x5-100e_kinetics400-rgb.log) | -| 1x1x8 | MultiStep | short-side 320 | 8 | ResNet50 | ImageNet | 74.21 | 91.36 | 25 clips x 10 crop | [config](/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb_20220906-2692d16c.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb.log) | -| dense-1x1x5 | MultiStep | short-side 320 | 8 | ResNet50 | ImageNet | 71.37 | 89.66 | 25 clips x 10 crop | [config](/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-dense-1x1x5-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-dense-1x1x5-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-dense-1x1x5-100e_kinetics400-rgb_20220906-dcbc6e01.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-dense-1x1x5-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-dense-1x1x5-100e_kinetics400-rgb.log) | -| 1x1x8 | MultiStep | short-side 320 | 8 | ResNet101 | ImageNet | 75.91 | 92.21 | 25 clips x 10 crop | [config](/configs/recognition/tsn/tsn_imagenet-pretrained-r101_8xb32-1x1x8-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r101_8xb32-1x1x8-100e_kinetics400-rgb/tsn_imagenet-pretrained-r101_8xb32-1x1x8-100e_kinetics400-rgb_20220906-c0d7d41e.pth) | 
[log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r101_8xb32-1x1x8-100e_kinetics400-rgb/tsn_imagenet-pretrained-r101_8xb32-1x1x8-100e_kinetics400-rgb.log) | +| frame sampling strategy | scheduler | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | FLOPs | params | config | ckpt | log | +| :---------------------: | :-------: | :--------: | :--: | :-------: | :------: | :------: | :------: | :----------------: | :----: | :----: | :------------------------------: | -----------------------------: | ----------------------------: | +| 1x1x3 | MultiStep | 224x224 | 8 | ResNet50 | ImageNet | 72.83 | 90.65 | 25 clips x 10 crop | 102.7G | 24.33M | [config](/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb_20220906-cd10898e.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb.log) | +| 1x1x5 | MultiStep | 224x224 | 8 | ResNet50 | ImageNet | 73.80 | 91.21 | 25 clips x 10 crop | 102.7G | 24.33M | [config](/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x5-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x5-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x5-100e_kinetics400-rgb_20220906-65d68713.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x5-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x5-100e_kinetics400-rgb.log) | +| 1x1x8 | MultiStep | 224x224 | 8 | ResNet50 | ImageNet | 74.12 | 91.34 | 25 clips x 10 crop | 102.7G | 24.33M | [config](/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb_20220906-2692d16c.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb.log) | +| dense-1x1x5 | MultiStep | 224x224 | 8 | ResNet50 | ImageNet | 71.37 | 89.67 | 25 clips x 10 crop | 102.7G | 24.33M | [config](/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-dense-1x1x5-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-dense-1x1x5-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-dense-1x1x5-100e_kinetics400-rgb_20220906-dcbc6e01.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-dense-1x1x5-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-dense-1x1x5-100e_kinetics400-rgb.log) | +| 1x1x8 | MultiStep | 224x224 | 8 | ResNet101 | ImageNet | 75.89 | 92.07 | 25 clips x 10 crop | 195.8G | 43.32M | [config](/configs/recognition/tsn/tsn_imagenet-pretrained-r101_8xb32-1x1x8-100e_kinetics400-rgb.py) | 
[ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r101_8xb32-1x1x8-100e_kinetics400-rgb/tsn_imagenet-pretrained-r101_8xb32-1x1x8-100e_kinetics400-rgb_20220906-23cff032.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r101_8xb32-1x1x8-100e_kinetics400-rgb/tsn_imagenet-pretrained-r101_8xb32-1x1x8-100e_kinetics400-rgb.log) | ### Something-Something V2 -| frame sampling strategy | scheduler | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | config | ckpt | log | -| :---------------------: | :-------: | :--------: | :--: | :------: | :------: | :------: | :------: | :----------------: | :----------------------------------: | :--------------------------------: | :--------------------------------: | -| 1x1x8 | MultiStep | height 256 | 8 | ResNet50 | ImageNet | 32.55 | 63.27 | 25 clips x 10 crop | [config](/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-50e_sthv2-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-50e_sthv2-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x8-50e_sthv2-rgb_20221122-ad2dbb37.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-50e_sthv2-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x8-50e_sthv2-rgb.log) | -| 1x1x16 | MultiStep | height 256 | 8 | ResNet50 | ImageNet | 35.22 | 66.13 | 25 clips x 10 crop | [config](/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x16-50e_sthv2-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x16-50e_sthv2-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x16-50e_sthv2-rgb_20221122-ee13c8e2.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x16-50e_sthv2-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x16-50e_sthv2-rgb.log) | +| frame sampling strategy | scheduler | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | FLOPs | params | config | ckpt | log | +| :---------------------: | :-------: | :--------: | :--: | :------: | :------: | :------: | :------: | :----------------: | :----: | :----: | :------------------------------: | -----------------------------: | -----------------------------: | +| 1x1x8 | MultiStep | 224x224 | 8 | ResNet50 | ImageNet | 34.85 | 66.37 | 25 clips x 10 crop | 102.7G | 24.33M | [config](/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-50e_sthv2-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-50e_sthv2-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x8-50e_sthv2-rgb_20221122-ad2dbb37.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-50e_sthv2-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x8-50e_sthv2-rgb.log) | +| 1x1x16 | MultiStep | 224x224 | 8 | ResNet50 | ImageNet | 36.55 | 68.00 | 25 clips x 10 crop | 102.7G | 24.33M | [config](/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x16-50e_sthv2-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x16-50e_sthv2-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x16-50e_sthv2-rgb_20221122-ee13c8e2.pth) | 
[log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x16-50e_sthv2-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x16-50e_sthv2-rgb.log) | ### Using backbones from 3rd-party in TSN @@ -43,30 +43,20 @@ It's possible and convenient to use a 3rd-party backbone for TSN under the frame - [x] Backbones from [TorchVision](https://github.com/pytorch/vision/) - [x] Backbones from [TIMM (pytorch-image-models)](https://github.com/rwightman/pytorch-image-models) -| frame sampling strategy | scheduler | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | config | ckpt | log | -| :---------------------: | :-------: | :------------: | :--: | :--------------: | :------: | :------: | :------: | :----------------: | :------------------------------: | :----------------------------: | :----------------------------: | -| 1x1x3 | MultiStep | short-side 320 | 8 | ResNext101 | ImageNet | 72.79 | 90.40 | 25 clips x 10 crop | [config](/configs/recognition/tsn/custom_backbones/tsn_imagenet-pretrained-rn101-32x4d_8xb32-1x1x3-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-rn101-32x4d_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-rn101-32x4d_8xb32-1x1x3-100e_kinetics400-rgb_20220906-23cff032.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-rn101-32x4d_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-rn101-32x4d_8xb32-1x1x3-100e_kinetics400-rgb.log) | -| 1x1x3 | MultiStep | short-side 320 | 8 | DenseNet161 | ImageNet | 71.83 | 90.02 | 25 clips x 10 crop | [config](/configs/recognition/tsn/custom_backbones/tsn_imagenet-pretrained-dense161_8xb32-1x1x3-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-dense161_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-dense161_8xb32-1x1x3-100e_kinetics400-rgb_20220906-5f4c0daf.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-dense161_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-dense161_8xb32-1x1x3-100e_kinetics400-rgb.log) | -| 1x1x3 | MultiStep | short-side 320 | 8 | Swin Transformer | ImageNet | 76.90 | 92.55 | 25 clips x 10 crop | [config](/configs/recognition/tsn/custom_backbones/tsn_imagenet-pretrained-swin-transformer_8xb32-1x1x3-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-swin-transformer_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-swin-transformer_8xb32-1x1x3-100e_kinetics400-rgb_20220906-65ed814e.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-swin-transformer_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-swin-transformer_8xb32-1x1x3-100e_kinetics400-rgb.log) | +| frame sampling strategy | scheduler | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | FLOPs | params | config | ckpt | log | +| :---------------------: | :-------: | :--------: | :--: | :--------------: | :------: | :------: | :------: | :----------------: | :----: | :----: | :---------------------------: | ---------------------------: | --------------------------: | +| 1x1x3 | MultiStep | 224x224 | 8 | ResNext101 | ImageNet | 72.95 | 90.36 | 25 clips x 10 crop | 200.3G | 42.95M | 
[config](/configs/recognition/tsn/custom_backbones/tsn_imagenet-pretrained-rn101-32x4d_8xb32-1x1x3-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-rn101-32x4d_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-rn101-32x4d_8xb32-1x1x3-100e_kinetics400-rgb_20221209-de2d5615.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-rn101-32x4d_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-rn101-32x4d_8xb32-1x1x3-100e_kinetics400-rgb.log) | +| 1x1x3 | MultiStep | 224x224 | 8 | DenseNet161 | ImageNet | 72.07 | 90.15 | 25 clips x 10 crop | 194.6G | 27.36M | [config](/configs/recognition/tsn/custom_backbones/tsn_imagenet-pretrained-dense161_8xb32-1x1x3-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-dense161_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-dense161_8xb32-1x1x3-100e_kinetics400-rgb_20220906-5f4c0daf.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-dense161_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-dense161_8xb32-1x1x3-100e_kinetics400-rgb.log) | +| 1x1x3 | MultiStep | 224x224 | 8 | Swin Transformer | ImageNet | 77.03 | 92.61 | 25 clips x 10 crop | 386.7G | 87.15M | [config](/configs/recognition/tsn/custom_backbones/tsn_imagenet-pretrained-swin-transformer_8xb32-1x1x3-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-swin-transformer_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-swin-transformer_8xb32-1x1x3-100e_kinetics400-rgb_20220906-65ed814e.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-swin-transformer_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-swin-transformer_8xb32-1x1x3-100e_kinetics400-rgb.log) | 1. Note that some backbones in TIMM are not supported due to multiple reasons. Please refer to to [PR #880](https://github.com/open-mmlab/mmaction2/pull/880) for details. - -2. The **gpus** indicates the number of gpu we used to get the checkpoint. It is noteworthy that the configs we provide are used for 8 gpus as default. - According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you may set the learning rate proportional to the batch size if you use different GPUs or videos per GPU, - e.g., lr=0.01 for 4 GPUs x 2 video/gpu and lr=0.08 for 16 GPUs x 4 video/gpu. - +2. The **gpus** indicates the number of gpus we used to get the checkpoint. If you want to use a different number of gpus or videos per gpu, the best way is to set `--auto-scale-lr` when calling `tools/train.py`, this parameter will auto-scale the learning rate according to the actual batch size and the original batch size. 3. The validation set of Kinetics400 we used consists of 19796 videos. These videos are available at [Kinetics400-Validation](https://mycuhk-my.sharepoint.com/:u:/g/personal/1155136485_link_cuhk_edu_hk/EbXw2WX94J1Hunyt3MWNDJUBz-nHvQYhO9pvKqm6g39PMA?e=a9QldB). The corresponding [data list](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_val_list.txt) (each line is of the format 'video_id, num_frames, label_index') and the [label map](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_class2ind.txt) are also available. 
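The `--auto-scale-lr` behaviour mentioned in note 2 follows a linear scaling of the learning rate by the ratio of the actual global batch size to the `base_batch_size` recorded in the config (256 for these recipes, i.e. 8 GPUs x 32 videos per GPU). A minimal sketch of that computation is shown below; the helper `scale_lr` and the base learning rate of 0.01 are illustrative assumptions, not values taken from the configs.

```python
# Illustrative sketch of the linear scaling behind `--auto-scale-lr`.
# `scale_lr` is a hypothetical helper; the base LR of 0.01 is only an example.

def scale_lr(base_lr, num_gpus, videos_per_gpu, base_batch_size=256):
    """Scale the LR linearly by actual_batch_size / base_batch_size."""
    actual_batch_size = num_gpus * videos_per_gpu
    return base_lr * actual_batch_size / base_batch_size

print(scale_lr(0.01, num_gpus=8, videos_per_gpu=32))  # 0.01   (matches the default 8x32 recipe)
print(scale_lr(0.01, num_gpus=4, videos_per_gpu=16))  # 0.0025 (smaller batch, smaller LR)
```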
For more details on data preparation, you can refer to -- [preparing_ucf101](/tools/data/ucf101/README.md) -- [preparing_kinetics](/tools/data/kinetics/README.md) -- [preparing_sthv1](/tools/data/sthv1/README.md) -- [preparing_sthv2](/tools/data/sthv2/README.md) -- [preparing_mit](/tools/data/mit/README.md) -- [preparing_mmit](/tools/data/mmit/README.md) -- [preparing_hvu](/tools/data/hvu/README.md) -- [preparing_hmdb51](/tools/data/hmdb51/README.md) +- [Kinetics](/tools/data/kinetics/README.md) +- [Something-something V2](/tools/data/sthv2/README.md) ## Train @@ -80,7 +70,7 @@ Example: train TSN model on Kinetics-400 dataset in a deterministic option. ```shell python tools/train.py configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb.py \ - --cfg-options randomness.seed=0 randomness.deterministic=True + --seed=0 --deterministic ``` For more details, you can refer to the **Training** part in the [Training and Test Tutorial](/docs/en/user_guides/4_train_test.md). diff --git a/configs/recognition/tsn/custom_backbones/tsn_imagenet-pretrained-rn101-32x4d_8xb32-1x1x3-100e_kinetics400-rgb.py b/configs/recognition/tsn/custom_backbones/tsn_imagenet-pretrained-rn101-32x4d_8xb32-1x1x3-100e_kinetics400-rgb.py index 318d50e635..ff0f13c3d2 100644 --- a/configs/recognition/tsn/custom_backbones/tsn_imagenet-pretrained-rn101-32x4d_8xb32-1x1x3-100e_kinetics400-rgb.py +++ b/configs/recognition/tsn/custom_backbones/tsn_imagenet-pretrained-rn101-32x4d_8xb32-1x1x3-100e_kinetics400-rgb.py @@ -1,7 +1,7 @@ _base_ = ['../tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb.py'] -checkpoint = 'https://download.openmmlab.com/mmclassification/v0/' \ - 'resnext/resnext50_32x4d_b32x8_imagenet_20210429-56066e27.pth' +checkpoint = ('https://download.openmmlab.com/mmclassification/v0/resnext/' + 'resnext101_32x4d_b32x8_imagenet_20210506-e0fa3dd5.pth') model = dict( backbone=dict( diff --git a/configs/recognition/tsn/metafile.yml b/configs/recognition/tsn/metafile.yml index 5d43dce81a..b4734c93a2 100644 --- a/configs/recognition/tsn/metafile.yml +++ b/configs/recognition/tsn/metafile.yml @@ -13,10 +13,10 @@ Models: Architecture: ResNet50 Batch Size: 32 Epochs: 100 - FLOPs: 102997721600 - Parameters: 24327632 + FLOPs: 102.7G + Parameters: 24.33M Pretrained: ImageNet - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -24,8 +24,8 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 72.77 - Top 5 Accuracy: 90.66 + Top 1 Accuracy: 72.83 + Top 5 Accuracy: 90.65 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb_20220906-cd10898e.pth @@ -36,10 +36,10 @@ Models: Architecture: ResNet50 Batch Size: 32 Epochs: 100 - FLOPs: 32959827968 - Parameters: 24327632 + FLOPs: 102.7G + Parameters: 24.33M Pretrained: ImageNet - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -47,8 +47,8 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 73.73 - Top 5 Accuracy: 91.15 + Top 1 Accuracy: 73.80 + Top 5 Accuracy: 91.21 Training Log: 
https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x5-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x5-100e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x5-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x5-100e_kinetics400-rgb_20220906-65d68713.pth @@ -59,10 +59,10 @@ Models: Architecture: ResNet50 Batch Size: 32 Epochs: 100 - FLOPs: 43048605696 - Parameters: 24327632 + FLOPs: 102.7G + Parameters: 24.33M Pretrained: ImageNet - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -70,8 +70,8 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 74.21 - Top 5 Accuracy: 91.36 + Top 1 Accuracy: 74.12 + Top 5 Accuracy: 91.34 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb_20220906-2692d16c.pth @@ -82,10 +82,10 @@ Models: Architecture: ResNet50 Batch Size: 32 Epochs: 100 - FLOPs: 32959827968 - Parameters: 24327632 + FLOPs: 102.7G + Parameters: 24.33M Pretrained: ImageNet - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -94,7 +94,7 @@ Models: Task: Action Recognition Metrics: Top 1 Accuracy: 71.37 - Top 5 Accuracy: 89.66 + Top 5 Accuracy: 89.67 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-dense-1x1x5-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-dense-1x1x5-100e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-dense-1x1x5-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-dense-1x1x5-100e_kinetics400-rgb_20220906-dcbc6e01.pth @@ -105,10 +105,10 @@ Models: Architecture: ResNet101 Batch Size: 32 Epochs: 100 - FLOPs: 43048605696 - Parameters: 24327632 + FLOPs: 195.8G + Parameters: 43.32M Pretrained: ImageNet - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -116,10 +116,10 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 75.91 - Top 5 Accuracy: 92.21 + Top 1 Accuracy: 75.89 + Top 5 Accuracy: 92.07 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r101_8xb32-1x1x8-100e_kinetics400-rgb/tsn_imagenet-pretrained-r101_8xb32-1x1x8-100e_kinetics400-rgb.log - Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r101_8xb32-1x1x8-100e_kinetics400-rgb/tsn_imagenet-pretrained-r101_8xb32-1x1x8-100e_kinetics400-rgb_20220906-c0d7d41e.pth + Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r101_8xb32-1x1x8-100e_kinetics400-rgb/tsn_imagenet-pretrained-r101_8xb32-1x1x8-100e_kinetics400-rgb_20220906-23cff032.pth - Name: tsn_imagenet-pretrained-rn101-32x4d_8xb32-1x1x3-100e_kinetics400-rgb Config: configs/recognition/tsn/custom_backbones/tsn_imagenet-pretrained-rn101-32x4d_8xb32-1x1x3-100e_kinetics400-rgb.py @@ -128,10 +128,10 @@ Models: Architecture: ResNext101 Batch 
Size: 32 Epochs: 100 - FLOPs: 262238208000 - Parameters: 42948304 + FLOPs: 200.3G + Parameters: 42.95M Pretrained: ImageNet - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -139,11 +139,10 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 72.79 - Top 5 Accuracy: 90.40 + Top 1 Accuracy: 72.95 + Top 5 Accuracy: 90.36 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-rn101-32x4d_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-rn101-32x4d_8xb32-1x1x3-100e_kinetics400-rgb.log - Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-rn101-32x4d_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-rn101-32x4d_8xb32-1x1x3-100e_kinetics400-rgb_20220906-23cff032.pth - + Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-rn101-32x4d_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-rn101-32x4d_8xb32-1x1x3-100e_kinetics400-rgb_20221209-de2d5615.pth - Name: tsn_imagenet-pretrained-dense161_8xb32-1x1x3-100e_kinetics400-rgb Config: configs/recognition/tsn/custom_backbones/tsn_imagenet-pretrained-dense161_8xb32-1x1x3-100e_kinetics400-rgb.py @@ -152,10 +151,10 @@ Models: Architecture: DenseNet161 Batch Size: 32 Epochs: 100 - FLOPs: 255225561600 - Parameters: 27355600 + FLOPs: 194.6G + Parameters: 27.36M Pretrained: ImageNet - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -163,8 +162,8 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 71.83 - Top 5 Accuracy: 90.02 + Top 1 Accuracy: 72.07 + Top 5 Accuracy: 90.15 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-dense161_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-dense161_8xb32-1x1x3-100e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-dense161_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-dense161_8xb32-1x1x3-100e_kinetics400-rgb_20220906-5f4c0daf.pth @@ -175,10 +174,10 @@ Models: Architecture: ResNet50 Batch Size: 32 Epochs: 100 - FLOPs: 32959827968 - Parameters: 87153224 + FLOPs: 386.7G + Parameters: 87.15M Pretrained: ImageNet - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -186,8 +185,8 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 76.90 - Top 5 Accuracy: 92.55 + Top 1 Accuracy: 77.03 + Top 5 Accuracy: 92.61 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-swin-transformer_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-swin-transformer_8xb32-1x1x3-100e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-swin-transformer_8xb32-1x1x3-100e_kinetics400-rgb/tsn_imagenet-pretrained-swin-transformer_8xb32-1x1x3-100e_kinetics400-rgb_20220906-65ed814e.pth @@ -198,8 +197,10 @@ Models: Architecture: ResNet50 Batch Size: 32 Epochs: 100 + FLOPs: 102.7G + Parameters: 23.87M Pretrained: ImageNet - Resolution: height 256 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -207,8 +208,8 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 32.55 - Top 5 
Accuracy: 63.27 + Top 1 Accuracy: 34.85 + Top 5 Accuracy: 66.37 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-50e_sthv2-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x8-50e_sthv2-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-50e_sthv2-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x8-50e_sthv2-rgb_20221122-ad2dbb37.pth @@ -219,8 +220,10 @@ Models: Architecture: ResNet50 Batch Size: 32 Epochs: 100 + FLOPs: 102.7G + Parameters: 23.87M Pretrained: ImageNet - Resolution: height 256 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -228,7 +231,7 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 35.22 - Top 5 Accuracy: 66.13 + Top 1 Accuracy: 36.55 + Top 5 Accuracy: 68.00 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x16-50e_sthv2-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x16-50e_sthv2-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x16-50e_sthv2-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x16-50e_sthv2-rgb_20221122-ee13c8e2.pth diff --git a/configs/recognition/tsn/tsn_imagenet-pretrained-r101_8xb32-1x1x8-100e_kinetics400-rgb.py b/configs/recognition/tsn/tsn_imagenet-pretrained-r101_8xb32-1x1x8-100e_kinetics400-rgb.py index ac380b5f7b..c5a5bb6e7f 100644 --- a/configs/recognition/tsn/tsn_imagenet-pretrained-r101_8xb32-1x1x8-100e_kinetics400-rgb.py +++ b/configs/recognition/tsn/tsn_imagenet-pretrained-r101_8xb32-1x1x8-100e_kinetics400-rgb.py @@ -1,4 +1,4 @@ -_base_ = ['tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb'] +_base_ = ['tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb.py'] model = dict( backbone=dict( diff --git a/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x16-50e_sthv2-rgb.py b/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x16-50e_sthv2-rgb.py index 4fd06b2168..5797a6f596 100644 --- a/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x16-50e_sthv2-rgb.py +++ b/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x16-50e_sthv2-rgb.py @@ -1,7 +1,9 @@ _base_ = ['tsn_imagenet-pretrained-r50_8xb32-1x1x8-50e_sthv2-rgb.py'] +file_client_args = dict(io_backend='disk') + train_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=16), dict(type='DecordDecode'), dict(type='Resize', scale=(-1, 256)), @@ -18,7 +20,7 @@ dict(type='PackActionInputs') ] val_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=1, @@ -32,12 +34,12 @@ dict(type='PackActionInputs') ] test_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=1, frame_interval=1, - num_clips=16, + num_clips=25, test_mode=True), dict(type='DecordDecode'), dict(type='Resize', scale=(-1, 256)), @@ -50,4 +52,4 @@ val_dataloader = dict(dataset=dict(pipeline=val_pipeline)) -test_dataloader = dict(pipeline=test_pipeline, test_mode=True) +test_dataloader = dict(dataset=dict(pipeline=test_pipeline, test_mode=True)) diff --git a/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb.py b/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb.py 
index 72b167edc9..3bea4b9ca7 100644 --- a/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb.py +++ b/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x3-100e_kinetics400-rgb.py @@ -10,8 +10,10 @@ ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' +file_client_args = dict(io_backend='disk') + train_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3), dict(type='DecordDecode'), dict(type='Resize', scale=(-1, 256)), @@ -27,7 +29,7 @@ dict(type='PackActionInputs') ] val_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=1, @@ -41,7 +43,7 @@ dict(type='PackActionInputs') ] test_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=1, @@ -92,3 +94,9 @@ test_evaluator = val_evaluator default_hooks = dict(checkpoint=dict(interval=3, max_keep_ckpts=3)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (32 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=256) diff --git a/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x5-100e_kinetics400-rgb.py b/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x5-100e_kinetics400-rgb.py index 1b0ea01ee5..75db0cf603 100644 --- a/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x5-100e_kinetics400-rgb.py +++ b/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x5-100e_kinetics400-rgb.py @@ -7,8 +7,10 @@ ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' +file_client_args = dict(io_backend='disk') + train_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=5), dict(type='DecordDecode'), dict(type='Resize', scale=(-1, 256)), @@ -24,7 +26,7 @@ dict(type='PackActionInputs') ] val_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=1, diff --git a/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb.py b/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb.py index edc605942a..53e062a396 100644 --- a/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb.py +++ b/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb.py @@ -7,8 +7,10 @@ ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' +file_client_args = dict(io_backend='disk') + train_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), dict(type='DecordDecode'), dict(type='Resize', scale=(-1, 256)), @@ -24,7 +26,7 @@ dict(type='PackActionInputs') ] val_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=1, diff --git a/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-50e_sthv2-rgb.py 
b/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-50e_sthv2-rgb.py index 14189db4b3..39113ba5b3 100644 --- a/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-50e_sthv2-rgb.py +++ b/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-50e_sthv2-rgb.py @@ -12,8 +12,10 @@ ann_file_train = 'data/sthv2/sthv2_train_list_videos.txt' ann_file_val = 'data/sthv2/sthv2_val_list_videos.txt' +file_client_args = dict(io_backend='disk') + train_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), dict(type='DecordDecode'), dict(type='Resize', scale=(-1, 256)), @@ -29,7 +31,7 @@ dict(type='PackActionInputs') ] val_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=1, @@ -43,7 +45,7 @@ dict(type='PackActionInputs') ] test_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=1, @@ -97,3 +99,9 @@ train_cfg = dict( type='EpochBasedTrainLoop', max_epochs=50, val_begin=1, val_interval=5) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (32 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=256) diff --git a/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-dense-1x1x5-100e_kinetics400-rgb.py b/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-dense-1x1x5-100e_kinetics400-rgb.py index 38bf62747d..67e3d8db5f 100644 --- a/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-dense-1x1x5-100e_kinetics400-rgb.py +++ b/configs/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-dense-1x1x5-100e_kinetics400-rgb.py @@ -10,8 +10,10 @@ ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' +file_client_args = dict(io_backend='disk') + train_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict(type='DenseSampleFrames', clip_len=1, frame_interval=1, num_clips=5), dict(type='DecordDecode'), dict(type='Resize', scale=(-1, 256)), @@ -28,7 +30,7 @@ dict(type='PackActionInputs') ] val_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='DenseSampleFrames', clip_len=1, @@ -42,7 +44,7 @@ dict(type='PackActionInputs') ] test_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='DenseSampleFrames', clip_len=1, From 91f8cdc47f78e33024a23ac23247808ee5a98e98 Mon Sep 17 00:00:00 2001 From: Kai Hu Date: Sun, 25 Dec 2022 22:35:08 -0500 Subject: [PATCH 44/57] [Doc] Update TimeSformer models' README & metafile (#2124) --- configs/recognition/timesformer/README.md | 20 +++++++--------- configs/recognition/timesformer/metafile.yml | 24 ++++++++++++------- ...aceOnly_8xb8-8x32x1-15e_kinetics400-rgb.py | 14 ++++++++--- 3 files changed, 35 insertions(+), 23 deletions(-) diff --git a/configs/recognition/timesformer/README.md b/configs/recognition/timesformer/README.md index 8b3fdf2c30..df197e0ba9 100644 --- a/configs/recognition/timesformer/README.md +++ b/configs/recognition/timesformer/README.md @@ -20,19 +20,17 @@ We present a convolution-free approach to video classification built exclusively ### Kinetics-400 -| frame sampling strategy | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | 
inference_time(video/s) | gpu_mem(M) | config | ckpt | log | -| :---------------------: | :------------: | :--: | :---------------------: | :----------: | :------: | :------: | :---------------------: | :--------: | :------------------------: | :-----------------------: | :----------------------: | -| 8x32x1 | short-side 320 | 8 | TimeSformer (divST) | ImageNet-21K | 77.96 | 93.57 | x | 15235 | [config](/configs/recognition/timesformer/timesformer_divST_8xb8-8x32x1-15e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/timesformer/timesformer_divST_8xb8-8x32x1-15e_kinetics400-rgb/timesformer_divST_8xb8-8x32x1-15e_kinetics400-rgb_20220815-a4d0d01f.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/timesformer/timesformer_divST_8xb8-8x32x1-15e_kinetics400-rgb/timesformer_divST_8xb8-8x32x1-15e_kinetics400-rgb.log) | -| 8x32x1 | short-side 320 | 8 | TimeSformer (jointST) | ImageNet-21K | 76.93 | 93.27 | x | 33358 | [config](/configs/recognition/timesformer/timesformer_jointST_8xb8-8x32x1-15e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/timesformer/timesformer_jointST_8xb8-8x32x1-15e_kinetics400-rgb/timesformer_jointST_8xb8-8x32x1-15e_kinetics400-rgb_20220815-8022d1c0.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/timesformer/timesformer_jointST_8xb8-8x32x1-15e_kinetics400-rgb/timesformer_jointST_8xb8-8x32x1-15e_kinetics400-rgb.log) | -| 8x32x1 | short-side 320 | 8 | TimeSformer (spaceOnly) | ImageNet-21K | 76.98 | 92.83 | x | 12355 | [config](/configs/recognition/timesformer/timesformer_spaceOnly_8xb8-8x32x1-15e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/timesformer/timesformer_spaceOnly_8xb8-8x32x1-15e_kinetics400-rgb/timesformer_spaceOnly_8xb8-8x32x1-15e_kinetics400-rgb_20220815-78f05367.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/timesformer/timesformer_spaceOnly_8xb8-8x32x1-15e_kinetics400-rgb/timesformer_spaceOnly_8xb8-8x32x1-15e_kinetics400-rgb.log) | - -1. The **gpus** indicates the number of gpu (80G A100) we used to get the checkpoint. It is noteworthy that the configs we provide are used for 8 gpus as default. - According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you may set the learning rate proportional to the batch size if you use different GPUs or videos per GPU, - e.g., lr=0.005 for 8 GPUs x 8 videos/gpu and lr=0.00375 for 8 GPUs x 6 videos/gpu. 
+| frame sampling strategy | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | FLOPs | params | config | ckpt | log | +| :---------------------: | :--------: | :--: | :---------------------: | :----------: | :------: | :------: | :--------------: | :---: | :----: | :----------------------------: | :--------------------------: | :-------------------------: | +| 8x32x1 | 224x224 | 8 | TimeSformer (divST) | ImageNet-21K | 77.69 | 93.45 | 1 clip x 3 crop | 196G | 122M | [config](/configs/recognition/timesformer/timesformer_divST_8xb8-8x32x1-15e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/timesformer/timesformer_divST_8xb8-8x32x1-15e_kinetics400-rgb/timesformer_divST_8xb8-8x32x1-15e_kinetics400-rgb_20220815-a4d0d01f.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/timesformer/timesformer_divST_8xb8-8x32x1-15e_kinetics400-rgb/timesformer_divST_8xb8-8x32x1-15e_kinetics400-rgb.log) | +| 8x32x1 | 224x224 | 8 | TimeSformer (jointST) | ImageNet-21K | 76.95 | 93.28 | 1 clip x 3 crop | 180G | 86.11M | [config](/configs/recognition/timesformer/timesformer_jointST_8xb8-8x32x1-15e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/timesformer/timesformer_jointST_8xb8-8x32x1-15e_kinetics400-rgb/timesformer_jointST_8xb8-8x32x1-15e_kinetics400-rgb_20220815-8022d1c0.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/timesformer/timesformer_jointST_8xb8-8x32x1-15e_kinetics400-rgb/timesformer_jointST_8xb8-8x32x1-15e_kinetics400-rgb.log) | +| 8x32x1 | 224x224 | 8 | TimeSformer (spaceOnly) | ImageNet-21K | 76.93 | 92.88 | 1 clip x 3 crop | 141G | 86.11M | [config](/configs/recognition/timesformer/timesformer_spaceOnly_8xb8-8x32x1-15e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/timesformer/timesformer_spaceOnly_8xb8-8x32x1-15e_kinetics400-rgb/timesformer_spaceOnly_8xb8-8x32x1-15e_kinetics400-rgb_20220815-78f05367.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/timesformer/timesformer_spaceOnly_8xb8-8x32x1-15e_kinetics400-rgb/timesformer_spaceOnly_8xb8-8x32x1-15e_kinetics400-rgb.log) | + +1. The **gpus** indicates the number of gpus we used to get the checkpoint. If you want to use a different number of gpus or videos per gpu, the best way is to set `--auto-scale-lr` when calling `tools/train.py`, this parameter will auto-scale the learning rate according to the actual batch size and the original batch size. 2. We keep the test setting with the [original repo](https://github.com/facebookresearch/TimeSformer) (three crop x 1 clip). 3. The pretrained model `vit_base_patch16_224.pth` used by TimeSformer was converted from [vision_transformer](https://github.com/google-research/vision_transformer). -For more details on data preparation, you can refer to the **Prepare videos** part in the [Data Preparation Tutorial](/docs/en/user_guides/2_data_prepare.md). +For more details on data preparation, you can refer to [Kinetics400](/tools/data/kinetics/README.md). ## Train @@ -46,7 +44,7 @@ Example: train TimeSformer model on Kinetics-400 dataset in a deterministic opti ```shell python tools/train.py configs/recognition/timesformer/timesformer_divST_8xb8-8x32x1-15e_kinetics400-rgb.py \ - --cfg-options randomness.seed=0 randomness.deterministic=True + --seed=0 --deterministic ``` For more details, you can refer to the **Training** part in the [Training and Test Tutorial](/docs/en/user_guides/4_train_test.md). 
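The updated train command above replaces the old `--cfg-options randomness.seed=0 randomness.deterministic=True` form with the dedicated `--seed` and `--deterministic` flags. As a rough sketch only (not the actual `tools/train.py` argument handling), the two forms can be thought of as filling the same `randomness` field of the config:

```python
# Rough sketch: how `--seed` / `--deterministic` map onto the same
# `randomness` config field that the old `--cfg-options` form set directly.
# This is an assumption for illustration, not the real tools/train.py code.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--seed', type=int, default=None)
parser.add_argument('--deterministic', action='store_true')
args = parser.parse_args(['--seed=0', '--deterministic'])

randomness = dict(seed=args.seed, deterministic=args.deterministic)
print(randomness)  # {'seed': 0, 'deterministic': True}
```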
diff --git a/configs/recognition/timesformer/metafile.yml b/configs/recognition/timesformer/metafile.yml index 7f7edd40eb..f144b647e3 100644 --- a/configs/recognition/timesformer/metafile.yml +++ b/configs/recognition/timesformer/metafile.yml @@ -14,7 +14,9 @@ Models: Batch Size: 8 Epochs: 15 Pretrained: ImageNet-21K - Resolution: short-side 320 + Resolution: 224x224 + FLOPs: 196G + params: 122M Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -22,8 +24,8 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 77.96 - Top 5 Accuracy: 93.57 + Top 1 Accuracy: 77.69 + Top 5 Accuracy: 93.45 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/timesformer/timesformer_divST_8xb8-8x32x1-15e_kinetics400-rgb/timesformer_divST_8xb8-8x32x1-15e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/timesformer/timesformer_divST_8xb8-8x32x1-15e_kinetics400-rgb/timesformer_divST_8xb8-8x32x1-15e_kinetics400-rgb_20220815-a4d0d01f.pth @@ -35,7 +37,9 @@ Models: Batch Size: 8 Epochs: 15 Pretrained: ImageNet-21K - Resolution: short-side 320 + Resolution: 224x224 + FLOPs: 180G + params: 86.11M Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -43,8 +47,8 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 76.93 - Top 5 Accuracy: 93.27 + Top 1 Accuracy: 76.95 + Top 5 Accuracy: 93.28 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/timesformer/timesformer_jointST_8xb8-8x32x1-15e_kinetics400-rgb/timesformer_jointST_8xb8-8x32x1-15e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/timesformer/timesformer_jointST_8xb8-8x32x1-15e_kinetics400-rgb/timesformer_jointST_8xb8-8x32x1-15e_kinetics400-rgb_20220815-8022d1c0.pth @@ -56,7 +60,9 @@ Models: Batch Size: 8 Epochs: 15 Pretrained: ImageNet-21K - Resolution: short-side 320 + Resolution: 224x224 + FLOPs: 141G + params: 86.11M Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -64,7 +70,7 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 76.98 - Top 5 Accuracy: 92.83 + Top 1 Accuracy: 76.93 + Top 5 Accuracy: 92.88 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/timesformer/timesformer_spaceOnly_8xb8-8x32x1-15e_kinetics400-rgb/timesformer_spaceOnly_8xb8-8x32x1-15e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/timesformer/timesformer_spaceOnly_8xb8-8x32x1-15e_kinetics400-rgb/timesformer_spaceOnly_8xb8-8x32x1-15e_kinetics400-rgb_20220815-78f05367.pth diff --git a/configs/recognition/timesformer/timesformer_spaceOnly_8xb8-8x32x1-15e_kinetics400-rgb.py b/configs/recognition/timesformer/timesformer_spaceOnly_8xb8-8x32x1-15e_kinetics400-rgb.py index b969a33d0e..e4379bee0c 100644 --- a/configs/recognition/timesformer/timesformer_spaceOnly_8xb8-8x32x1-15e_kinetics400-rgb.py +++ b/configs/recognition/timesformer/timesformer_spaceOnly_8xb8-8x32x1-15e_kinetics400-rgb.py @@ -35,8 +35,10 @@ ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt' +file_client_args = dict(io_backend='disk') + train_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict(type='SampleFrames', clip_len=8, frame_interval=32, num_clips=1), dict(type='DecordDecode'), dict(type='RandomRescale', scale_range=(256, 320)), @@ -46,7 +48,7 @@ dict(type='PackActionInputs') 
] val_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=8, @@ -60,7 +62,7 @@ dict(type='PackActionInputs') ] test_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=8, @@ -136,3 +138,9 @@ ] default_hooks = dict(checkpoint=dict(interval=5)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (8 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=64) From a0fbbbfcdc9113c7036b8abd3b97ff6f473ef740 Mon Sep 17 00:00:00 2001 From: cir7 <33249023+cir7@users.noreply.github.com> Date: Mon, 26 Dec 2022 11:35:52 +0800 Subject: [PATCH 45/57] fix mvit readme (#2125) --- configs/recognition/mvit/README.md | 25 ++++++++++++++----------- configs/recognition/mvit/metafile.yml | 16 ++++++++-------- 2 files changed, 22 insertions(+), 19 deletions(-) diff --git a/configs/recognition/mvit/README.md b/configs/recognition/mvit/README.md index ccd9611c2d..d040cb1de4 100644 --- a/configs/recognition/mvit/README.md +++ b/configs/recognition/mvit/README.md @@ -27,26 +27,29 @@ well as 86.1% on Kinetics-400 video classification. ### Kinetics-400 -| frame sampling strategy | resolution | backbone | pretrain | top1 acc | top5 acc | reference top1 acc | reference top1 acc | testing protocol | FLOPs | params | config | ckpt | -| :---------------------: | :------------: | :--------: | :----------: | :------: | :------: | :-----------------------------: | :-----------------------------: | :--------------: | :---: | :----: | :-----------------: | :---------------: | -| 16x4x1 | short-side 320 | MViTv2-S\* | From scratch | 81.1 | 94.7 | [81.0](https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md) | [94.6](https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md) | 5 clips x 1 crop | 64G | 34.5M | [config](/configs/recognition/mvit/mvit-small-p244_16x4x1_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/mvit/converted/mvit-small-p244_16x4x1_kinetics400-rgb_20221021-9ebaaeed.pth) | -| 32x3x1 | short-side 320 | MViTv2-B\* | From scratch | 82.6 | 95.8 | [82.9](https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md) | [95.7](https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md) | 5 clips x 1 crop | 225G | 51.2M | [config](/configs/recognition/mvit/mvit-base-p244_32x3x1_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/mvit/converted/mvit-base-p244_32x3x1_kinetics400-rgb_20221021-f392cd2d.pth) | -| 40x3x1 | short-side 320 | MViTv2-L\* | From scratch | 85.4 | 96.2 | [86.1](https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md) | [97.0](https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md) | 5 clips x 3 crop | 2828G | 213M | [config](/configs/recognition/mvit/mvit-large-p244_40x3x1_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/mvit/converted/mvit-large-p244_40x3x1_kinetics400-rgb_20221021-11fe1f97.pth) | +| frame sampling strategy | resolution | backbone | pretrain | top1 acc | top5 acc | reference top1 acc | reference top1 acc | testing protocol | FLOPs | params | config | ckpt | +| :---------------------: | :--------: | :--------: | :----------: | :------: | :------: | 
:------------------------------: | :------------------------------: | :--------------: | :---: | :----: | :------------------: | :----------------: | +| 16x4x1 | 224x224 | MViTv2-S\* | From scratch | 81.1 | 94.7 | [81.0](https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md) | [94.6](https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md) | 5 clips x 1 crop | 64G | 34.5M | [config](/configs/recognition/mvit/mvit-small-p244_16x4x1_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/mvit/converted/mvit-small-p244_16x4x1_kinetics400-rgb_20221021-9ebaaeed.pth) | +| 32x3x1 | 224x224 | MViTv2-B\* | From scratch | 82.6 | 95.8 | [82.9](https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md) | [95.7](https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md) | 5 clips x 1 crop | 225G | 51.2M | [config](/configs/recognition/mvit/mvit-base-p244_32x3x1_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/mvit/converted/mvit-base-p244_32x3x1_kinetics400-rgb_20221021-f392cd2d.pth) | +| 40x3x1 | 312x312 | MViTv2-L\* | From scratch | 85.4 | 96.2 | [86.1](https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md) | [97.0](https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md) | 5 clips x 3 crop | 2828G | 213M | [config](/configs/recognition/mvit/mvit-large-p244_40x3x1_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/mvit/converted/mvit-large-p244_40x3x1_kinetics400-rgb_20221021-11fe1f97.pth) | ### Something-Something V2 -| frame sampling strategy | resolution | backbone | pretrain | top1 acc | top5 acc | reference top1 acc | reference top1 acc | testing protocol | FLOPs | params | config | ckpt | -| :---------------------: | :------------: | :--------: | :----------: | :------: | :------: | :----------------------------: | :-----------------------------: | :---------------: | :---: | :----: | :-----------------: | :---------------: | -| uniform 16 | short-side 320 | MViTv2-S\* | K400 | 68.1 | 91.0 | [68.2](https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md) | [91.4](https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md) | 1 clips x 3 crops | 64G | 34.4M | [config](/configs/recognition/mvit/mvit-small-p244_u16_sthv2-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/mvit/converted/mvit-small-p244_u16_sthv2-rgb_20221021-65ecae7d.pth) | -| uniform 32 | short-side 320 | MViTv2-B\* | K400 | 70.8 | 92.7 | [70.5](https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md) | [92.7](https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md) | 1 clips x 3 crops | 225G | 51.1M | [config](/configs/recognition/mvit/mvit-base-p244_u32_sthv2-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/mvit/converted/mvit-base-p244_u32_sthv2-rgb_20221021-d5de5da6.pth) | -| uniform 40 | short-side 320 | MViTv2-L\* | IN21K + K400 | 73.2 | 94.0 | [73.3](https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md) | [94.0](https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md) | 1 clips x 3 crops | 2828G | 213M | [config](/configs/recognition/mvit/mvit-large-p244_u40_sthv2-rgb.py) | 
[ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/mvit/converted/mvit-large-p244_u40_sthv2-rgb_20221021-61696e07.pth) | +| frame sampling strategy | resolution | backbone | pretrain | top1 acc | top5 acc | reference top1 acc | reference top1 acc | testing protocol | FLOPs | params | config | ckpt | +| :---------------------: | :--------: | :--------: | :----------: | :------: | :------: | :------------------------------: | :------------------------------: | :--------------: | :---: | :----: | :------------------: | :----------------: | +| uniform 16 | 224x224 | MViTv2-S\* | K400 | 68.1 | 91.0 | [68.2](https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md) | [91.4](https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md) | 1 clips x 3 crop | 64G | 34.4M | [config](/configs/recognition/mvit/mvit-small-p244_u16_sthv2-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/mvit/converted/mvit-small-p244_u16_sthv2-rgb_20221021-65ecae7d.pth) | +| uniform 32 | 224x224 | MViTv2-B\* | K400 | 70.8 | 92.7 | [70.5](https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md) | [92.7](https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md) | 1 clips x 3 crop | 225G | 51.1M | [config](/configs/recognition/mvit/mvit-base-p244_u32_sthv2-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/mvit/converted/mvit-base-p244_u32_sthv2-rgb_20221021-d5de5da6.pth) | +| uniform 40 | 312x312 | MViTv2-L\* | IN21K + K400 | 73.2 | 94.0 | [73.3](https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md) | [94.0](https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md) | 1 clips x 3 crop | 2828G | 213M | [config](/configs/recognition/mvit/mvit-large-p244_u40_sthv2-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/mvit/converted/mvit-large-p244_u40_sthv2-rgb_20221021-61696e07.pth) | *Models with * are ported from the repo [SlowFast](https://github.com/facebookresearch/SlowFast/) and tested on our data. Currently, we only support the testing of MViT models, training will be available soon.* 1. The values in columns named after "reference" are copied from paper 2. The validation set of Kinetics400 we used consists of 19796 videos. These videos are available at [Kinetics400-Validation](https://mycuhk-my.sharepoint.com/:u:/g/personal/1155136485_link_cuhk_edu_hk/EbXw2WX94J1Hunyt3MWNDJUBz-nHvQYhO9pvKqm6g39PMA?e=a9QldB). The corresponding [data list](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_val_list.txt) (each line is of the format 'video_id, num_frames, label_index') and the [label map](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_class2ind.txt) are also available. -For more details on data preparation, you can refer to [Kinetics400](/tools/data/kinetics/README.md). 
+For more details on data preparation, you can refer to + +- [Kinetics](/tools/data/kinetics/README.md) +- [Something-something V2](/tools/data/sthv2/README.md) ## Test diff --git a/configs/recognition/mvit/metafile.yml b/configs/recognition/mvit/metafile.yml index c5d7107482..888fa24732 100644 --- a/configs/recognition/mvit/metafile.yml +++ b/configs/recognition/mvit/metafile.yml @@ -11,7 +11,7 @@ Models: In Collection: MViT Metadata: Architecture: MViT-small - Resolution: short-side 320 + Resolution: 224x224 Modality: RGB Converted From: Weights: https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md @@ -29,7 +29,7 @@ Models: In Collection: MViT Metadata: Architecture: MViT-base - Resolution: short-side 320 + Resolution: 224x224 Modality: RGB Converted From: Weights: https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md @@ -47,7 +47,7 @@ Models: In Collection: MViT Metadata: Architecture: MViT-large - Resolution: short-side 446 + Resolution: 312x312 Modality: RGB Converted From: Weights: https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md @@ -65,7 +65,7 @@ Models: In Collection: MViT Metadata: Architecture: MViT-small - Resolution: short-side 320 + Resolution: 224x224 Modality: RGB Converted From: Weights: https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md @@ -82,8 +82,8 @@ Models: Config: configs/recognition/mvit/mvit-base-p244_u32_sthv2-rgb.py In Collection: MViT Metadata: - Architecture: MViT-small - Resolution: short-side 320 + Architecture: MViT-base + Resolution: 224x224 Modality: RGB Converted From: Weights: https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md @@ -100,8 +100,8 @@ Models: Config: configs/recognition/mvit/mvit-large-p244_u40_sthv2-rgb.py In Collection: MViT Metadata: - Architecture: MViT-small - Resolution: short-side 446 + Architecture: MViT-large + Resolution: 312x312 Modality: RGB Converted From: Weights: https://github.com/facebookresearch/SlowFast/blob/main/projects/mvitv2/README.md From 2b7495d910c19f91844f2c390b09cf193649c639 Mon Sep 17 00:00:00 2001 From: Kai Hu Date: Sun, 25 Dec 2022 22:37:10 -0500 Subject: [PATCH 46/57] [Doc] Update SlowOnly models' README & metafile (#2126) Co-authored-by: wxDai --- configs/recognition/slowonly/README.md | 37 ++++++++----------- configs/recognition/slowonly/metafile.yml | 36 +++++++++--------- ...xb16-4x16x1-steplr-150e_kinetics700-rgb.py | 20 ++++++---- ...6xb16-8x8x1-steplr-150e_kinetics700-rgb.py | 8 ++-- ...y_r50_8xb16-4x16x1-256e_kinetics400-rgb.py | 20 ++++++---- ...ly_r50_8xb16-8x8x1-256e_kinetics400-rgb.py | 8 ++-- 6 files changed, 68 insertions(+), 61 deletions(-) diff --git a/configs/recognition/slowonly/README.md b/configs/recognition/slowonly/README.md index 0e0dc178e1..bf5ce3781d 100644 --- a/configs/recognition/slowonly/README.md +++ b/configs/recognition/slowonly/README.md @@ -20,32 +20,27 @@ We present SlowFast networks for video recognition. 
Our model involves (i) a Slo ### Kinetics-400 -| frame sampling strategy | scheduler | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | inference time(video/s) | gpu_mem(M) | config | ckpt | log | -| :---------------------: | :--------------: | :------------: | :--: | :------------------: | :------: | :------: | :------: | :---------------: | :---------------------: | :--------: | :----------------: | :--------------: | :-------------: | -| 4x16x1 | Linear+Cosine | short-side 320 | 8 | ResNet50 | None | 72.97 | 90.88 | 10 clips x 3 crop | x | 5799 | [config](/configs/recognition/slowonly/slowonly_r50_8xb16-4x16x1-256e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r50_8xb16-4x16x1-256e_kinetics400-rgb/slowonly_r50_4x16x1_256e_8xb16_kinetics400-rgb_20220901-f6a40d08.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r50_8xb16-4x16x1-256e_kinetics400-rgb/slowonly_r50_4x16x1_256e_8xb16_kinetics400-rgb.log) | -| 8x8x1 | Linear+Cosine | short-side 320 | 8 | ResNet50 | None | 75.15 | 92.11 | 10 clips x 3 crop | x | 11089 | [config](/configs/recognition/slowonly/slowonly_r50_8xb16-8x8x1-256e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r50_8xb16-8x8x1-256e_kinetics400-rgb/slowonly_r50_8xb16-8x8x1-256e_kinetics400-rgb_20220901-2132fc87.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r50_8xb16-8x8x1-256e_kinetics400-rgb/slowonly_r50_8xb16-8x8x1-256e_kinetics400-rgb.log) | -| 8x8x1 | Linear+Cosine | short-side 320 | 8 | ResNet101 | None | 76.59 | 92.80 | 10 clips x 3 crop | x | 16516 | [config](/configs/recognition/slowonly/slowonly_r101_8xb16-8x8x1-196e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r101_8xb16-8x8x1-196e_kinetics400-rgb/slowonly_r101_8xb16-8x8x1-196e_kinetics400-rgb_20220901-e6281431.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r101_8xb16-8x8x1-196e_kinetics400-rgb/slowonly_r101_8xb16-8x8x1-196e_kinetics400-rgb.log) | -| 4x16x1 | Linear+MultiStep | short-side 320 | 8 | ResNet50 | ImageNet | 75.12 | 91.72 | 10 clips x 3 crop | x | 5797 | [config](/configs/recognition/slowonly/slowonly_imagenet-pretrained-r50_8xb16-4x16x1-steplr-150e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_imagenet-pretrained-r50_8xb16-4x16x1-steplr-150e_kinetics400-rgb/slowonly_imagenet-pretrained-r50_8xb16-4x16x1-steplr-150e_kinetics400-rgb_20220901-e7b65fad.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_imagenet-pretrained-r50_8xb16-4x16x1-steplr-150e_kinetics400-rgb/slowonly_imagenet-pretrained-r50_8xb16-4x16x1-steplr-150e_kinetics400-rgb.log) | -| 8x8x1 | Linear+MultiStep | short-side 320 | 8 | ResNet50 | ImageNet | 76.45 | 92.55 | 10 clips x 3 crop | x | 11089 | [config](/configs/recognition/slowonly/slowonly_imagenet-pretrained-r50_8xb16-8x8x1-steplr-150e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_imagenet-pretrained-r50_8xb16-8x8x1-steplr-150e_kinetics400-rgb/slowonly_imagenet-pretrained-r50_8xb16-8x8x1-steplr-150e_kinetics400-rgb_20220901-df42dc84.pth) | 
[log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_imagenet-pretrained-r50_8xb16-8x8x1-steplr-150e_kinetics400-rgb/slowonly_imagenet-pretrained-r50_8xb16-8x8x1-steplr-150e_kinetics400-rgb.log) | -| 4x16x1 | Linear+MultiStep | short-side 320 | 8 | ResNet50 (NonLocalEmbedGauss) | ImageNet | 75.07 | 91.69 | 10 clips x 3 crop | x | 8198 | [config](/configs/recognition/slowonly/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-4x16x1-steplr-150e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-4x16x1-steplr-150e_kinetics400-rgb/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-4x16x1-steplr-150e_kinetics400-rgb_20220901-cf739c75.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-4x16x1-steplr-150e_kinetics400-rgb/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-4x16x1-steplr-150e_kinetics400-rgb.log) | -| 8x8x1 | Linear+MultiStep | short-side 320 | 8 | ResNet50 (NonLocalEmbedGauss) | ImageNet | 76.65 | 92.47 | 10 clips x 3 crop | x | 17087 | [config](/configs/recognition/slowonly/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-8x8x1-steplr-150e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-8x8x1-steplr-150e_kinetics400-rgb/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-8x8x1-steplr-150e_kinetics400-rgb_20220901-df42dc84.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-8x8x1-steplr-150e_kinetics400-rgb/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-8x8x1-steplr-150e_kinetics400-rgb.log) | +| frame sampling strategy | scheduler | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | FLOPs | params | config | ckpt | log | +| :---------------------: | :--------------: | :--------: | :--: | :------------------------: | :------: | :------: | :------: | :---------------: | :----: | :----: | :----------------------: | :--------------------: | :--------------------: | +| 4x16x1 | Linear+Cosine | 224x224 | 8 | ResNet50 | None | 72.97 | 90.88 | 10 clips x 3 crop | 27.38G | 32.45M | [config](/configs/recognition/slowonly/slowonly_r50_8xb16-4x16x1-256e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r50_8xb16-4x16x1-256e_kinetics400-rgb/slowonly_r50_4x16x1_256e_8xb16_kinetics400-rgb_20220901-f6a40d08.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r50_8xb16-4x16x1-256e_kinetics400-rgb/slowonly_r50_4x16x1_256e_8xb16_kinetics400-rgb.log) | +| 8x8x1 | Linear+Cosine | 224x224 | 8 | ResNet50 | None | 75.15 | 92.11 | 10 clips x 3 crop | 54.75G | 32.45M | [config](/configs/recognition/slowonly/slowonly_r50_8xb16-8x8x1-256e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r50_8xb16-8x8x1-256e_kinetics400-rgb/slowonly_r50_8xb16-8x8x1-256e_kinetics400-rgb_20220901-2132fc87.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r50_8xb16-8x8x1-256e_kinetics400-rgb/slowonly_r50_8xb16-8x8x1-256e_kinetics400-rgb.log) | +| 8x8x1 | Linear+Cosine | 224x224 | 8 | ResNet101 | None | 76.59 | 92.80 | 10 clips x 3 crop | 112G | 60.36M | [config](/configs/recognition/slowonly/slowonly_r101_8xb16-8x8x1-196e_kinetics400-rgb.py) | 
[ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r101_8xb16-8x8x1-196e_kinetics400-rgb/slowonly_r101_8xb16-8x8x1-196e_kinetics400-rgb_20220901-e6281431.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r101_8xb16-8x8x1-196e_kinetics400-rgb/slowonly_r101_8xb16-8x8x1-196e_kinetics400-rgb.log) | +| 4x16x1 | Linear+MultiStep | 224x224 | 8 | ResNet50 | ImageNet | 75.12 | 91.72 | 10 clips x 3 crop | 27.38G | 32.45M | [config](/configs/recognition/slowonly/slowonly_imagenet-pretrained-r50_8xb16-4x16x1-steplr-150e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_imagenet-pretrained-r50_8xb16-4x16x1-steplr-150e_kinetics400-rgb/slowonly_imagenet-pretrained-r50_8xb16-4x16x1-steplr-150e_kinetics400-rgb_20220901-e7b65fad.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_imagenet-pretrained-r50_8xb16-4x16x1-steplr-150e_kinetics400-rgb/slowonly_imagenet-pretrained-r50_8xb16-4x16x1-steplr-150e_kinetics400-rgb.log) | +| 8x8x1 | Linear+MultiStep | 224x224 | 8 | ResNet50 | ImageNet | 76.45 | 92.55 | 10 clips x 3 crop | 54.75G | 32.45M | [config](/configs/recognition/slowonly/slowonly_imagenet-pretrained-r50_8xb16-8x8x1-steplr-150e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_imagenet-pretrained-r50_8xb16-8x8x1-steplr-150e_kinetics400-rgb/slowonly_imagenet-pretrained-r50_8xb16-8x8x1-steplr-150e_kinetics400-rgb_20220901-df42dc84.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_imagenet-pretrained-r50_8xb16-8x8x1-steplr-150e_kinetics400-rgb/slowonly_imagenet-pretrained-r50_8xb16-8x8x1-steplr-150e_kinetics400-rgb.log) | +| 4x16x1 | Linear+MultiStep | 224x224 | 8 | ResNet50 (NonLocalEmbedGauss) | ImageNet | 75.07 | 91.69 | 10 clips x 3 crop | 43.23G | 39.81M | [config](/configs/recognition/slowonly/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-4x16x1-steplr-150e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-4x16x1-steplr-150e_kinetics400-rgb/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-4x16x1-steplr-150e_kinetics400-rgb_20220901-cf739c75.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-4x16x1-steplr-150e_kinetics400-rgb/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-4x16x1-steplr-150e_kinetics400-rgb.log) | +| 8x8x1 | Linear+MultiStep | 224x224 | 8 | ResNet50 (NonLocalEmbedGauss) | ImageNet | 76.65 | 92.47 | 10 clips x 3 crop | 96.66G | 39.81M | [config](/configs/recognition/slowonly/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-8x8x1-steplr-150e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-8x8x1-steplr-150e_kinetics400-rgb/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-8x8x1-steplr-150e_kinetics400-rgb_20220901-df42dc84.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-8x8x1-steplr-150e_kinetics400-rgb/slowonly_r50-in1k-pre-nl-embedded-gaussian_8xb16-8x8x1-steplr-150e_kinetics400-rgb.log) | ### Kinetics-700 -| frame sampling strategy | scheduler | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | inference time(video/s) | gpu_mem(M) | config | ckpt | log | -| 
:---------------------: | :--------------: | :------------: | :--: | :------: | :------: | :------: | :------: | :---------------: | :---------------------: | :--------: | :--------------------: | :------------------: | :-----------------: | -| 4x16x1 | Linear+MultiStep | short-side 320 | 8x2 | ResNet50 | ImageNet | 65.52 | 86.39 | 10 clips x 3 crop | x | 5826 | [config](/configs/recognition/slowonly/slowonly_imagenet-pretrained-r50_16xb16-4x16x1-steplr-150e_kinetics700-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_imagenet-pretrained-r50_16xb16-4x16x1-steplr-150e_kinetics700-rgb/slowonly_imagenet-pretrained-r50_16xb16-4x16x1-steplr-150e_kinetics700-rgb_20221013-98b1b0a7.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_imagenet-pretrained-r50_16xb16-4x16x1-steplr-150e_kinetics700-rgb/slowonly_imagenet-pretrained-r50_16xb16-4x16x1-steplr-150e_kinetics700-rgb.log) | -| 8x8x1 | Linear+MultiStep | short-side 320 | 8x2 | ResNet50 | ImageNet | 67.67 | 87.80 | 10 clips x 3 crop | x | 11089 | [config](/configs/recognition/slowonly/slowonly_imagenet-pretrained-r50_16xb16-8x8x1-steplr-150e_kinetics700-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_imagenet-pretrained-r50_16xb16-8x8x1-steplr-150e_kinetics700-rgb/slowonly_imagenet-pretrained-r50_16xb16-8x8x1-steplr-150e_kinetics700-rgb_20221013-15b93b10.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_imagenet-pretrained-r50_16xb16-8x8x1-steplr-150e_kinetics700-rgb/slowonly_imagenet-pretrained-r50_16xb16-8x8x1-steplr-150e_kinetics700-rgb.log) | - -Note: - -1. The **gpus** indicates the number of gpu we used to get the checkpoint. It is noteworthy that the configs we provide are used for 8 gpus as default. - According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you may set the learning rate proportional to the batch size if you use different GPUs or videos per GPU, - e.g., lr=0.01 for 4 GPUs x 2 video/gpu and lr=0.08 for 16 GPUs x 4 video/gpu. 
+| frame sampling strategy | scheduler | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | FLOPs | params | config | ckpt | log | +| :---------------------: | :--------------: | :--------: | :--: | :------: | :------: | :------: | :------: | :---------------: | :----: | :----: | :----------------------------: | :--------------------------: | :--------------------------: | +| 4x16x1 | Linear+MultiStep | 224x224 | 8x2 | ResNet50 | ImageNet | 65.52 | 86.39 | 10 clips x 3 crop | 27.38G | 32.45M | [config](/configs/recognition/slowonly/slowonly_imagenet-pretrained-r50_16xb16-4x16x1-steplr-150e_kinetics700-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_imagenet-pretrained-r50_16xb16-4x16x1-steplr-150e_kinetics700-rgb/slowonly_imagenet-pretrained-r50_16xb16-4x16x1-steplr-150e_kinetics700-rgb_20221013-98b1b0a7.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_imagenet-pretrained-r50_16xb16-4x16x1-steplr-150e_kinetics700-rgb/slowonly_imagenet-pretrained-r50_16xb16-4x16x1-steplr-150e_kinetics700-rgb.log) | +| 8x8x1 | Linear+MultiStep | 224x224 | 8x2 | ResNet50 | ImageNet | 67.67 | 87.80 | 10 clips x 3 crop | 54.75G | 32.45M | [config](/configs/recognition/slowonly/slowonly_imagenet-pretrained-r50_16xb16-8x8x1-steplr-150e_kinetics700-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_imagenet-pretrained-r50_16xb16-8x8x1-steplr-150e_kinetics700-rgb/slowonly_imagenet-pretrained-r50_16xb16-8x8x1-steplr-150e_kinetics700-rgb_20221013-15b93b10.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/slowonly/slowonly_imagenet-pretrained-r50_16xb16-8x8x1-steplr-150e_kinetics700-rgb/slowonly_imagenet-pretrained-r50_16xb16-8x8x1-steplr-150e_kinetics700-rgb.log) | +1. The **gpus** indicates the number of gpus we used to get the checkpoint. If you want to use a different number of gpus or videos per gpu, the best way is to set `--auto-scale-lr` when calling `tools/train.py`, this parameter will auto-scale the learning rate according to the actual batch size and the original batch size. 2. The validation set of Kinetics400 we used consists of 19796 videos. These videos are available at [Kinetics400-Validation](https://mycuhk-my.sharepoint.com/:u:/g/personal/1155136485_link_cuhk_edu_hk/EbXw2WX94J1Hunyt3MWNDJUBz-nHvQYhO9pvKqm6g39PMA?e=a9QldB). The corresponding [data list](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_val_list.txt) (each line is of the format 'video_id, num_frames, label_index') and the [label map](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_class2ind.txt) are also available. -For more details on data preparation, you can refer to the **Prepare videos** part in the [Data Preparation Tutorial](/docs/en/user_guides/2_data_prepare.md). +For more details on data preparation, you can refer to [Kinetics400](/tools/data/kinetics/README.md). ## Train @@ -59,7 +54,7 @@ Example: train SlowOnly model on Kinetics-400 dataset in a deterministic option ```shell python tools/train.py configs/recognition/slowonly/slowonly_r50_8xb16-4x16x1-256e_kinetics400-rgb.py \ - --cfg-options randomness.seed=0 randomness.deterministic=True + --seed=0 --deterministic ``` For more details, you can refer to the **Training** part in the [Training and Test Tutorial](/docs/en/user_guides/4_train_test.md). 
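A quick illustration of the linear scaling rule behind the `--auto-scale-lr` option mentioned in the notes above. This is only a sketch, not the MMEngine implementation; it mirrors the `auto_scale_lr` settings (`enable`, `base_batch_size`) added to the configs later in this patch, and the numbers in the usage line come from the 4x16x1 Kinetics-400 config below.

```python
# Sketch only: what auto LR scaling does conceptually.
def scale_lr(base_lr, num_gpus, samples_per_gpu, base_batch_size=128):
    """Scale the learning rate linearly with the actual total batch size."""
    actual_batch_size = num_gpus * samples_per_gpu
    return base_lr * actual_batch_size / base_batch_size


# The 4x16x1 Kinetics-400 config uses lr=0.2 for 8 GPUs x 16 videos per GPU
# (base_batch_size=128), so a 4-GPU run with the same per-GPU batch would use:
print(scale_lr(0.2, num_gpus=4, samples_per_gpu=16))  # -> 0.1
```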
diff --git a/configs/recognition/slowonly/metafile.yml b/configs/recognition/slowonly/metafile.yml index 0e3f284bff..488e11b1c8 100644 --- a/configs/recognition/slowonly/metafile.yml +++ b/configs/recognition/slowonly/metafile.yml @@ -13,8 +13,8 @@ Models: Architecture: ResNet50 Batch Size: 16 Epochs: 256 - FLOPs: 27430649856 - Parameters: 32454096 + FLOPs: 27.38G + Parameters: 32.45M Pretrained: None Resolution: short-side 320 Training Data: Kinetics-400 @@ -36,8 +36,8 @@ Models: Architecture: ResNet50 Batch Size: 16 Epochs: 256 - FLOPs: 54860480512 - Parameters: 32454096 + FLOPs: 54.75G + Parameters: 32.45M Pretrained: None Resolution: short-side 320 Training Data: Kinetics-400 @@ -60,8 +60,8 @@ Models: Architecture: ResNet101 Batch Size: 16 Epochs: 196 - FLOPs: 112063447040 - Parameters: 60359120 + FLOPs: 112G + Parameters: 60.36M Pretrained: None Resolution: short-side 320 Training Data: Kinetics-400 @@ -83,8 +83,8 @@ Models: Architecture: ResNet50 Batch Size: 16 Epochs: 150 - FLOPs: 27430649856 - Parameters: 32454096 + FLOPs: 27.38G + Parameters: 32.45M Pretrained: ImageNet Resolution: short-side 320 Training Data: Kinetics-400 @@ -106,8 +106,8 @@ Models: Architecture: ResNet50 Batch Size: 16 Epochs: 150 - FLOPs: 54860480512 - Parameters: 32454096 + FLOPs: 54.75G + Parameters: 32.45M Pretrained: ImageNet Resolution: short-side 320 Training Data: Kinetics-400 @@ -130,8 +130,8 @@ Models: Architecture: ResNet50 Batch Size: 16 Epochs: 150 - FLOPs: 38201098240 - Parameters: 39808464 + FLOPs: 43.23G + Parameters: 39.81M Pretrained: ImageNet Resolution: short-side 320 Training Data: Kinetics-400 @@ -153,8 +153,8 @@ Models: Architecture: ResNet50 Batch Size: 16 Epochs: 150 - FLOPs: 76401377280 - Parameters: 39808464 + FLOPs: 96.66G + Parameters: 39.81M Pretrained: ImageNet Resolution: short-side 320 Training Data: Kinetics-400 @@ -176,8 +176,8 @@ Models: Architecture: ResNet50 Batch Size: 16 Epochs: 150 - FLOPs: 27430649856 - Parameters: 32454096 + FLOPs: 27.38G + Parameters: 32.45M Pretrained: ImageNet Resolution: short-side 320 Training Data: Kinetics-400 @@ -199,8 +199,8 @@ Models: Architecture: ResNet50 Batch Size: 16 Epochs: 150 - FLOPs: 54860480512 - Parameters: 32454096 + FLOPs: 54.75G + Parameters: 32.45M Pretrained: ImageNet Resolution: short-side 320 Training Data: Kinetics-400 diff --git a/configs/recognition/slowonly/slowonly_imagenet-pretrained-r50_16xb16-4x16x1-steplr-150e_kinetics700-rgb.py b/configs/recognition/slowonly/slowonly_imagenet-pretrained-r50_16xb16-4x16x1-steplr-150e_kinetics700-rgb.py index 4dd28786ca..c35a9ec10a 100644 --- a/configs/recognition/slowonly/slowonly_imagenet-pretrained-r50_16xb16-4x16x1-steplr-150e_kinetics700-rgb.py +++ b/configs/recognition/slowonly/slowonly_imagenet-pretrained-r50_16xb16-4x16x1-steplr-150e_kinetics700-rgb.py @@ -13,8 +13,10 @@ ann_file_train = 'data/kinetics700/kinetics700_train_list_videos.txt' ann_file_val = 'data/kinetics700/kinetics700_val_list_videos.txt' +file_client_args = dict(io_backend='disk') + train_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=1), dict(type='DecordDecode'), dict(type='Resize', scale=(-1, 256)), @@ -26,7 +28,7 @@ ] val_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=4, @@ -41,7 +43,7 @@ ] test_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=4, @@ 
-106,11 +108,13 @@ milestones=[90, 130], gamma=0.1) ] -""" -The learning rate is for total_batch_size = 16 x 16 (num_gpus x batch_size) -If you want to use other batch size or number of GPU settings, please update -the learning rate with the linear scaling rule. -""" + optim_wrapper = dict( optimizer=dict(type='SGD', lr=0.04, momentum=0.9, weight_decay=0.0001), clip_grad=dict(max_norm=40, norm_type=2)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (16 GPUs) x (16 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=256) diff --git a/configs/recognition/slowonly/slowonly_imagenet-pretrained-r50_16xb16-8x8x1-steplr-150e_kinetics700-rgb.py b/configs/recognition/slowonly/slowonly_imagenet-pretrained-r50_16xb16-8x8x1-steplr-150e_kinetics700-rgb.py index 8a62fabab6..818294ba2e 100644 --- a/configs/recognition/slowonly/slowonly_imagenet-pretrained-r50_16xb16-8x8x1-steplr-150e_kinetics700-rgb.py +++ b/configs/recognition/slowonly/slowonly_imagenet-pretrained-r50_16xb16-8x8x1-steplr-150e_kinetics700-rgb.py @@ -8,8 +8,10 @@ ann_file_train = 'data/kinetics700/kinetics700_train_list_videos.txt' ann_file_val = 'data/kinetics700/kinetics700_val_list_videos.txt' +file_client_args = dict(io_backend='disk') + train_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), dict(type='DecordDecode'), dict(type='Resize', scale=(-1, 256)), @@ -21,7 +23,7 @@ ] val_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=8, @@ -36,7 +38,7 @@ ] test_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=8, diff --git a/configs/recognition/slowonly/slowonly_r50_8xb16-4x16x1-256e_kinetics400-rgb.py b/configs/recognition/slowonly/slowonly_r50_8xb16-4x16x1-256e_kinetics400-rgb.py index 24d6d8f994..591df1494c 100644 --- a/configs/recognition/slowonly/slowonly_r50_8xb16-4x16x1-256e_kinetics400-rgb.py +++ b/configs/recognition/slowonly/slowonly_r50_8xb16-4x16x1-256e_kinetics400-rgb.py @@ -12,8 +12,10 @@ ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' +file_client_args = dict(io_backend='disk') + train_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=1), dict(type='DecordDecode'), dict(type='Resize', scale=(-1, 256)), @@ -25,7 +27,7 @@ ] val_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=4, @@ -40,7 +42,7 @@ ] test_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=4, @@ -108,14 +110,16 @@ begin=34, end=256) ] -""" -The learning rate is for total_batch_size = 8 x 16 (num_gpus x batch_size) -If you want to use other batch size or number of GPU settings, please update -the learning rate with the linear scaling rule. 
-""" + optim_wrapper = dict( optimizer=dict(type='SGD', lr=0.2, momentum=0.9, weight_decay=1e-4), clip_grad=dict(max_norm=40, norm_type=2)) # runtime settings default_hooks = dict(checkpoint=dict(interval=4, max_keep_ckpts=3)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (16 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=128) diff --git a/configs/recognition/slowonly/slowonly_r50_8xb16-8x8x1-256e_kinetics400-rgb.py b/configs/recognition/slowonly/slowonly_r50_8xb16-8x8x1-256e_kinetics400-rgb.py index 66cc6eecde..98e138a268 100644 --- a/configs/recognition/slowonly/slowonly_r50_8xb16-8x8x1-256e_kinetics400-rgb.py +++ b/configs/recognition/slowonly/slowonly_r50_8xb16-8x8x1-256e_kinetics400-rgb.py @@ -7,8 +7,10 @@ ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' +file_client_args = dict(io_backend='disk') + train_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), dict(type='DecordDecode'), dict(type='Resize', scale=(-1, 256)), @@ -20,7 +22,7 @@ ] val_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=8, @@ -35,7 +37,7 @@ ] test_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=8, From 7c61e9ab934128813c3f8efda290fe8d55bac07a Mon Sep 17 00:00:00 2001 From: Kai Hu Date: Sun, 25 Dec 2022 22:38:26 -0500 Subject: [PATCH 47/57] [Doc] Update TRN models' README & metafile (#2129) --- configs/recognition/trn/README.md | 22 +++++++--------- configs/recognition/trn/metafile.yml | 26 ++++++++++--------- ...retrained-r50_8xb16-1x1x8-50e_sthv1-rgb.py | 8 +++--- ...retrained-r50_8xb16-1x1x8-50e_sthv2-rgb.py | 8 +++--- 4 files changed, 34 insertions(+), 30 deletions(-) diff --git a/configs/recognition/trn/README.md b/configs/recognition/trn/README.md index f410a3e9c8..875207dd43 100644 --- a/configs/recognition/trn/README.md +++ b/configs/recognition/trn/README.md @@ -20,24 +20,22 @@ Temporal relational reasoning, the ability to link meaningful transformations of ### Something-Something V1 -| frame sampling strategy | resolution | gpus | backbone | pretrain | top1 acc (efficient/accurate) | top5 acc (efficient/accurate) | gpu_mem(M) | config | ckpt | log | -| :---------------------: | :--------: | :--: | :------: | :------: | :---------------------------: | :---------------------------: | :--------: | :--------------------------: | :------------------------: | :-----------------------: | -| 1x1x8 | height 100 | 8 | ResNet50 | ImageNet | 31.81 / 33.86 | 60.47 / 62.24 | 11037 | [config](/configs/recognition/trn/trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv1-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/trn/trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv1-rgb/trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv1-rgb_20220815-e13db2e9.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/trn/trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv1-rgb/trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv1-rgb.log) | +| frame sampling strategy | resolution | gpus | backbone | pretrain | top1 acc (efficient/accurate) | top5 acc (efficient/accurate) | testing protocol | FLOPs | params | config | ckpt | log | +| 
:---------------------: | :--------: | :--: | :------: | :------: | :---------------------------: | :---------------------------: | :----------------: | :----: | :----: | :-------------------: | :-----------------: | :-----------------: | +| 1x1x8 | 224x224 | 8 | ResNet50 | ImageNet | 31.60 / 33.65 | 60.15 / 62.22 | 16 clips x 10 crop | 42.94G | 26.64M | [config](/configs/recognition/trn/trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv1-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/trn/trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv1-rgb/trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv1-rgb_20220815-e13db2e9.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/trn/trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv1-rgb/trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv1-rgb.log) | ### Something-Something V2 -| frame sampling strategy | resolution | gpus | backbone | pretrain | top1 acc (efficient/accurate) | top5 acc (efficient/accurate) | gpu_mem(M) | config | ckpt | log | -| :---------------------: | :--------: | :--: | :------: | :------: | :---------------------------: | :---------------------------: | :--------: | :--------------------------: | :------------------------: | :-----------------------: | -| 1x1x8 | height 240 | 8 | ResNet50 | ImageNet | 48.54 / 51.53 | 76.53 / 78.60 | 11037 | [config](/configs/recognition/trn/trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/trn/trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb/trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb_20220815-e01617db.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/trn/trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb/trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb.log) | +| frame sampling strategy | resolution | gpus | backbone | pretrain | top1 acc (efficient/accurate) | top5 acc (efficient/accurate) | testing protocol | FLOPs | params | config | ckpt | log | +| :---------------------: | :--------: | :--: | :------: | :------: | :---------------------------: | :---------------------------: | :----------------: | :----: | :----: | :-------------------: | :-----------------: | :-----------------: | +| 1x1x8 | 224x224 | 8 | ResNet50 | ImageNet | 47.65 / 51.20 | 76.27 / 78.42 | 16 clips x 10 crop | 42.94G | 26.64M | [config](/configs/recognition/trn/trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/trn/trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb/trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb_20220815-e01617db.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/trn/trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb/trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb.log) | -1. The **gpus** indicates the number of gpu we used to get the checkpoint. It is noteworthy that the configs we provide are used for 8 gpus as default. - According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you may set the learning rate proportional to the batch size if you use different GPUs or videos per GPU, - e.g., lr=0.01 for 4 GPUs x 2 video/gpu and lr=0.08 for 16 GPUs x 4 video/gpu. -2. There are two kinds of test settings for Something-Something dataset, efficient setting (center crop x 1 clip) and accurate setting (Three crop x 2 clip). +1. The **gpus** indicates the number of gpus we used to get the checkpoint. 
If you want to use a different number of gpus or videos per gpu, the best way is to set `--auto-scale-lr` when calling `tools/train.py`, this parameter will auto-scale the learning rate according to the actual batch size and the original batch size. +2. There are two kinds of test settings for Something-Something dataset, efficient setting (center crop only) and accurate setting (three crop and `twice_sample`). 3. In the original [repository](https://github.com/zhoubolei/TRN-pytorch), the author augments data with random flipping on something-something dataset, but the augmentation method may be wrong due to the direct actions, such as `push left to right`. So, we replaced `flip` with `flip with label mapping`, and change the testing method `TenCrop`, which has five flipped crops, to `Twice Sample & ThreeCrop`. 4. We use `ResNet50` instead of `BNInception` as the backbone of TRN. When Training `TRN-ResNet50` on sthv1 dataset in the original repository, we get top1 (top5) accuracy 30.542 (58.627) vs. ours 31.81 (60.47). -For more details on data preparation, you can refer to [sthv1](/tools/data/sthv1/README.md) and [sthv2](/tools/data/sthv2/README.md). +For more details on data preparation, you can refer to [Something-something V1](/tools/data/sthv1/README.md) and [Something-something V2](/tools/data/sthv2/README.md). ## Train @@ -51,7 +49,7 @@ Example: train TRN model on sthv1 dataset in a deterministic option with periodi ```shell python tools/train.py configs/recognition/trn/trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv1-rgb.py \ - --cfg-options randomness.seed=0 randomness.deterministic=True + --seed=0 --deterministic ``` For more details, you can refer to the **Training** part in the [Training and Test Tutorial](/docs/en/user_guides/4_train_test.md). 
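As a side note on the "flip with label mapping" augmentation discussed in the notes above: when a clip is flipped horizontally, direction-sensitive classes are swapped using a label map such as the `sthv1_flip_label_map` dict defined in the config diff below. The following is a minimal standalone sketch of that idea, not the actual MMAction2 `Flip` transform.

```python
import random

sthv1_flip_label_map = {2: 4, 4: 2, 30: 41, 41: 30, 52: 66, 66: 52}


def flip_with_label_map(imgs, label, flip_ratio=0.5,
                        flip_label_map=sthv1_flip_label_map):
    """Horizontally flip HWC frames and swap direction-sensitive labels."""
    if random.random() < flip_ratio:
        imgs = [img[:, ::-1, :] for img in imgs]  # mirror the width axis
        # e.g. 'push something from left to right' maps to its mirrored class
        label = flip_label_map.get(label, label)
    return imgs, label
```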
diff --git a/configs/recognition/trn/metafile.yml b/configs/recognition/trn/metafile.yml index 2fd90cd428..5d2ad5334c 100644 --- a/configs/recognition/trn/metafile.yml +++ b/configs/recognition/trn/metafile.yml @@ -13,9 +13,10 @@ Models: Architecture: ResNet50 Batch Size: 16 Epochs: 50 - Parameters: 26641154 + FLOPs: 42.94G + params: 26.64M Pretrained: ImageNet - Resolution: height 100 + Resolution: 224x224 Training Data: SthV1 Training Resources: 8 GPUs Modality: RGB @@ -23,10 +24,10 @@ Models: - Dataset: SthV1 Task: Action Recognition Metrics: - Top 1 Accuracy: 33.86 - Top 1 Accuracy (efficient): 31.81 - Top 5 Accuracy: 62.24 - Top 5 Accuracy (efficient): 60.47 + Top 1 Accuracy: 33.65 + Top 1 Accuracy (efficient): 31.60 + Top 5 Accuracy: 62.22 + Top 5 Accuracy (efficient): 60.15 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/trn/trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv1-rgb/trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv1-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/trn/trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv1-rgb/trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv1-rgb_20220815-e13db2e9.pth @@ -37,8 +38,9 @@ Models: Architecture: ResNet50 Batch Size: 16 Epochs: 50 - Parameters: 26641154 - Pretrained: ImageNet + FLOPs: 42.94G + params: 26.64M + Pretrained: 224x224 Resolution: height 240 Training Data: SthV2 Training Resources: 8 GPUs @@ -47,9 +49,9 @@ Models: - Dataset: SthV2 Task: Action Recognition Metrics: - Top 1 Accuracy: 51.53 - Top 1 Accuracy (efficient): 48.54 - Top 5 Accuracy: 78.60 - Top 5 Accuracy (efficient): 76.53 + Top 1 Accuracy: 51.20 + Top 1 Accuracy (efficient): 47.65 + Top 5 Accuracy: 78.42 + Top 5 Accuracy (efficient): 76.27 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/trn/trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb/trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/trn/trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb/trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb_20220815-e01617db.pth diff --git a/configs/recognition/trn/trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv1-rgb.py b/configs/recognition/trn/trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv1-rgb.py index 766bed0d5a..842ba06256 100644 --- a/configs/recognition/trn/trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv1-rgb.py +++ b/configs/recognition/trn/trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv1-rgb.py @@ -11,10 +11,12 @@ ann_file_val = 'data/sthv1/sthv1_val_list_rawframes.txt' ann_file_test = 'data/sthv1/sthv1_val_list_rawframes.txt' +file_client_args = dict(io_backend='disk') + sthv1_flip_label_map = {2: 4, 4: 2, 30: 41, 41: 30, 52: 66, 66: 52} train_pipeline = [ dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), - dict(type='RawFrameDecode'), + dict(type='RawFrameDecode', **file_client_args), dict(type='Resize', scale=(-1, 256)), dict( type='MultiScaleCrop', @@ -35,7 +37,7 @@ frame_interval=1, num_clips=8, test_mode=True), - dict(type='RawFrameDecode'), + dict(type='RawFrameDecode', **file_client_args), dict(type='Resize', scale=(-1, 256)), dict(type='CenterCrop', crop_size=224), dict(type='FormatShape', input_format='NCHW'), @@ -49,7 +51,7 @@ num_clips=8, twice_sample=True, test_mode=True), - dict(type='RawFrameDecode'), + dict(type='RawFrameDecode', **file_client_args), dict(type='Resize', scale=(-1, 256)), dict(type='ThreeCrop', crop_size=256), dict(type='FormatShape', input_format='NCHW'), diff 
--git a/configs/recognition/trn/trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb.py b/configs/recognition/trn/trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb.py index 1426d10bbd..40bc1ea953 100644 --- a/configs/recognition/trn/trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb.py +++ b/configs/recognition/trn/trn_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb.py @@ -11,9 +11,11 @@ ann_file_val = 'data/sthv2/sthv2_val_list_videos.txt' ann_file_test = 'data/sthv2/sthv2_val_list_videos.txt' +file_client_args = dict(io_backend='disk') + sthv2_flip_label_map = {86: 87, 87: 86, 93: 94, 94: 93, 166: 167, 167: 166} train_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), dict(type='DecordDecode'), dict(type='Resize', scale=(-1, 256)), @@ -30,7 +32,7 @@ dict(type='PackActionInputs') ] val_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=1, @@ -44,7 +46,7 @@ dict(type='PackActionInputs') ] test_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=1, From f1c3847ef8bc4eaf7d03c23c21363818fe802fef Mon Sep 17 00:00:00 2001 From: GAO SHIQI <53168648+GhaSiKey@users.noreply.github.com> Date: Fri, 30 Dec 2022 16:13:35 +0800 Subject: [PATCH 48/57] CodeCamp #153 (#2152) --- demo/README.md | 61 ++++++++++++ demo/webcam_demo.py | 226 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 287 insertions(+) create mode 100644 demo/webcam_demo.py diff --git a/demo/README.md b/demo/README.md index 91e4f48936..88e4c96bf8 100644 --- a/demo/README.md +++ b/demo/README.md @@ -5,6 +5,7 @@ - [Modify configs through script arguments](#modify-config-through-script-arguments): Tricks to directly modify configs through script arguments. - [Video demo](#video-demo): A demo script to predict the recognition result using a single video. - [Video GradCAM Demo](#video-gradcam-demo): A demo script to visualize GradCAM results using a single video. +- [Webcam demo](#webcam-demo): A demo script to implement real-time action recognition from a web camera. - [Skeleton-based Action Recognition Demo](#skeleton-based-action-recognition-demo): A demo script to predict the skeleton-based action recognition result using a single video. ## Modify configs through script arguments @@ -120,6 +121,66 @@ or use checkpoint url from `configs/` to directly load corresponding checkpoint, demo/demo.mp4 --target-layer-name backbone/layer4/1/relu --out-filename demo/demo_gradcam_tsn.gif ``` +## Webcam demo + +We provide a demo script to implement real-time action recognition from web camera. In order to get predict results in range `[0, 1]`, make sure to set `model.cls_head.average_clips='prob'` in config file. + +```shell +python demo/webcam_demo.py ${CONFIG_FILE} ${CHECKPOINT_FILE} ${LABEL_FILE} \ + [--device ${DEVICE_TYPE}] [--camera-id ${CAMERA_ID}] [--threshold ${THRESHOLD}] \ + [--average-size ${AVERAGE_SIZE}] [--drawing-fps ${DRAWING_FPS}] [--inference-fps ${INFERENCE_FPS}] +``` + +Optional arguments: + +- `DEVICE_TYPE`: Type of device to run the demo. Allowed values are cuda device like `cuda:0` or `cpu`. If not specified, it will be set to `cuda:0`. +- `CAMERA_ID`: ID of camera device If not specified, it will be set to 0. +- `THRESHOLD`: Threshold of prediction score for action recognition. Only label with score higher than the threshold will be shown. 
If not specified, it will be set to 0.01.
+- `AVERAGE_SIZE`: Number of latest clips to be averaged for prediction. If not specified, it will be set to 1.
+- `DRAWING_FPS`: Upper bound FPS value of the output drawing. If not specified, it will be set to 20.
+- `INFERENCE_FPS`: Upper bound FPS value of model inference. If not specified, it will be set to 4.
+
+If your hardware is good enough, increasing the values of `DRAWING_FPS` and `INFERENCE_FPS` will give a smoother experience.
+
+Examples:
+
+Assume that you are located at `$MMACTION2` and have already downloaded the checkpoints to the directory `checkpoints/`,
+or use the checkpoint url from `configs/` to directly load the corresponding checkpoint, which will be automatically saved in `$HOME/.cache/torch/checkpoints`.
+
+1. Recognize actions from the web camera by using a TSN model on cpu, averaging the scores of the latest 5 clips
+   and outputting result labels with a score higher than 0.2.
+
+   ```shell
+   python demo/webcam_demo.py demo/demo_configs/tsn_r50_1x1x8_video_infer.py \
+     checkpoints/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb_20220906-2692d16c.pth tools/data/kinetics/label_map_k400.txt --average-size 5 \
+     --threshold 0.2 --device cpu
+   ```
+
+2. Recognize actions from the web camera by using a TSN model on cpu, averaging the scores of the latest 5 clips
+   and outputting result labels with a score higher than 0.2, loading the checkpoint from a url.
+
+   ```shell
+   python demo/webcam_demo.py demo/demo_configs/tsn_r50_1x1x8_video_infer.py \
+     https://download.openmmlab.com/mmaction/v1.0/recognition/tsn/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb/tsn_imagenet-pretrained-r50_8xb32-1x1x8-100e_kinetics400-rgb_20220906-2692d16c.pth \
+     tools/data/kinetics/label_map_k400.txt --average-size 5 --threshold 0.2 --device cpu
+   ```
+
+3. Recognize actions from the web camera by using an I3D model on gpu by default, averaging the scores of the latest 5 clips
+   and outputting result labels with a score higher than 0.2.
+
+   ```shell
+   python demo/webcam_demo.py demo/demo_configs/i3d_r50_32x2x1_video_infer.py \
+     checkpoints/i3d_imagenet-pretrained-r50_8xb8-32x2x1-100e_kinetics400-rgb_20220812-e213c223.pth tools/data/kinetics/label_map_k400.txt \
+     --average-size 5 --threshold 0.2
+   ```
+
+Considering that hardware efficiency differs between users, some modifications may be needed to suit your case.
+Users can change:
+
+- The `SampleFrames` step (especially the values of `clip_len` and `num_clips`) in `test_pipeline` of the config file, like `--cfg-options test_pipeline.0.num_clips=3`.
+- The crop method in `test_pipeline` of the config file, e.g. `TenCrop`, `ThreeCrop` or `CenterCrop`, like `--cfg-options test_pipeline.4.type=CenterCrop`.
+- The value of `--average-size`. The smaller, the faster.
+
 ## Skeleton-based Action Recognition Demo
 
 MMAction2 provides a demo script to predict the skeleton-based action recognition result using a single video.
diff --git a/demo/webcam_demo.py b/demo/webcam_demo.py
new file mode 100644
index 0000000000..7102375b7e
--- /dev/null
+++ b/demo/webcam_demo.py
@@ -0,0 +1,226 @@
+# Copyright (c) OpenMMLab. All rights reserved.
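+# The demo below runs two daemon threads that communicate through two deques:
+# `show_results` keeps reading frames from the camera, overlays the most
+# recent prediction on each frame and throttles itself to `--drawing-fps`,
+# while `inference` waits until `sample_length` frames are buffered, scores
+# the clip and pushes the top labels (averaged over `--average-size` clips)
+# back for display.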
+import argparse +import time +from collections import deque +from operator import itemgetter +from threading import Thread + +import cv2 +import numpy as np +import torch +from mmengine import Config, DictAction +from mmengine.dataset import Compose, pseudo_collate + +from mmaction.apis import init_recognizer +from mmaction.utils import register_all_modules + +FONTFACE = cv2.FONT_HERSHEY_COMPLEX_SMALL +FONTSCALE = 1 +FONTCOLOR = (255, 255, 255) # BGR, white +MSGCOLOR = (128, 128, 128) # BGR, gray +THICKNESS = 1 +LINETYPE = 1 +EXCLUED_STEPS = [ + 'OpenCVInit', 'OpenCVDecode', 'DecordInit', 'DecordDecode', 'PyAVInit', + 'PyAVDecode', 'RawFrameDecode' +] + + +def parse_args(): + parser = argparse.ArgumentParser(description='MMAction2 webcam demo') + parser.add_argument('config', help='test config file path') + parser.add_argument('checkpoint', help='checkpoint file/url') + parser.add_argument('label', help='label file') + parser.add_argument( + '--device', type=str, default='cuda:0', help='CPU/CUDA device option') + parser.add_argument( + '--camera-id', type=int, default=0, help='camera device id') + parser.add_argument( + '--threshold', + type=float, + default=0.01, + help='recognition score threshold') + parser.add_argument( + '--average-size', + type=int, + default=1, + help='number of latest clips to be averaged for prediction') + parser.add_argument( + '--drawing-fps', + type=int, + default=20, + help='Set upper bound FPS value of the output drawing') + parser.add_argument( + '--inference-fps', + type=int, + default=4, + help='Set upper bound FPS value of model inference') + parser.add_argument( + '--cfg-options', + nargs='+', + action=DictAction, + default={}, + help='override some settings in the used config, the key-value pair ' + 'in xxx=yyy format will be merged into config file. For example, ' + "'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'") + args = parser.parse_args() + assert args.drawing_fps >= 0 and args.inference_fps >= 0, \ + 'upper bound FPS value of drawing and inference should be set as ' \ + 'positive number, or zero for no limit' + return args + + +def show_results(): + print('Press "Esc", "q" or "Q" to exit') + + text_info = {} + cur_time = time.time() + while True: + msg = 'Waiting for action ...' 
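+        # Grab the next frame: an RGB copy goes to the inference thread via
+        # `frame_queue`, while the original BGR frame is used for drawing.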
+ _, frame = camera.read() + frame_queue.append(np.array(frame[:, :, ::-1])) + + if len(result_queue) != 0: + text_info = {} + results = result_queue.popleft() + for i, result in enumerate(results): + selected_label, score = result + if score < threshold: + break + location = (0, 40 + i * 20) + text = selected_label + ': ' + str(round(score * 100, 2)) + text_info[location] = text + cv2.putText(frame, text, location, FONTFACE, FONTSCALE, + FONTCOLOR, THICKNESS, LINETYPE) + + elif len(text_info) != 0: + for location, text in text_info.items(): + cv2.putText(frame, text, location, FONTFACE, FONTSCALE, + FONTCOLOR, THICKNESS, LINETYPE) + + else: + cv2.putText(frame, msg, (0, 40), FONTFACE, FONTSCALE, MSGCOLOR, + THICKNESS, LINETYPE) + + cv2.imshow('camera', frame) + ch = cv2.waitKey(1) + + if ch == 27 or ch == ord('q') or ch == ord('Q'): + camera.release() + cv2.destroyAllWindows() + break + + if drawing_fps > 0: + # add a limiter for actual drawing fps <= drawing_fps + sleep_time = 1 / drawing_fps - (time.time() - cur_time) + if sleep_time > 0: + time.sleep(sleep_time) + cur_time = time.time() + + +def inference(): + score_cache = deque() + scores_sum = 0 + cur_time = time.time() + while True: + cur_windows = [] + + while len(cur_windows) == 0: + if len(frame_queue) == sample_length: + cur_windows = list(np.array(frame_queue)) + if data['img_shape'] is None: + data['img_shape'] = frame_queue.popleft().shape[:2] + + cur_data = data.copy() + cur_data['imgs'] = cur_windows + cur_data = test_pipeline(cur_data) + cur_data = pseudo_collate([cur_data]) + + # Forward the model + with torch.no_grad(): + result = model.test_step(cur_data)[0] + scores = result.pred_scores.item.tolist() + scores = np.array(scores) + score_cache.append(scores) + scores_sum += scores + + if len(score_cache) == average_size: + scores_avg = scores_sum / average_size + num_selected_labels = min(len(label), 5) + + score_tuples = tuple(zip(label, scores_avg)) + score_sorted = sorted( + score_tuples, key=itemgetter(1), reverse=True) + results = score_sorted[:num_selected_labels] + + result_queue.append(results) + scores_sum -= score_cache.popleft() + + if inference_fps > 0: + # add a limiter for actual inference fps <= inference_fps + sleep_time = 1 / inference_fps - (time.time() - cur_time) + if sleep_time > 0: + time.sleep(sleep_time) + cur_time = time.time() + + +def main(): + global average_size, threshold, drawing_fps, inference_fps, \ + device, model, camera, data, label, sample_length, \ + test_pipeline, frame_queue, result_queue + + # Register all modules in mmaction2 into the registries + register_all_modules() + + args = parse_args() + average_size = args.average_size + threshold = args.threshold + drawing_fps = args.drawing_fps + inference_fps = args.inference_fps + + device = torch.device(args.device) + + cfg = Config.fromfile(args.config) + if args.cfg_options is not None: + cfg.merge_from_dict(args.cfg_options) + + # Build the recognizer from a config file and checkpoint file/url + model = init_recognizer(cfg, args.checkpoint, device=args.device) + camera = cv2.VideoCapture(args.camera_id) + data = dict(img_shape=None, modality='RGB', label=-1) + + with open(args.label, 'r') as f: + label = [line.strip() for line in f] + + # prepare test pipeline from non-camera pipeline + cfg = model.cfg + sample_length = 0 + pipeline = cfg.test_pipeline + pipeline_ = pipeline.copy() + for step in pipeline: + if 'SampleFrames' in step['type']: + sample_length = step['clip_len'] * step['num_clips'] + data['num_clips'] = 
step['num_clips'] + data['clip_len'] = step['clip_len'] + pipeline_.remove(step) + if step['type'] in EXCLUED_STEPS: + # remove step to decode frames + pipeline_.remove(step) + test_pipeline = Compose(pipeline_) + + assert sample_length > 0 + + try: + frame_queue = deque(maxlen=sample_length) + result_queue = deque(maxlen=1) + pw = Thread(target=show_results, args=(), daemon=True) + pr = Thread(target=inference, args=(), daemon=True) + pw.start() + pr.start() + pw.join() + except KeyboardInterrupt: + pass + + +if __name__ == '__main__': + main() From fb09be891a07208314e7db14fe8df3e5f83025de Mon Sep 17 00:00:00 2001 From: wxDai Date: Thu, 5 Jan 2023 14:54:29 +0800 Subject: [PATCH 49/57] [Feature] Support STGCN++ (#2156) --- README.md | 3 +- configs/skeleton/stgcnpp/README.md | 84 +++++++++ configs/skeleton/stgcnpp/metafile.yml | 159 ++++++++++++++++++ ...-motion-u100-80e_ntu60-xsub-keypoint-2d.py | 67 ++++++++ ...-motion-u100-80e_ntu60-xsub-keypoint-3d.py | 67 ++++++++ ...16-bone-u100-80e_ntu60-xsub-keypoint-2d.py | 67 ++++++++ ...16-bone-u100-80e_ntu60-xsub-keypoint-3d.py | 67 ++++++++ ...-motion-u100-80e_ntu60-xsub-keypoint-2d.py | 67 ++++++++ ...-motion-u100-80e_ntu60-xsub-keypoint-3d.py | 67 ++++++++ ...6-joint-u100-80e_ntu60-xsub-keypoint-2d.py | 106 ++++++++++++ ...6-joint-u100-80e_ntu60-xsub-keypoint-3d.py | 106 ++++++++++++ mmaction/models/backbones/stgcn.py | 24 +-- mmaction/models/utils/gcn_utils.py | 109 ++++++++++++ model-index.yml | 1 + tests/models/backbones/test_stgcn.py | 10 +- .../models/recognizers/test_recognizer_gcn.py | 8 + 16 files changed, 995 insertions(+), 17 deletions(-) create mode 100644 configs/skeleton/stgcnpp/README.md create mode 100644 configs/skeleton/stgcnpp/metafile.yml create mode 100644 configs/skeleton/stgcnpp/stgcnpp_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d.py create mode 100644 configs/skeleton/stgcnpp/stgcnpp_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d.py create mode 100644 configs/skeleton/stgcnpp/stgcnpp_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d.py create mode 100644 configs/skeleton/stgcnpp/stgcnpp_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d.py create mode 100644 configs/skeleton/stgcnpp/stgcnpp_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d.py create mode 100644 configs/skeleton/stgcnpp/stgcnpp_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d.py create mode 100644 configs/skeleton/stgcnpp/stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py create mode 100644 configs/skeleton/stgcnpp/stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d.py diff --git a/README.md b/README.md index 7fa850bd5b..cadcd8b9e8 100644 --- a/README.md +++ b/README.md @@ -70,6 +70,7 @@ The 1.x branch works with **PyTorch 1.6+**. ## What's New +- (2023-1-5) We support STGCN++ on NTU-RGB+D. - (2022-11-30) We refine our skeleton-based pipelines and support the joint training of multi-stream skeleton information, including **joint, bone, joint-motion, and bone-motion**. - (2022-10-11) We support **Video Swin Transformer** on Kinetics400 and additionally train a Swin-L model on Kinetics700 to extract video features for downstream tasks. - (2022-10-25) We support **VideoMAE** on Kinetics400. 
@@ -149,7 +150,7 @@ Please refer to [install.md](https://mmaction2.readthedocs.io/en/1.x/get_started ST-GCN (AAAI'2018) 2s-AGCN (CVPR'2019) PoseC3D (CVPR'2022) - + STGCN++ (ArXiv'2022) diff --git a/configs/skeleton/stgcnpp/README.md b/configs/skeleton/stgcnpp/README.md new file mode 100644 index 0000000000..655b067a60 --- /dev/null +++ b/configs/skeleton/stgcnpp/README.md @@ -0,0 +1,84 @@ +# STGCN++ + +[PYSKL: Towards Good Practices for Skeleton Action Recognition](https://arxiv.org/abs/2205.09443) + + + +## Abstract + + + +We present PYSKL: an open-source toolbox for skeleton-based action recognition based on PyTorch. The toolbox supports a wide variety of skeleton action recognition algorithms, including approaches based on GCN and CNN. In contrast to existing open-source skeleton action recognition projects that include only one or two algorithms, PYSKL implements six different algorithms under a unified framework with both the latest and original good practices to ease the comparison of efficacy and efficiency. We also provide an original GCN-based skeleton action recognition model named ST-GCN++, which achieves competitive recognition performance without any complicated attention schemes, serving as a strong baseline. Meanwhile, PYSKL supports the training and testing of nine skeleton-based action recognition benchmarks and achieves state-of-the-art recognition performance on eight of them. To facilitate future research on skeleton action recognition, we also provide a large number of trained models and detailed benchmark results to give some insights. PYSKL is released at this https URL and is actively maintained. We will update this report when we add new features or benchmarks. The current version corresponds to PYSKL v0.2. + +## Results and Models + +### NTU60_XSub_2D + +| frame sampling strategy | modality | gpus | backbone | top1 acc | testing protocol | FLOPs | params | config | ckpt | log | +| :---------------------: | :----------: | :--: | :------: | :------: | :--------------: | :---: | :----: | :---------------------------------------: | :-------------------------------------: | :------------------------------------: | +| uniform 100 | joint | 8 | STGCN++ | 89.29 | 10 clips | 1.95G | 1.39M | [config](/configs/skeleton/stgcnpp/stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcnpp/stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d/stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d_20221228-86e1e77a.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcnpp/stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d/stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.log) | +| uniform 100 | bone | 8 | STGCN++ | 92.30 | 10 clips | 1.95G | 1.39M | [config](/configs/skeleton/stgcnpp/stgcnpp_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcnpp/stgcnpp_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d/stgcnpp_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d_20221228-cd11a691.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcnpp/stgcnpp_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d/stgcnpp_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d.log) | +| uniform 100 | joint-motion | 8 | STGCN++ | 87.30 | 10 clips | 1.95G | 1.39M | [config](/configs/skeleton/stgcnpp/stgcnpp_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d.py) | 
[ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcnpp/stgcnpp_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d/stgcnpp_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d_20221228-19a34aba.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcnpp/stgcnpp_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d/stgcnpp_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d.log) | +| uniform 100 | bone-motion | 8 | STGCN++ | 88.76 | 10 clips | 1.95G | 1.39M | [config](/configs/skeleton/stgcnpp/stgcnpp_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcnpp/stgcnpp_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d/stgcnpp_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d_20221228-c02a0749.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcnpp/stgcnpp_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d/stgcnpp_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d.log) | +| | two-stream | | | 92.61 | | | | | | | +| | four-stream | | | 92.77 | | | | | | | + +### NTU60_XSub_3D + +| frame sampling strategy | modality | gpus | backbone | top1 acc | testing protocol | FLOPs | params | config | ckpt | log | +| :---------------------: | :----------: | :--: | :------: | :------: | :--------------: | :---: | :----: | :---------------------------------------: | :-------------------------------------: | :------------------------------------: | +| uniform 100 | joint | 8 | STGCN++ | 89.14 | 10 clips | 2.96G | 1.4M | [config](/configs/skeleton/stgcnpp/stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcnpp/stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d/stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d_20221230-4e455ce3.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcnpp/stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d/stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d.log) | +| uniform 100 | bone | 8 | STGCN++ | 90.21 | 10 clips | 2.96G | 1.4M | [config](/configs/skeleton/stgcnpp/stgcnpp_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcnpp/stgcnpp_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d/stgcnpp_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d_20221230-7f356072.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcnpp/stgcnpp_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d/stgcnpp_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d.log) | +| uniform 100 | joint-motion | 8 | STGCN++ | 86.67 | 10 clips | 2.96G | 1.4M | [config](/configs/skeleton/stgcnpp/stgcnpp_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcnpp/stgcnpp_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d/stgcnpp_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d_20221230-650de5cc.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcnpp/stgcnpp_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d/stgcnpp_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d.log) | +| uniform 100 | bone-motion | 8 | STGCN++ | 87.45 | 10 clips | 2.96G | 1.4M | [config](/configs/skeleton/stgcnpp/stgcnpp_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcnpp/stgcnpp_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d/stgcnpp_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d_20221230-b00440d2.pth) | 
[log](https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcnpp/stgcnpp_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d/stgcnpp_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d.log) | +| | two-stream | | | 91.39 | | | | | | | +| | four-stream | | | 91.87 | | | | | | | + +1. The **gpus** indicates the number of gpus we used to get the checkpoint. If you want to use a different number of gpus or videos per gpu, the best way is to set `--auto-scale-lr` when calling `tools/train.py`, this parameter will auto-scale the learning rate according to the actual batch size, and the original batch size. +2. For two-stream fusion, we use **joint : bone = 1 : 1**. For four-stream fusion, we use **joint : joint-motion : bone : bone-motion = 2 : 1 : 2 : 1**. For more details about multi-stream fusion, please refer to this [tutorial](/docs/en/user_guides/useful_tools.md#multi-stream-fusion). + +## Train + +You can use the following command to train a model. + +```shell +python tools/train.py ${CONFIG_FILE} [optional arguments] +``` + +Example: train STGCN++ model on NTU60-2D dataset in a deterministic option with periodic validation. + +```shell +python tools/train.py configs/skeleton/stgcnpp/stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py \ + --seed 0 --deterministic +``` + +For more details, you can refer to the **Training** part in the [Training and Test Tutorial](/docs/en/user_guides/4_train_test.md). + +## Test + +You can use the following command to test a model. + +```shell +python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments] +``` + +Example: test STGCN++ model on NTU60-2D dataset and dump the result to a pickle file. + +```shell +python tools/test.py configs/skeleton/stgcnpp/stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py \ + checkpoints/SOME_CHECKPOINT.pth --dump result.pkl +``` + +For more details, you can refer to the **Test** part in the [Training and Test Tutorial](/docs/en/user_guides/4_train_test.md). 
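To make the multi-stream fusion ratios in the notes above concrete, a minimal late-fusion sketch follows. It assumes each stream's test results were dumped to a pickle holding one per-class score vector per sample (the real `--dump` output stores richer per-sample records, so the loading step would need adapting), and the file names are placeholders.

```python
import pickle

import numpy as np


def fuse_streams(score_files, weights):
    """Weighted sum of per-stream classification scores (late fusion)."""
    fused = None
    for path, weight in zip(score_files, weights):
        with open(path, 'rb') as f:
            scores = np.stack(pickle.load(f))  # shape: (num_samples, num_classes)
        fused = weight * scores if fused is None else fused + weight * scores
    return fused.argmax(axis=1)  # predicted class per sample


# four-stream fusion, joint : joint-motion : bone : bone-motion = 2 : 1 : 2 : 1
preds = fuse_streams(
    ['joint.pkl', 'joint_motion.pkl', 'bone.pkl', 'bone_motion.pkl'],
    weights=[2, 1, 2, 1])
```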
+ +## Citation + +```BibTeX +@misc{duan2022PYSKL, + url = {https://arxiv.org/abs/2205.09443}, + author = {Duan, Haodong and Wang, Jiaqi and Chen, Kai and Lin, Dahua}, + title = {PYSKL: Towards Good Practices for Skeleton Action Recognition}, + publisher = {arXiv}, + year = {2022} +} +``` diff --git a/configs/skeleton/stgcnpp/metafile.yml b/configs/skeleton/stgcnpp/metafile.yml new file mode 100644 index 0000000000..23404e2190 --- /dev/null +++ b/configs/skeleton/stgcnpp/metafile.yml @@ -0,0 +1,159 @@ +Collections: + - Name: STGCN++ + README: configs/skeleton/stgcnpp/README.md + Paper: + URL: https://arxiv.org/abs/2205.09443 + Title: 'PYSKL: Towards Good Practices for Skeleton Action Recognition' + +Models: + - Name: stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d + Config: configs/skeleton/stgcnpp/stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py + In Collection: STGCN++ + Metadata: + Architecture: STGCN++ + Batch Size: 16 + Epochs: 80 + FLOPs: 1.95G + Parameters: 1.39M + Training Data: NTU60-XSub-2D + Training Resources: 8 GPUs + Results: + Dataset: NTU60-XSub-2D + Task: Skeleton-based Action Recognition + Metrics: + Top 1 Accuracy: 89.29 + Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcnpp/stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d/stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.log + Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcnpp/stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d/stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d_20221228-86e1e77a.pth + + - Name: stgcnpp_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d + Config: configs/skeleton/stgcnpp/stgcnpp_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d.py + In Collection: STGCN++ + Metadata: + Architecture: STGCN++ + Batch Size: 16 + Epochs: 80 + FLOPs: 1.95G + Parameters: 1.39M + Training Data: NTU60-XSub-2D + Training Resources: 8 GPUs + Results: + Dataset: NTU60-XSub-2D + Task: Skeleton-based Action Recognition + Metrics: + Top 1 Accuracy: 92.30 + Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcnpp/stgcnpp_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d/stgcnpp_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d.log + Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcnpp/stgcnpp_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d/stgcnpp_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d_20221228-cd11a691.pth + + - Name: stgcnpp_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d + Config: configs/skeleton/stgcnpp/stgcnpp_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d.py + In Collection: STGCN++ + Metadata: + Architecture: STGCN++ + Batch Size: 16 + Epochs: 80 + FLOPs: 1.95G + Parameters: 1.39M + Training Data: NTU60-XSub-2D + Training Resources: 8 GPUs + Results: + Dataset: NTU60-XSub-2D + Task: Skeleton-based Action Recognition + Metrics: + Top 1 Accuracy: 87.30 + Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcnpp/stgcnpp_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d/stgcnpp_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d.log + Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcnpp/stgcnpp_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d/stgcnpp_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d_20221228-19a34aba.pth + + - Name: stgcnpp_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d + Config: configs/skeleton/stgcnpp/stgcnpp_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d.py + In Collection: STGCN++ + Metadata: + Architecture: STGCN++ + Batch Size: 16 + Epochs: 80 + FLOPs: 1.95G + Parameters: 
1.39M + Training Data: NTU60-XSub-2D + Training Resources: 8 GPUs + Results: + Dataset: NTU60-XSub-2D + Task: Skeleton-based Action Recognition + Metrics: + Top 1 Accuracy: 88.76 + Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcnpp/stgcnpp_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d/stgcnpp_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d.log + Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcnpp/stgcnpp_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d/stgcnpp_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d_20221228-c02a0749.pth + + - Name: stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d + Config: configs/skeleton/stgcnpp/stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d.py + In Collection: STGCN++ + Metadata: + Architecture: STGCN++ + Batch Size: 16 + Epochs: 80 + FLOPs: 2.96G + Parameters: 1.4M + Training Data: NTU60-XSub-3D + Training Resources: 8 GPUs + Results: + Dataset: NTU60-XSub-3D + Task: Skeleton-based Action Recognition + Metrics: + Top 1 Accuracy: 89.14 + Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcnpp/stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d/stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d.log + Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcnpp/stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d/stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d_20221230-4e455ce3.pth + + - Name: stgcnpp_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d + Config: configs/skeleton/stgcnpp/stgcnpp_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d.py + In Collection: STGCN++ + Metadata: + Architecture: STGCN++ + Batch Size: 16 + Epochs: 80 + FLOPs: 2.96G + Parameters: 1.4M + Training Data: NTU60-XSub-3D + Training Resources: 8 GPUs + Results: + Dataset: NTU60-XSub-3D + Task: Skeleton-based Action Recognition + Metrics: + Top 1 Accuracy: 90.21 + Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcnpp/stgcnpp_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d/stgcnpp_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d.log + Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcnpp/stgcnpp_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d/stgcnpp_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d_20221230-7f356072.pth + + - Name: stgcnpp_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d + Config: configs/skeleton/stgcnpp/stgcnpp_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d.py + In Collection: STGCN++ + Metadata: + Architecture: STGCN++ + Batch Size: 16 + Epochs: 80 + FLOPs: 2.96G + Parameters: 1.4M + Training Data: NTU60-XSub-3D + Training Resources: 8 GPUs + Results: + Dataset: NTU60-XSub-3D + Task: Skeleton-based Action Recognition + Metrics: + Top 1 Accuracy: 86.67 + Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcnpp/stgcnpp_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d/stgcnpp_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d.log + Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcnpp/stgcnpp_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d/stgcnpp_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d_20221230-650de5cc.pth + + - Name: stgcnpp_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d + Config: configs/skeleton/stgcnpp/stgcnpp_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d.py + In Collection: STGCN++ + Metadata: + Architecture: STGCN++ + Batch Size: 16 + Epochs: 80 + FLOPs: 2.96G + Parameters: 1.4M + Training Data: NTU60-XSub-3D + Training Resources: 8 GPUs + Results: + Dataset: NTU60-XSub-3D + Task: Skeleton-based Action 
Recognition + Metrics: + Top 1 Accuracy: 87.45 + Training Log: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcnpp/stgcnpp_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d/stgcnpp_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d.log + Weights: https://download.openmmlab.com/mmaction/v1.0/skeleton/stgcnpp/stgcnpp_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d/stgcnpp_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d_20221230-b00440d2.pth diff --git a/configs/skeleton/stgcnpp/stgcnpp_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d.py b/configs/skeleton/stgcnpp/stgcnpp_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d.py new file mode 100644 index 0000000000..d33546d8a9 --- /dev/null +++ b/configs/skeleton/stgcnpp/stgcnpp_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-2d.py @@ -0,0 +1,67 @@ +_base_ = 'stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py' + +dataset_type = 'PoseDataset' +ann_file = 'data/skeleton/ntu60_2d.pkl' +train_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['bm']), + dict(type='UniformSampleFrames', clip_len=100), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['bm']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['bm']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=10, + test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=train_pipeline, + split='xsub_train'))) +val_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=val_pipeline, + split='xsub_val', + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=test_pipeline, + split='xsub_val', + test_mode=True)) diff --git a/configs/skeleton/stgcnpp/stgcnpp_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d.py b/configs/skeleton/stgcnpp/stgcnpp_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d.py new file mode 100644 index 0000000000..a54dc0b37a --- /dev/null +++ b/configs/skeleton/stgcnpp/stgcnpp_8xb16-bone-motion-u100-80e_ntu60-xsub-keypoint-3d.py @@ -0,0 +1,67 @@ +_base_ = 'stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d.py' + +dataset_type = 'PoseDataset' +ann_file = 'data/skeleton/ntu60_3d.pkl' +train_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['bm']), + dict(type='UniformSampleFrames', clip_len=100), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['bm']), + dict( + 
type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['bm']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=10, + test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=train_pipeline, + split='xsub_train'))) +val_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=val_pipeline, + split='xsub_val', + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=test_pipeline, + split='xsub_val', + test_mode=True)) diff --git a/configs/skeleton/stgcnpp/stgcnpp_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d.py b/configs/skeleton/stgcnpp/stgcnpp_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d.py new file mode 100644 index 0000000000..15808fca82 --- /dev/null +++ b/configs/skeleton/stgcnpp/stgcnpp_8xb16-bone-u100-80e_ntu60-xsub-keypoint-2d.py @@ -0,0 +1,67 @@ +_base_ = 'stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py' + +dataset_type = 'PoseDataset' +ann_file = 'data/skeleton/ntu60_2d.pkl' +train_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['b']), + dict(type='UniformSampleFrames', clip_len=100), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['b']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['b']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=10, + test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=train_pipeline, + split='xsub_train'))) +val_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=val_pipeline, + split='xsub_val', + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=test_pipeline, + split='xsub_val', + test_mode=True)) diff --git 
a/configs/skeleton/stgcnpp/stgcnpp_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d.py b/configs/skeleton/stgcnpp/stgcnpp_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d.py new file mode 100644 index 0000000000..a30c593417 --- /dev/null +++ b/configs/skeleton/stgcnpp/stgcnpp_8xb16-bone-u100-80e_ntu60-xsub-keypoint-3d.py @@ -0,0 +1,67 @@ +_base_ = 'stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d.py' + +dataset_type = 'PoseDataset' +ann_file = 'data/skeleton/ntu60_3d.pkl' +train_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['b']), + dict(type='UniformSampleFrames', clip_len=100), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['b']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['b']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=10, + test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=train_pipeline, + split='xsub_train'))) +val_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=val_pipeline, + split='xsub_val', + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=test_pipeline, + split='xsub_val', + test_mode=True)) diff --git a/configs/skeleton/stgcnpp/stgcnpp_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d.py b/configs/skeleton/stgcnpp/stgcnpp_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d.py new file mode 100644 index 0000000000..ceaae53fa2 --- /dev/null +++ b/configs/skeleton/stgcnpp/stgcnpp_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-2d.py @@ -0,0 +1,67 @@ +_base_ = 'stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py' + +dataset_type = 'PoseDataset' +ann_file = 'data/skeleton/ntu60_2d.pkl' +train_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['jm']), + dict(type='UniformSampleFrames', clip_len=100), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['jm']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['jm']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=10, + test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + 
batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=train_pipeline, + split='xsub_train'))) +val_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=val_pipeline, + split='xsub_val', + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=test_pipeline, + split='xsub_val', + test_mode=True)) diff --git a/configs/skeleton/stgcnpp/stgcnpp_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d.py b/configs/skeleton/stgcnpp/stgcnpp_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d.py new file mode 100644 index 0000000000..dd5833ca0f --- /dev/null +++ b/configs/skeleton/stgcnpp/stgcnpp_8xb16-joint-motion-u100-80e_ntu60-xsub-keypoint-3d.py @@ -0,0 +1,67 @@ +_base_ = 'stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d.py' + +dataset_type = 'PoseDataset' +ann_file = 'data/skeleton/ntu60_3d.pkl' +train_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['jm']), + dict(type='UniformSampleFrames', clip_len=100), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['jm']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['jm']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=10, + test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=train_pipeline, + split='xsub_train'))) +val_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=val_pipeline, + split='xsub_val', + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=test_pipeline, + split='xsub_val', + test_mode=True)) diff --git a/configs/skeleton/stgcnpp/stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py b/configs/skeleton/stgcnpp/stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py new file mode 100644 index 0000000000..3a497bb619 --- /dev/null +++ b/configs/skeleton/stgcnpp/stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py @@ -0,0 +1,106 @@ +_base_ = '../../_base_/default_runtime.py' + +model = dict( + type='RecognizerGCN', + backbone=dict( + type='STGCN', + gcn_adaptive='init', + gcn_with_res=True, + tcn_type='mstcn', + graph_cfg=dict(layout='coco', 
mode='spatial')), + cls_head=dict(type='GCNHead', num_classes=60, in_channels=256)) + +dataset_type = 'PoseDataset' +ann_file = 'data/skeleton/ntu60_2d.pkl' +train_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['j']), + dict(type='UniformSampleFrames', clip_len=100), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['j']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='PreNormalize2D'), + dict(type='GenSkeFeat', dataset='coco', feats=['j']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=10, + test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=train_pipeline, + split='xsub_train'))) +val_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=val_pipeline, + split='xsub_val', + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=test_pipeline, + split='xsub_val', + test_mode=True)) + +val_evaluator = [dict(type='AccMetric')] +test_evaluator = val_evaluator + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +param_scheduler = [ + dict( + type='CosineAnnealingLR', + eta_min=0, + T_max=16, + by_epoch=True, + convert_to_iter_based=True) +] + +optim_wrapper = dict( + optimizer=dict( + type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True)) + +default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (16 samples per GPU). 
+auto_scale_lr = dict(enable=False, base_batch_size=128) diff --git a/configs/skeleton/stgcnpp/stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d.py b/configs/skeleton/stgcnpp/stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d.py new file mode 100644 index 0000000000..fa2a96c500 --- /dev/null +++ b/configs/skeleton/stgcnpp/stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-3d.py @@ -0,0 +1,106 @@ +_base_ = '../../_base_/default_runtime.py' + +model = dict( + type='RecognizerGCN', + backbone=dict( + type='STGCN', + gcn_adaptive='init', + gcn_with_res=True, + tcn_type='mstcn', + graph_cfg=dict(layout='nturgb+d', mode='spatial')), + cls_head=dict(type='GCNHead', num_classes=60, in_channels=256)) + +dataset_type = 'PoseDataset' +ann_file = 'data/skeleton/ntu60_3d.pkl' +train_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']), + dict(type='UniformSampleFrames', clip_len=100), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +val_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=1, test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] +test_pipeline = [ + dict(type='PreNormalize3D'), + dict(type='GenSkeFeat', dataset='nturgb+d', feats=['j']), + dict( + type='UniformSampleFrames', clip_len=100, num_clips=10, + test_mode=True), + dict(type='PoseDecode'), + dict(type='FormatGCNInput', num_person=2), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=5, + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=train_pipeline, + split='xsub_train'))) +val_dataloader = dict( + batch_size=16, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=val_pipeline, + split='xsub_val', + test_mode=True)) +test_dataloader = dict( + batch_size=1, + num_workers=2, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + ann_file=ann_file, + pipeline=test_pipeline, + split='xsub_val', + test_mode=True)) + +val_evaluator = [dict(type='AccMetric')] +test_evaluator = val_evaluator + +train_cfg = dict( + type='EpochBasedTrainLoop', max_epochs=16, val_begin=1, val_interval=1) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +param_scheduler = [ + dict( + type='CosineAnnealingLR', + eta_min=0, + T_max=16, + by_epoch=True, + convert_to_iter_based=True) +] + +optim_wrapper = dict( + optimizer=dict( + type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0005, nesterov=True)) + +default_hooks = dict(checkpoint=dict(interval=1), logger=dict(interval=100)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (16 samples per GPU). 
+auto_scale_lr = dict(enable=False, base_batch_size=128) diff --git a/mmaction/models/backbones/stgcn.py b/mmaction/models/backbones/stgcn.py index 9900a49648..fed496848f 100644 --- a/mmaction/models/backbones/stgcn.py +++ b/mmaction/models/backbones/stgcn.py @@ -7,7 +7,7 @@ from mmengine.model import BaseModule, ModuleList from mmaction.registry import MODELS -from ..utils import Graph, unit_gcn, unit_tcn +from ..utils import Graph, mstcn, unit_gcn, unit_tcn EPS = 1e-4 @@ -45,7 +45,7 @@ def __init__(self, assert len(kwargs) == 0, f'Invalid arguments: {kwargs}' tcn_type = tcn_kwargs.pop('type', 'unit_tcn') - assert tcn_type in ['unit_tcn'] + assert tcn_type in ['unit_tcn', 'mstcn'] gcn_type = gcn_kwargs.pop('type', 'unit_gcn') assert gcn_type in ['unit_gcn'] @@ -54,7 +54,9 @@ def __init__(self, if tcn_type == 'unit_tcn': self.tcn = unit_tcn( out_channels, out_channels, 9, stride=stride, **tcn_kwargs) - + elif tcn_type == 'mstcn': + self.tcn = mstcn( + out_channels, out_channels, stride=stride, **tcn_kwargs) self.relu = nn.ReLU() if not residual: @@ -135,10 +137,10 @@ class STGCN(BaseModule): >>> print(output.shape) >>> >>> # custom settings - >>> # add additional residual connection for the first four gcns - >>> stage_cfgs = {'gcn_with_res': [True] * 4 + [False] * 6} - >>> model = STGCN(graph_cfg=dict(layout='coco', mode=mode), - ... num_stages=10, stage_cfgs=stage_cfgs) + >>> # instantiate STGCN++ + >>> model = STGCN(graph_cfg=dict(layout='coco', mode='spatial'), + ... gcn_adaptive='init', gcn_with_res=True, + ... tcn_type='mstcn') >>> model.init_weights() >>> output = model(inputs) >>> print(output.shape) @@ -158,8 +160,8 @@ def __init__(self, num_stages: int = 10, inflate_stages: List[int] = [5, 8], down_stages: List[int] = [5, 8], - stage_cfgs: Dict = dict(), - init_cfg: Optional[Union[Dict, List[Dict]]] = None) -> None: + init_cfg: Optional[Union[Dict, List[Dict]]] = None, + **kwargs) -> None: super().__init__(init_cfg=init_cfg) self.graph = Graph(**graph_cfg) @@ -174,8 +176,8 @@ def __init__(self, else: self.data_bn = nn.Identity() - lw_kwargs = [cp.deepcopy(stage_cfgs) for i in range(num_stages)] - for k, v in stage_cfgs.items(): + lw_kwargs = [cp.deepcopy(kwargs) for i in range(num_stages)] + for k, v in kwargs.items(): if isinstance(v, (tuple, list)) and len(v) == num_stages: for i in range(num_stages): lw_kwargs[i][k] = v[i] diff --git a/mmaction/models/utils/gcn_utils.py b/mmaction/models/utils/gcn_utils.py index 1ce2978e58..4293bb7300 100644 --- a/mmaction/models/utils/gcn_utils.py +++ b/mmaction/models/utils/gcn_utils.py @@ -310,3 +310,112 @@ def __init__( def forward(self, x: torch.Tensor) -> torch.Tensor: """Defines the computation performed at every call.""" return self.drop(self.bn(self.conv(x))) + + +class mstcn(BaseModule): + """The multi-scale temporal convolutional network. + + Args: + in_channels (int): Number of input channels. + out_channels (int): Number of output channels. + mid_channels (int): Number of middle channels. Defaults to None. + dropout (float): Dropout probability. Defaults to 0. + ms_cfg (list): The config of multi-scale branches. Defaults to + ``[(3, 1), (3, 2), (3, 3), (3, 4), ('max', 3), '1x1']``. + stride (int): Stride of the temporal convolution. Defaults to 1. + init_cfg (dict or list[dict]): Initialization config dict. + Defaults to None. 
+ """ + + def __init__(self, + in_channels: int, + out_channels: int, + mid_channels: int = None, + dropout: float = 0., + ms_cfg: List = [(3, 1), (3, 2), (3, 3), (3, 4), ('max', 3), + '1x1'], + stride: int = 1, + init_cfg: Union[Dict, List[Dict]] = None) -> None: + super().__init__(init_cfg=init_cfg) + # Multiple branches of temporal convolution + self.ms_cfg = ms_cfg + num_branches = len(ms_cfg) + self.num_branches = num_branches + self.in_channels = in_channels + self.out_channels = out_channels + self.act = nn.ReLU() + + if mid_channels is None: + mid_channels = out_channels // num_branches + rem_mid_channels = out_channels - mid_channels * (num_branches - 1) + else: + assert isinstance(mid_channels, float) and mid_channels > 0 + mid_channels = int(out_channels * mid_channels) + rem_mid_channels = mid_channels + + self.mid_channels = mid_channels + self.rem_mid_channels = rem_mid_channels + + branches = [] + for i, cfg in enumerate(ms_cfg): + branch_c = rem_mid_channels if i == 0 else mid_channels + if cfg == '1x1': + branches.append( + nn.Conv2d( + in_channels, + branch_c, + kernel_size=1, + stride=(stride, 1))) + continue + assert isinstance(cfg, tuple) + if cfg[0] == 'max': + branches.append( + Sequential( + nn.Conv2d(in_channels, branch_c, kernel_size=1), + nn.BatchNorm2d(branch_c), self.act, + nn.MaxPool2d( + kernel_size=(cfg[1], 1), + stride=(stride, 1), + padding=(1, 0)))) + continue + assert isinstance(cfg[0], int) and isinstance(cfg[1], int) + branch = Sequential( + nn.Conv2d(in_channels, branch_c, kernel_size=1), + nn.BatchNorm2d(branch_c), self.act, + unit_tcn( + branch_c, + branch_c, + kernel_size=cfg[0], + stride=stride, + dilation=cfg[1], + norm=None)) + branches.append(branch) + + self.branches = ModuleList(branches) + tin_channels = mid_channels * (num_branches - 1) + rem_mid_channels + + self.transform = Sequential( + nn.BatchNorm2d(tin_channels), self.act, + nn.Conv2d(tin_channels, out_channels, kernel_size=1)) + + self.bn = nn.BatchNorm2d(out_channels) + self.drop = nn.Dropout(dropout, inplace=True) + + def inner_forward(self, x: torch.Tensor) -> torch.Tensor: + """Defines the computation performed at every call.""" + N, C, T, V = x.shape + + branch_outs = [] + for tempconv in self.branches: + out = tempconv(x) + branch_outs.append(out) + + feat = torch.cat(branch_outs, dim=1) + feat = self.transform(feat) + return feat + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Defines the computation performed at every call.""" + out = self.inner_forward(x) + out = self.bn(out) + return self.drop(out) diff --git a/model-index.yml b/model-index.yml index d1503952ae..a41addf98d 100644 --- a/model-index.yml +++ b/model-index.yml @@ -19,4 +19,5 @@ Import: - configs/detection/acrn/metafile.yml - configs/skeleton/stgcn/metafile.yml - configs/skeleton/2s-agcn/metafile.yml +- configs/skeleton/stgcnpp/metafile.yml - configs/skeleton/posec3d/metafile.yml diff --git a/tests/models/backbones/test_stgcn.py b/tests/models/backbones/test_stgcn.py index 31ee57484e..10e8b33d05 100644 --- a/tests/models/backbones/test_stgcn.py +++ b/tests/models/backbones/test_stgcn.py @@ -35,12 +35,12 @@ def test_stgcn_backbone(): assert output.shape == torch.Size([2, 2, 256, 38, 17]) # custom settings - # add additional residual connection for the first four gcns - stage_cfgs = {'gcn_with_res': [True] * 4 + [False] * 6} + # instantiate STGCN++ model = STGCN( - graph_cfg=dict(layout='coco', mode=mode), - num_stages=10, - stage_cfgs=stage_cfgs) + graph_cfg=dict(layout='coco', mode='spatial'), + 
gcn_adaptive='init', + gcn_with_res=True, + tcn_type='mstcn') model.init_weights() output = model(inputs) assert output.shape == torch.Size([2, 2, 256, 38, 17]) diff --git a/tests/models/recognizers/test_recognizer_gcn.py b/tests/models/recognizers/test_recognizer_gcn.py index 673bc45935..9734ac2240 100644 --- a/tests/models/recognizers/test_recognizer_gcn.py +++ b/tests/models/recognizers/test_recognizer_gcn.py @@ -61,3 +61,11 @@ def test_agcn(): '2s-agcn/2s-agcn_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py') input_shape = (1, 2, 30, 17, 3) # N M T V C train_test_step(config, input_shape=input_shape) + + +def test_stgcn_plusplus(): + register_all_modules() + config = get_skeletongcn_cfg( + 'stgcn++/stgcn++_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py') + input_shape = (1, 2, 30, 17, 3) # N M T V C + train_test_step(config, input_shape=input_shape) From d96eeab1eb07653236ff84c4095223b2094ffd8d Mon Sep 17 00:00:00 2001 From: wxDai Date: Fri, 6 Jan 2023 12:56:28 +0800 Subject: [PATCH 50/57] fix ut (#2166) --- tests/models/recognizers/test_recognizer_gcn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/models/recognizers/test_recognizer_gcn.py b/tests/models/recognizers/test_recognizer_gcn.py index 9734ac2240..7ae1441a6b 100644 --- a/tests/models/recognizers/test_recognizer_gcn.py +++ b/tests/models/recognizers/test_recognizer_gcn.py @@ -66,6 +66,6 @@ def test_agcn(): def test_stgcn_plusplus(): register_all_modules() config = get_skeletongcn_cfg( - 'stgcn++/stgcn++_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py') + 'stgcnpp/stgcnpp_8xb16-joint-u100-80e_ntu60-xsub-keypoint-2d.py') input_shape = (1, 2, 30, 17, 3) # N M T V C train_test_step(config, input_shape=input_shape) From 588732ba6540fd1ed4e1d1388584fd50a4fc7118 Mon Sep 17 00:00:00 2001 From: Kai Hu Date: Fri, 6 Jan 2023 01:25:24 -0500 Subject: [PATCH 51/57] [Doc] Update TSM models' README & metafile (#2128) Co-authored-by: wxDai --- configs/recognition/tsm/README.md | 66 ++++----------- configs/recognition/tsm/metafile.yml | 82 +++++++++---------- ...ed-r50_8xb16-1x1x16-50e_kinetics400-rgb.py | 10 ++- ...etrained-r50_8xb16-1x1x16-50e_sthv2-rgb.py | 8 +- ...ned-r50_8xb16-1x1x8-50e_kinetics400-rgb.py | 14 +++- ...retrained-r50_8xb16-1x1x8-50e_sthv2-rgb.py | 14 +++- ...0_8xb16-dense-1x1x8-50e_kinetics400-rgb.py | 14 +++- 7 files changed, 100 insertions(+), 108 deletions(-) diff --git a/configs/recognition/tsm/README.md b/configs/recognition/tsm/README.md index 5c322012e5..ca490117c3 100644 --- a/configs/recognition/tsm/README.md +++ b/configs/recognition/tsm/README.md @@ -20,64 +20,28 @@ The explosive growth in video streaming gives rise to challenges on performing v ### Kinetics-400 -| frame sampling strategy | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | FLOPs | params | config | ckpt | log | -| :---------------------: | :------------: | :--: | :---------------------------: | :------: | :------: | :------: | :----------------: | :----: | :----: | :-------------------------: | ------------------------: | -----------------------: | -| 1x1x8 | short-side 320 | 8 | ResNet50 | ImageNet | 72.95 | 90.45 | 8 clips x 10 crop | 32.88G | 23.87M | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_kinetics400-rgb_20220831-64d69186.pth) | 
[log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_kinetics400-rgb.log) | -| 1x1x8 | short-side 320 | 8 | ResNet50 | ImageNet | 73.11 | 90.06 | 8 clips x 10 crop | 32.88G | 23.87M | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-100e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x8-100e_kinetics400-rgb_20220831-a6db1e5d.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-100e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x8-100e_kinetics400-rgb.log) | -| 1x1x16 | short-side 320 | 8 | ResNet50 | ImageNet | 74.64 | 91.42 | 16 clips x 10 crop | 65.75G | 23.87M | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_kinetics400-rgb_20220831-042b1748.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_kinetics400-rgb.log) | -| 1x1x8 (dense) | short-side 320 | 8 | ResNet50 | ImageNet | 73.39 | 90.78 | 8 clips x 10 crop | 32.88G | 23.87M | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-dense-1x1x8-50e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-dense-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-dense-1x1x8-50e_kinetics400-rgb_20220831-f55d3c2b.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-dense-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-dense-1x1x8-50e_kinetics400-rgb.log) | -| 1x1x8 | short-side 320 | 8 | ResNet50 (NonLocalDotProduct) | ImageNet | 74.17 | 90.95 | 8 clips x 10 crop | 61.30G | 31.68M | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-r50-nl-dot-product_8xb16-1x1x8-50e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-dot-product_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-dot-product_8xb16-1x1x8-50e_kinetics400-rgb_20220831-108bfde5.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-dot-product_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-dot-product_8xb16-1x1x8-50e_kinetics400-rgb.log) | -| 1x1x8 | short-side 320 | 8 | ResNet50 (NonLocalGauss) | ImageNet | 73.37 | 90.82 | 8 clips x 10 crop | 59.06G | 28.00M | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-r50-nl-gaussian_8xb16-1x1x8-50e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-gaussian_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-gaussian_8xb16-1x1x8-50e_kinetics400-rgb_20220831-7e54dacf.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-gaussian_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-gaussian_8xb16-1x1x8-50e_kinetics400-rgb.log) | -| 1x1x8 | short-side 320 | 8 | ResNet50 (NonLocalEmbedGauss) | 
ImageNet | 74.45 | 91.11 | 8 clips x 10 crop | 61.30G | 31.68M | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-r50-nl-embedded-gaussian_8xb16-1x1x8-50e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-embedded-gaussian_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-embedded-gaussian_8xb16-1x1x8-50e_kinetics400-rgb_20220831-35eddb57.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-embedded-gaussian_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-embedded-gaussian_8xb16-1x1x8-50e_kinetics400-rgb.log) | +| frame sampling strategy | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | FLOPs | params | config | ckpt | log | +| :---------------------: | :--------: | :--: | :---------------------------: | :------: | :------: | :------: | :----------------: | :----: | :----: | :--------------------------: | -------------------------: | -------------------------: | +| 1x1x8 | 224x224 | 8 | ResNet50 | ImageNet | 73.18 | 90.56 | 8 clips x 10 crop | 32.88G | 23.87M | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_kinetics400-rgb_20220831-64d69186.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_kinetics400-rgb.log) | +| 1x1x8 | 224x224 | 8 | ResNet50 | ImageNet | 73.22 | 90.22 | 8 clips x 10 crop | 32.88G | 23.87M | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-100e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-100e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x8-100e_kinetics400-rgb_20220831-a6db1e5d.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-100e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x8-100e_kinetics400-rgb.log) | +| 1x1x16 | 224x224 | 8 | ResNet50 | ImageNet | 75.12 | 91.55 | 16 clips x 10 crop | 65.75G | 23.87M | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_kinetics400-rgb_20220831-042b1748.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_kinetics400-rgb.log) | +| 1x1x8 (dense) | 224x224 | 8 | ResNet50 | ImageNet | 73.38 | 90.78 | 8 clips x 10 crop | 32.88G | 23.87M | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-dense-1x1x8-50e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-dense-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-dense-1x1x8-50e_kinetics400-rgb_20220831-f55d3c2b.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-dense-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-dense-1x1x8-50e_kinetics400-rgb.log) | +| 
1x1x8 | 224x224 | 8 | ResNet50 (NonLocalDotProduct) | ImageNet | 74.49 | 91.15 | 8 clips x 10 crop | 61.30G | 31.68M | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-r50-nl-dot-product_8xb16-1x1x8-50e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-dot-product_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-dot-product_8xb16-1x1x8-50e_kinetics400-rgb_20220831-108bfde5.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-dot-product_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-dot-product_8xb16-1x1x8-50e_kinetics400-rgb.log) | +| 1x1x8 | 224x224 | 8 | ResNet50 (NonLocalGauss) | ImageNet | 73.66 | 90.99 | 8 clips x 10 crop | 59.06G | 28.00M | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-r50-nl-gaussian_8xb16-1x1x8-50e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-gaussian_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-gaussian_8xb16-1x1x8-50e_kinetics400-rgb_20220831-7e54dacf.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-gaussian_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-gaussian_8xb16-1x1x8-50e_kinetics400-rgb.log) | +| 1x1x8 | 224x224 | 8 | ResNet50 (NonLocalEmbedGauss) | ImageNet | 74.34 | 91.23 | 8 clips x 10 crop | 61.30G | 31.68M | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-r50-nl-embedded-gaussian_8xb16-1x1x8-50e_kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-embedded-gaussian_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-embedded-gaussian_8xb16-1x1x8-50e_kinetics400-rgb_20220831-35eddb57.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-embedded-gaussian_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-embedded-gaussian_8xb16-1x1x8-50e_kinetics400-rgb.log) | ### Something-something V2 | frame sampling strategy | resolution | gpus | backbone | pretrain | top1 acc | top5 acc | testing protocol | FLOPs | params | config | ckpt | log | | :---------------------: | :--------: | :--: | :-------: | :------: | :------: | :------: | :----------------: | :----: | :----: | :---------------------------------: | :-------------------------------: | :------------------------------: | -| 1x1x8 | height 256 | 8 | ResNet50 | ImageNet | 60.20 | 86.13 | 8 clips x 10 crop | 32.88G | 23.87M | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb/tsm_imagenet-pretrained-r101_8xb16-1x1x8-50e_sthv2-rgb_20221122-cb2cc64e.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb.log) | -| 1x1x16 | height 256 | 8 | ResNet50 | ImageNet | 62.46 | 87.75 | 16 clips x 10 crop | 65.75G | 23.87M | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_sthv2-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_sthv2-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_sthv2-rgb_20221122-b1fb8264.pth) | 
[log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_sthv2-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_sthv2-rgb.log) | -| 1x1x8 | height 256 | 8 | ResNet101 | ImageNet | 60.49 | 85.99 | 8 clips x 10 crop | 62.66G | 42.86M | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-r101_8xb16-1x1x8-50e_sthv2-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r101_8xb16-1x1x8-50e_sthv2-rgb/tsm_imagenet-pretrained-r101_8xb16-1x1x8-50e_sthv2-rgb_20221122-cb2cc64e.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r101_8xb16-1x1x8-50e_sthv2-rgb/tsm_imagenet-pretrained-r101_8xb16-1x1x8-50e_sthv2-rgb.log) | - -Note: - -1. The **gpus** indicates the number of gpu we used to get the checkpoint. It is noteworthy that the configs we provide are used for 8 gpus as default. - According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you may set the learning rate proportional to the batch size if you use different GPUs or videos per GPU, - e.g., lr=0.01 for 4 GPUs x 2 video/gpu and lr=0.08 for 16 GPUs x 4 video/gpu. - - +| 1x1x8 | 224x224 | 8 | ResNet50 | ImageNet | 60.20 | 86.13 | 8 clips x 10 crop | 32.88G | 23.87M | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb_20221122-446d261a.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb.log) | +| 1x1x16 | 224x224 | 8 | ResNet50 | ImageNet | 62.46 | 87.75 | 16 clips x 10 crop | 65.75G | 23.87M | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_sthv2-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_sthv2-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_sthv2-rgb_20221122-b1fb8264.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_sthv2-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_sthv2-rgb.log) | +| 1x1x8 | 224x224 | 8 | ResNet101 | ImageNet | 60.49 | 85.99 | 8 clips x 10 crop | 62.66G | 42.86M | [config](/configs/recognition/tsm/tsm_imagenet-pretrained-r101_8xb16-1x1x8-50e_sthv2-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r101_8xb16-1x1x8-50e_sthv2-rgb/tsm_imagenet-pretrained-r101_8xb16-1x1x8-50e_sthv2-rgb_20221122-cb2cc64e.pth) | [log](https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r101_8xb16-1x1x8-50e_sthv2-rgb/tsm_imagenet-pretrained-r101_8xb16-1x1x8-50e_sthv2-rgb.log) | +1. The **gpus** indicates the number of gpus we used to get the checkpoint. If you want to use a different number of gpus or videos per gpu, the best way is to set `--auto-scale-lr` when calling `tools/train.py`, this parameter will auto-scale the learning rate according to the actual batch size and the original batch size. 2. The validation set of Kinetics400 we used consists of 19796 videos. These videos are available at [Kinetics400-Validation](https://mycuhk-my.sharepoint.com/:u:/g/personal/1155136485_link_cuhk_edu_hk/EbXw2WX94J1Hunyt3MWNDJUBz-nHvQYhO9pvKqm6g39PMA?e=a9QldB). 
The corresponding [data list](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_val_list.txt) (each line is of the format 'video_id, num_frames, label_index') and the [label map](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_class2ind.txt) are also available. -For more details on data preparation, you can refer to the **Prepare videos** part in the [Data Preparation Tutorial](/docs/en/user_guides/2_data_prepare.md). +For more details on data preparation, you can refer to [Kinetics400](/tools/data/kinetics/README.md). ## Train @@ -91,7 +55,7 @@ Example: train TSM model on Kinetics-400 dataset in a deterministic option. ```shell python tools/train.py configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_kinetics400-rgb.py \ - --cfg-options randomness.seed=0 randomness.deterministic=True + --seed=0 --deterministic ``` For more details, you can refer to the **Training** part in the [Training and Test Tutorial](/docs/en/user_guides/4_train_test.md). diff --git a/configs/recognition/tsm/metafile.yml b/configs/recognition/tsm/metafile.yml index f5931d0ef0..5adafb069f 100644 --- a/configs/recognition/tsm/metafile.yml +++ b/configs/recognition/tsm/metafile.yml @@ -13,10 +13,10 @@ Models: Architecture: ResNet50 Batch Size: 16 Epochs: 100 - FLOPs: 32965562368 - Parameters: 24327632 + FLOPs: 32.88G + Parameters: 23.87M Pretrained: ImageNet - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -24,8 +24,8 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 72.95 - Top 5 Accuracy: 90.45 + Top 1 Accuracy: 73.18 + Top 5 Accuracy: 90.56 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_kinetics400-rgb_20220831-64d69186.pth @@ -36,10 +36,10 @@ Models: Architecture: ResNet50 Batch Size: 16 Epochs: 100 - FLOPs: 32965562368 - Parameters: 24327632 + FLOPs: 32.88G + Parameters: 23.87M Pretrained: ImageNet - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -47,22 +47,22 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 73.11 - Top 5 Accuracy: 90.06 + Top 1 Accuracy: 73.22 + Top 5 Accuracy: 90.22 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-100e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x8-100e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-100e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x8-100e_kinetics400-rgb_20220831-a6db1e5d.pth - Name: tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_kinetics400-rgb - Config: tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_kinetics400-rgb.py + Config: configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_kinetics400-rgb.py In Collection: TSM Metadata: Architecture: ResNet50 Batch Size: 16 Epochs: 100 - FLOPs: 65931124736 - Parameters: 24327632 + FLOPs: 65.75G + Parameters: 23.87M Pretrained: ImageNet - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ 
-70,8 +70,8 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 74.64 - Top 5 Accuracy: 91.42 + Top 1 Accuracy: 75.12 + Top 5 Accuracy: 91.55 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_kinetics400-rgb_20220831-042b1748.pth @@ -82,10 +82,10 @@ Models: Architecture: ResNet50 Batch Size: 16 Epochs: 100 - FLOPs: 32965562368 - Parameters: 24327632 + FLOPs: 32.88G + Parameters: 23.87M Pretrained: ImageNet - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -93,22 +93,22 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 73.39 + Top 1 Accuracy: 73.38 Top 5 Accuracy: 90.78 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-dense-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-dense-1x1x8-50e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-dense-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50_8xb16-dense-1x1x8-50e_kinetics400-rgb_20220831-f55d3c2b.pth - - Name: tsm_imagenet-pretrained-r50-nl-embedded_gaussian_8xb16-1x1x8-50e_kinetics400-rgb - Config: configs/recognition/tsm/tsm_imagenet-pretrained-r50-nl-embedded_gaussian_8xb16-1x1x8-50e_kinetics400-rgb.py + - Name: tsm_imagenet-pretrained-r50-nl-embedded-gaussian_8xb16-1x1x8-50e_kinetics400-rgb + Config: configs/recognition/tsm/tsm_imagenet-pretrained-r50-nl-embedded-gaussian_8xb16-1x1x8-50e_kinetics400-rgb.py In Collection: TSM Metadata: Architecture: ResNet50 Batch Size: 16 Epochs: 100 - FLOPs: 49457811456 - Parameters: 31682000 + FLOPs: 61.30G + Parameters: 31.68M Pretrained: ImageNet - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -116,8 +116,8 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 74.45 - Top 5 Accuracy: 91.11 + Top 1 Accuracy: 74.34 + Top 5 Accuracy: 91.23 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-embedded-gaussian_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-embedded-gaussian_8xb16-1x1x8-50e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-embedded-gaussian_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-embedded-gaussian_8xb16-1x1x8-50e_kinetics400-rgb_20220831-35eddb57.pth @@ -128,10 +128,10 @@ Models: Architecture: ResNet50 Batch Size: 16 Epochs: 100 - FLOPs: 49457811456 - Parameters: 31682000 + FLOPs: 61.30G + Parameters: 31.68M Pretrained: ImageNet - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -139,8 +139,8 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 74.17 - Top 5 Accuracy: 90.95 + Top 1 Accuracy: 74.49 + Top 5 Accuracy: 91.15 Training Log: 
https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-dot-product_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-dot-product_8xb16-1x1x8-50e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-dot-product_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-dot-product_8xb16-1x1x8-50e_kinetics400-rgb_20220831-108bfde5.pth @@ -151,10 +151,10 @@ Models: Architecture: ResNet50 Batch Size: 16 Epochs: 100 - FLOPs: 41231355904 - Parameters: 28007888 + FLOPs: 59.06G + Parameters: 28.00M Pretrained: ImageNet - Resolution: short-side 320 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -162,8 +162,8 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 73.37 - Top 5 Accuracy: 90.82 + Top 1 Accuracy: 73.66 + Top 5 Accuracy: 90.99 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-gaussian_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-gaussian_8xb16-1x1x8-50e_kinetics400-rgb.log Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50-nl-gaussian_8xb16-1x1x8-50e_kinetics400-rgb/tsm_imagenet-pretrained-r50-nl-gaussian_8xb16-1x1x8-50e_kinetics400-rgb_20220831-7e54dacf.pth @@ -177,7 +177,7 @@ Models: FLOPs: 32.88G Parameters: 23.87M Pretrained: ImageNet - Resolution: height 256 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -188,7 +188,7 @@ Models: Top 1 Accuracy: 60.20 Top 5 Accuracy: 86.13 Training Log: https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb.log - Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb/tsm_imagenet-pretrained-r101_8xb16-1x1x8-50e_sthv2-rgb_20221122-cb2cc64e.pth + Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb_20221122-446d261a.pth - Name: tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_sthv2-rgb Config: configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_sthv2-rgb.py @@ -200,7 +200,7 @@ Models: FLOPs: 65.75G Parameters: 23.87M Pretrained: ImageNet - Resolution: height 256 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB @@ -223,7 +223,7 @@ Models: FLOPs: 62.66G Parameters: 42.86M Pretrained: ImageNet - Resolution: height 256 + Resolution: 224x224 Training Data: Kinetics-400 Training Resources: 8 GPUs Modality: RGB diff --git a/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_kinetics400-rgb.py b/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_kinetics400-rgb.py index c5c7d4ac55..b96a884135 100644 --- a/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_kinetics400-rgb.py +++ b/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_kinetics400-rgb.py @@ -10,8 +10,10 @@ ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' +file_client_args = dict(io_backend='disk') + train_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict(type='SampleFrames', 
clip_len=1, frame_interval=1, num_clips=16), dict(type='DecordDecode'), dict(type='Resize', scale=(-1, 256)), @@ -28,21 +30,21 @@ dict(type='PackActionInputs') ] val_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=1, frame_interval=1, num_clips=16, test_mode=True), - dict(type='DecordDecode'), + dict(type='DecordDecode', **file_client_args), dict(type='Resize', scale=(-1, 256)), dict(type='CenterCrop', crop_size=224), dict(type='FormatShape', input_format='NCHW'), dict(type='PackActionInputs') ] test_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=1, diff --git a/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_sthv2-rgb.py b/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_sthv2-rgb.py index 5a946b1fef..691e39c2b2 100644 --- a/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_sthv2-rgb.py +++ b/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x16-50e_sthv2-rgb.py @@ -2,8 +2,10 @@ model = dict(backbone=dict(num_segments=16), cls_head=dict(num_segments=16)) +file_client_args = dict(io_backend='disk') + train_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=16), dict(type='DecordDecode'), dict(type='Resize', scale=(-1, 256)), @@ -20,7 +22,7 @@ dict(type='PackActionInputs') ] val_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=1, @@ -34,7 +36,7 @@ dict(type='PackActionInputs') ] test_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=1, diff --git a/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_kinetics400-rgb.py b/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_kinetics400-rgb.py index dad7b77489..d8d83eded4 100644 --- a/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_kinetics400-rgb.py +++ b/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_kinetics400-rgb.py @@ -7,8 +7,10 @@ ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' +file_client_args = dict(io_backend='disk') + train_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), dict(type='DecordDecode'), dict(type='Resize', scale=(-1, 256)), @@ -25,7 +27,7 @@ dict(type='PackActionInputs') ] val_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=1, @@ -39,7 +41,7 @@ dict(type='PackActionInputs') ] test_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=1, @@ -112,3 +114,9 @@ paramwise_cfg=dict(fc_lr5=True), optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001), clip_grad=dict(max_norm=20, norm_type=2)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (16 samples per GPU). 
+auto_scale_lr = dict(enable=False, base_batch_size=128) diff --git a/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb.py b/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb.py index ae8c5f4dd3..ba9c393593 100644 --- a/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb.py +++ b/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-1x1x8-50e_sthv2-rgb.py @@ -9,8 +9,10 @@ ann_file_train = 'data/sthv2/sthv2_train_list_videos.txt' ann_file_val = 'data/sthv2/sthv2_val_list_videos.txt' +file_client_args = dict(io_backend='disk') + train_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8), dict(type='DecordDecode'), dict(type='Resize', scale=(-1, 256)), @@ -27,7 +29,7 @@ dict(type='PackActionInputs') ] val_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=1, @@ -41,7 +43,7 @@ dict(type='PackActionInputs') ] test_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='SampleFrames', clip_len=1, @@ -115,3 +117,9 @@ paramwise_cfg=dict(fc_lr5=True), optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0005), clip_grad=dict(max_norm=20, norm_type=2)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (16 samples per GPU). +auto_scale_lr = dict(enable=False, base_batch_size=128) diff --git a/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-dense-1x1x8-50e_kinetics400-rgb.py b/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-dense-1x1x8-50e_kinetics400-rgb.py index 0d8e8b6cbb..1f0e864017 100644 --- a/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-dense-1x1x8-50e_kinetics400-rgb.py +++ b/configs/recognition/tsm/tsm_imagenet-pretrained-r50_8xb16-dense-1x1x8-50e_kinetics400-rgb.py @@ -7,8 +7,10 @@ ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt' ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt' +file_client_args = dict(io_backend='disk') + train_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict(type='DenseSampleFrames', clip_len=1, frame_interval=1, num_clips=8), dict(type='DecordDecode'), dict(type='Resize', scale=(-1, 256)), @@ -25,7 +27,7 @@ dict(type='PackActionInputs') ] val_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='DenseSampleFrames', clip_len=1, @@ -39,7 +41,7 @@ dict(type='PackActionInputs') ] test_pipeline = [ - dict(type='DecordInit'), + dict(type='DecordInit', **file_client_args), dict( type='DenseSampleFrames', clip_len=1, @@ -112,3 +114,9 @@ paramwise_cfg=dict(fc_lr5=True), optimizer=dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001), clip_grad=dict(max_norm=20, norm_type=2)) + +# Default setting for scaling LR automatically +# - `enable` means enable scaling LR automatically +# or not by default. +# - `base_batch_size` = (8 GPUs) x (16 samples per GPU). 
+auto_scale_lr = dict(enable=False, base_batch_size=128) From af357dd2bd2c82ce6a8a0fe9ec86a48d3fc1ecef Mon Sep 17 00:00:00 2001 From: cir7 <33249023+cir7@users.noreply.github.com> Date: Fri, 6 Jan 2023 15:29:24 +0800 Subject: [PATCH 52/57] [Enhance] support adjust fps in SampleFrame (#2157) --- configs/recognition/x3d/README.md | 10 +- configs/recognition/x3d/metafile.yml | 6 +- .../x3d_m_16x5x1_facebook-kinetics400-rgb.py | 1 + .../x3d_s_13x6x1_facebook-kinetics400-rgb.py | 5 +- mmaction/datasets/transforms/loading.py | 160 +++++++++++++++--- 5 files changed, 146 insertions(+), 36 deletions(-) diff --git a/configs/recognition/x3d/README.md b/configs/recognition/x3d/README.md index 019a2c9942..a0b9a6f3f4 100644 --- a/configs/recognition/x3d/README.md +++ b/configs/recognition/x3d/README.md @@ -20,15 +20,15 @@ This paper presents X3D, a family of efficient video networks that progressively ### Kinetics-400 -| frame sampling strategy | resolution | backbone | top1 10-view | top1 30-view | reference top1 10-view | reference top1 30-view | config | ckpt | -| :---------------------: | :------------: | :------: | :----------: | :----------: | :---------------------------------------: | :---------------------------------------: | :-----------------------: | :----------------------: | -| 13x6x1 | short-side 320 | X3D_S | 72.7 | 73.3 | 73.1 \[[SlowFast](https://github.com/facebookresearch/SlowFast/blob/master/MODEL_ZOO.md)\] | 73.5 \[[SlowFast](https://github.com/facebookresearch/SlowFast/blob/master/MODEL_ZOO.md)\] | [config](/configs/recognition/x3d/x3d_s_13x6x1_facebook-kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/x3d/facebook/x3d_s_13x6x1_facebook-kinetics400-rgb_20201027-623825a0.pth)\[1\] | -| 16x5x1 | short-side 320 | X3D_M | 74.9 | 75.5 | 75.1 \[[SlowFast](https://github.com/facebookresearch/SlowFast/blob/master/MODEL_ZOO.md)\] | 76.2 \[[SlowFast](https://github.com/facebookresearch/SlowFast/blob/master/MODEL_ZOO.md)\] | [config](/configs/recognition/x3d/x3d_m_16x5x1_facebook-kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/x3d/facebook/x3d_m_16x5x1_facebook-kinetics400-rgb_20201027-3f42382a.pth)\[1\] | +| frame sampling strategy | resolution | backbone | top1 10-view | top1 30-view | reference top1 10-view | reference top1 30-view | config | ckpt | +| :---------------------: | :--------: | :------: | :----------: | :----------: | :----------------------------------------: | :----------------------------------------: | :------------------------: | :-----------------------: | +| 13x6x1 | 160x160 | X3D_S | 73.2 | 73.3 | 73.1 \[[SlowFast](https://github.com/facebookresearch/SlowFast/blob/master/MODEL_ZOO.md)\] | 73.5 \[[SlowFast](https://github.com/facebookresearch/SlowFast/blob/master/MODEL_ZOO.md)\] | [config](/configs/recognition/x3d/x3d_s_13x6x1_facebook-kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/x3d/facebook/x3d_s_13x6x1_facebook-kinetics400-rgb_20201027-623825a0.pth)\[1\] | +| 16x5x1 | 224x224 | X3D_M | 75.2 | 76.4 | 75.1 \[[SlowFast](https://github.com/facebookresearch/SlowFast/blob/master/MODEL_ZOO.md)\] | 76.2 \[[SlowFast](https://github.com/facebookresearch/SlowFast/blob/master/MODEL_ZOO.md)\] | [config](/configs/recognition/x3d/x3d_m_16x5x1_facebook-kinetics400-rgb.py) | [ckpt](https://download.openmmlab.com/mmaction/v1.0/recognition/x3d/facebook/x3d_m_16x5x1_facebook-kinetics400-rgb_20201027-3f42382a.pth)\[1\] | \[1\] The models are ported from the repo 
[SlowFast](https://github.com/facebookresearch/SlowFast/) and tested on our data. Currently, we only support the testing of X3D models, training will be available soon. 1. The values in columns named after "reference" are the results got by testing the checkpoint released on the original repo and codes, using the same dataset with ours. -2. The validation set of Kinetics400 we used consists of 19796 videos. These videos are available at [Kinetics400-Validation](https://mycuhk-my.sharepoint.com/:u:/g/personal/1155136485_link_cuhk_edu_hk/EbXw2WX94J1Hunyt3MWNDJUBz-nHvQYhO9pvKqm6g39PMA?e=a9QldB). The corresponding [data list](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_val_list.txt) (each line is of the format 'video_id, num_frames, label_index') and the [label map](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_class2ind.txt) are also available. +2. The validation set of Kinetics400 we used is same as the repo [SlowFast](https://github.com/facebookresearch/SlowFast/), which is available [here](https://github.com/facebookresearch/video-nonlocal-net/issues/67). For more details on data preparation, you can refer to [Kinetics400](/tools/data/kinetics/README.md). diff --git a/configs/recognition/x3d/metafile.yml b/configs/recognition/x3d/metafile.yml index 7b5ebb767a..33eba752c9 100644 --- a/configs/recognition/x3d/metafile.yml +++ b/configs/recognition/x3d/metafile.yml @@ -13,7 +13,7 @@ Models: Architecture: X3D_S FLOPs: 2967543760 Parameters: 3794322 - Resolution: short-side 320 + Resolution: 160x160 Modality: RGB Converted From: Weights: https://dl.fbaipublicfiles.com/pyslowfast/x3d_models/x3d_s.pyth @@ -34,7 +34,7 @@ Models: Architecture: X3D_M FLOPs: 6490866832 Parameters: 3794322 - Resolution: short-side 320 + Resolution: 224x224 Modality: RGB Converted From: Weights: https://dl.fbaipublicfiles.com/pyslowfast/x3d_models/x3d_m.pyth @@ -43,7 +43,7 @@ Models: - Dataset: Kinetics-400 Task: Action Recognition Metrics: - Top 1 Accuracy: 75.5 + Top 1 Accuracy: 76.4 Weights: https://download.openmmlab.com/mmaction/v1.0/recognition/x3d/facebook/x3d_m_16x5x1_facebook-kinetics400-rgb_20201027-3f42382a.pth reference top1 10-view: 75.1 [[SlowFast](https://github.com/facebookresearch/SlowFast/blob/master/MODEL_ZOO.md)] reference top1 30-view: 76.2 [[SlowFast](https://github.com/facebookresearch/SlowFast/blob/master/MODEL_ZOO.md)] diff --git a/configs/recognition/x3d/x3d_m_16x5x1_facebook-kinetics400-rgb.py b/configs/recognition/x3d/x3d_m_16x5x1_facebook-kinetics400-rgb.py index 90fe26ca87..95b8c5f589 100644 --- a/configs/recognition/x3d/x3d_m_16x5x1_facebook-kinetics400-rgb.py +++ b/configs/recognition/x3d/x3d_m_16x5x1_facebook-kinetics400-rgb.py @@ -11,6 +11,7 @@ clip_len=16, frame_interval=5, num_clips=10, + target_fps=30, test_mode=True), dict(type='DecordDecode'), dict(type='Resize', scale=(-1, 256)), diff --git a/configs/recognition/x3d/x3d_s_13x6x1_facebook-kinetics400-rgb.py b/configs/recognition/x3d/x3d_s_13x6x1_facebook-kinetics400-rgb.py index 21ee4f1b60..bc4eabd38f 100644 --- a/configs/recognition/x3d/x3d_s_13x6x1_facebook-kinetics400-rgb.py +++ b/configs/recognition/x3d/x3d_s_13x6x1_facebook-kinetics400-rgb.py @@ -11,10 +11,11 @@ clip_len=13, frame_interval=6, num_clips=10, + target_fps=30, test_mode=True), dict(type='DecordDecode'), - dict(type='Resize', scale=(-1, 192)), - dict(type='ThreeCrop', crop_size=192), + dict(type='Resize', scale=(-1, 182)), + dict(type='ThreeCrop', crop_size=182), dict(type='FormatShape', input_format='NCTHW'), 
dict(type='PackActionInputs') ] diff --git a/mmaction/datasets/transforms/loading.py b/mmaction/datasets/transforms/loading.py index 88e0517122..523f283058 100644 --- a/mmaction/datasets/transforms/loading.py +++ b/mmaction/datasets/transforms/loading.py @@ -4,6 +4,7 @@ import os import os.path as osp import shutil +from typing import Optional import mmcv import numpy as np @@ -111,6 +112,10 @@ class SampleFrames(BaseTransform): Defaults to False. keep_tail_frames (bool): Whether to keep tail frames when sampling. Defaults to False. + target_fps (optional, int): Convert input videos with arbitrary frame + rates to the unified target FPS before sampling frames. If + ``None``, the frame rate will not be adjusted. Defaults to + ``None``. """ def __init__(self, @@ -122,6 +127,7 @@ def __init__(self, out_of_bound_opt: str = 'loop', test_mode: bool = False, keep_tail_frames: bool = False, + target_fps: Optional[int] = None, **kwargs) -> None: self.clip_len = clip_len @@ -132,9 +138,11 @@ def __init__(self, self.out_of_bound_opt = out_of_bound_opt self.test_mode = test_mode self.keep_tail_frames = keep_tail_frames + self.target_fps = target_fps assert self.out_of_bound_opt in ['loop', 'repeat_last'] - def _get_train_clips(self, num_frames: int) -> np.array: + def _get_train_clips(self, num_frames: int, + ori_clip_len: float) -> np.array: """Get clip offsets in train mode. It will calculate the average interval for selected frames, @@ -144,11 +152,11 @@ def _get_train_clips(self, num_frames: int) -> np.array: Args: num_frames (int): Total number of frame in the video. + ori_clip_len (float): length of original sample clip. Returns: np.ndarray: Sampled frame indices in train mode. """ - ori_clip_len = self.clip_len * self.frame_interval if self.keep_tail_frames: avg_interval = (num_frames - ori_clip_len + 1) / float( @@ -178,7 +186,8 @@ def _get_train_clips(self, num_frames: int) -> np.array: return clip_offsets - def _get_test_clips(self, num_frames: int) -> np.array: + def _get_test_clips(self, num_frames: int, + ori_clip_len: float) -> np.array: """Get clip offsets in test mode. If the total number of frames is @@ -186,6 +195,7 @@ def _get_test_clips(self, num_frames: int) -> np.array: Args: num_frames (int): Total number of frame in the video. + ori_clip_len (float): length of original sample clip. Returns: np.ndarray: Sampled frame indices in test mode. 
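(A standalone sketch, not part of the patch: the hunks that follow make `SampleFrames` rescale the clip window by `fps / target_fps` and spread `clip_len` indices over it with `np.linspace`. The snippet below reproduces that arithmetic in plain NumPy so the effect of `target_fps` can be checked by hand; the 60 fps source, 600 total frames, and the 13x6 sampling setting are illustrative assumptions only.)

import numpy as np

clip_len, frame_interval, target_fps = 13, 6, 30
fps, total_frames = 60.0, 600

fps_scale_ratio = fps / target_fps                                   # 2.0
ori_clip_len = max(1, clip_len * frame_interval * fps_scale_ratio)   # 156 source frames

# test-mode offset for a single clip (mirrors the `num_clips == 1` branch)
max_offset = max(total_frames - ori_clip_len, 0)
clip_offset = np.array([max_offset // 2])

# evenly spread `clip_len` indices over the fps-scaled window
frame_inds = clip_offset[:, None] + np.linspace(
    0, ori_clip_len - 1, clip_len).astype(np.int32)
print(frame_inds)  # 13 indices spanning ~156 frames of the 60 fps source,
                   # i.e. 78 frames' worth of the video at 30 fps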
@@ -198,7 +208,6 @@ def _get_test_clips(self, num_frames: int) -> np.array: if self.twice_sample: clip_offsets = np.concatenate([clip_offsets, base_offsets]) else: # 3D recognizer - ori_clip_len = (self.clip_len - 1) * self.frame_interval + 1 max_offset = max(num_frames - ori_clip_len, 0) if self.twice_sample: num_clips = self.num_clips * 2 @@ -206,14 +215,19 @@ def _get_test_clips(self, num_frames: int) -> np.array: num_clips = self.num_clips if num_clips > 1: num_segments = self.num_clips - 1 - offset_between = max_offset / float(num_segments) - clip_offsets = np.arange(num_clips) * offset_between - clip_offsets = np.round(clip_offsets) + # align test sample strategy with `PySlowFast` repo + if self.target_fps is not None: + offset_between = np.floor(max_offset / float(num_segments)) + clip_offsets = np.arange(num_clips) * offset_between + else: + offset_between = max_offset / float(num_segments) + clip_offsets = np.arange(num_clips) * offset_between + clip_offsets = np.round(clip_offsets) else: clip_offsets = np.array([max_offset // 2]) return clip_offsets - def _sample_clips(self, num_frames: int) -> np.array: + def _sample_clips(self, num_frames: int, ori_clip_len: float) -> np.array: """Choose clip offsets for the video in a given mode. Args: @@ -223,12 +237,29 @@ def _sample_clips(self, num_frames: int) -> np.array: np.ndarray: Sampled frame indices. """ if self.test_mode: - clip_offsets = self._get_test_clips(num_frames) + clip_offsets = self._get_test_clips(num_frames, ori_clip_len) else: - clip_offsets = self._get_train_clips(num_frames) + clip_offsets = self._get_train_clips(num_frames, ori_clip_len) return clip_offsets + def _get_ori_clip_len(self, fps_scale_ratio: float) -> float: + """calculate length of clip segment for different strategy. + + Args: + fps_scale_ratio (float): Scale ratio to adjust fps. + """ + if self.target_fps is not None: + # align test sample strategy with `PySlowFast` repo + ori_clip_len = self.clip_len * self.frame_interval + ori_clip_len = np.maximum(1, ori_clip_len * fps_scale_ratio) + elif self.test_mode: + ori_clip_len = (self.clip_len - 1) * self.frame_interval + 1 + else: + ori_clip_len = self.clip_len * self.frame_interval + + return ori_clip_len + def transform(self, results: dict) -> dict: """Perform the SampleFrames loading. @@ -237,11 +268,23 @@ def transform(self, results: dict) -> dict: to the next transform in pipeline. """ total_frames = results['total_frames'] + # if can't get fps, same value of `fps` and `target_fps` + # will perform nothing + fps = results.get('fps') + if self.target_fps is None or not fps: + fps_scale_ratio = 1.0 + else: + fps_scale_ratio = fps / self.target_fps + ori_clip_len = self._get_ori_clip_len(fps_scale_ratio) + clip_offsets = self._sample_clips(total_frames, ori_clip_len) - clip_offsets = self._sample_clips(total_frames) - frame_inds = clip_offsets[:, None] + np.arange( - self.clip_len)[None, :] * self.frame_interval - frame_inds = np.concatenate(frame_inds) + if self.target_fps: + frame_inds = clip_offsets[:, None] + np.linspace( + 0, ori_clip_len - 1, self.clip_len).astype(np.int32) + else: + frame_inds = clip_offsets[:, None] + np.arange( + self.clip_len)[None, :] * self.frame_interval + frame_inds = np.concatenate(frame_inds) if self.temporal_jitter: perframe_offsets = np.random.randint( @@ -419,35 +462,44 @@ def __repr__(self): class DenseSampleFrames(SampleFrames): """Select frames from the video by dense sample strategy. 
- Required keys are "filename", added or modified keys are "total_frames", - "frame_inds", "frame_interval" and "num_clips". + Required keys: + + - total_frames + - start_index + + Added keys: + + - frame_inds + - clip_len + - frame_interval + - num_clips Args: clip_len (int): Frames of each sampled output clip. frame_interval (int): Temporal interval of adjacent sampled frames. - Default: 1. - num_clips (int): Number of clips to be sampled. Default: 1. + Defaults to 1. + num_clips (int): Number of clips to be sampled. Defaults to 1. sample_range (int): Total sample range for dense sample. - Default: 64. + Defaults to 64. num_sample_positions (int): Number of sample start positions, Which is - only used in test mode. Default: 10. That is to say, by default, + only used in test mode. Defaults to 10. That is to say, by default, there are at least 10 clips for one input sample in test mode. temporal_jitter (bool): Whether to apply temporal jittering. - Default: False. + Defaults to False. test_mode (bool): Store True when building test or validation dataset. - Default: False. + Defaults to False. """ def __init__(self, *args, - sample_range=64, - num_sample_positions=10, + sample_range: int = 64, + num_sample_positions: int = 10, **kwargs): super().__init__(*args, **kwargs) self.sample_range = sample_range self.num_sample_positions = num_sample_positions - def _get_train_clips(self, num_frames): + def _get_train_clips(self, num_frames: int) -> np.array: """Get clip offsets by dense sample strategy in train mode. It will calculate a sample position and sample interval and set @@ -469,7 +521,7 @@ def _get_train_clips(self, num_frames): clip_offsets = (base_offsets + start_idx) % num_frames return clip_offsets - def _get_test_clips(self, num_frames): + def _get_test_clips(self, num_frames: int) -> np.array: """Get clip offsets by dense sample strategy in test mode. It will calculate a sample position and sample interval and evenly @@ -494,6 +546,61 @@ def _get_test_clips(self, num_frames): clip_offsets = np.array(clip_offsets) return clip_offsets + def _sample_clips(self, num_frames: int) -> np.array: + """Choose clip offsets for the video in a given mode. + + Args: + num_frames (int): Total number of frame in the video. + + Returns: + np.ndarray: Sampled frame indices. + """ + if self.test_mode: + clip_offsets = self._get_test_clips(num_frames) + else: + clip_offsets = self._get_train_clips(num_frames) + + return clip_offsets + + def transform(self, results: dict) -> dict: + """Perform the SampleFrames loading. + + Args: + results (dict): The resulting dict to be modified and passed + to the next transform in pipeline. 
+ """ + total_frames = results['total_frames'] + + clip_offsets = self._sample_clips(total_frames) + frame_inds = clip_offsets[:, None] + np.arange( + self.clip_len)[None, :] * self.frame_interval + frame_inds = np.concatenate(frame_inds) + + if self.temporal_jitter: + perframe_offsets = np.random.randint( + self.frame_interval, size=len(frame_inds)) + frame_inds += perframe_offsets + + frame_inds = frame_inds.reshape((-1, self.clip_len)) + if self.out_of_bound_opt == 'loop': + frame_inds = np.mod(frame_inds, total_frames) + elif self.out_of_bound_opt == 'repeat_last': + safe_inds = frame_inds < total_frames + unsafe_inds = 1 - safe_inds + last_ind = np.max(safe_inds * frame_inds, axis=1) + new_inds = (safe_inds * frame_inds + (unsafe_inds.T * last_ind).T) + frame_inds = new_inds + else: + raise ValueError('Illegal out_of_bound option.') + + start_index = results['start_index'] + frame_inds = np.concatenate(frame_inds) + start_index + results['frame_inds'] = frame_inds.astype(np.int32) + results['clip_len'] = self.clip_len + results['frame_interval'] = self.frame_interval + results['num_clips'] = self.num_clips + return results + def __repr__(self): repr_str = (f'{self.__class__.__name__}(' f'clip_len={self.clip_len}, ' @@ -914,6 +1021,7 @@ def transform(self, results): file_obj = io.BytesIO(self.file_client.get(results['filename'])) container = decord.VideoReader(file_obj, num_threads=self.num_threads) + results['fps'] = container.get_avg_fps() results['video_reader'] = container results['total_frames'] = len(container) return results From 6fdad856ad928679cfdb7726319712a1219120c5 Mon Sep 17 00:00:00 2001 From: Kai Hu Date: Fri, 6 Jan 2023 04:05:38 -0500 Subject: [PATCH 53/57] [Feature] export spatial temporal detection model to onnx (#2148) --- tools/deployment/export_onnx_stdet.py | 192 ++++++++++++++++++++++++++ 1 file changed, 192 insertions(+) create mode 100644 tools/deployment/export_onnx_stdet.py diff --git a/tools/deployment/export_onnx_stdet.py b/tools/deployment/export_onnx_stdet.py new file mode 100644 index 0000000000..39a3b3ead4 --- /dev/null +++ b/tools/deployment/export_onnx_stdet.py @@ -0,0 +1,192 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import argparse + +import onnxruntime +import torch +import torch.nn as nn +from mmdet.structures.bbox import bbox2roi +from mmengine import Config +from mmengine.runner import load_checkpoint + +from mmaction.registry import MODELS +from mmaction.utils import register_all_modules + + +def parse_args(): + parser = argparse.ArgumentParser(description='Get model flops and params') + parser.add_argument('config', help='config file path') + parser.add_argument('checkpoint', help='checkpoint file') + parser.add_argument( + '--num_frames', type=int, default=8, help='number of input frames.') + parser.add_argument( + '--shape', + type=int, + nargs='+', + default=[256, 455], + help='input image size') + parser.add_argument( + '--device', type=str, default='cpu', help='CPU/CUDA device option') + parser.add_argument( + '--output_file', + type=str, + default='stdet.onnx', + help='file name of the output onnx file') + args = parser.parse_args() + return args + + +class SpatialMaxPool3d(nn.Module): + + def __init__(self): + super().__init__() + + def forward(self, x): + x = x.max(dim=-1, keepdim=True)[0] + return x.max(dim=-2, keepdim=True)[0] + + +class SpatialAvgPool(nn.Module): + + def __init__(self): + super().__init__() + + def forward(self, x): + return x.mean(dim=(-1, -2), keepdims=True) + + +class TemporalMaxPool3d(nn.Module): + + def __init__(self): + super().__init__() + + def forward(self, x): + return x.max(dim=-3, keepdim=True)[0] + + +class TemporalAvgPool3d(nn.Module): + + def __init__(self): + super().__init__() + + def forward(self, x): + return x.mean(dim=-3, keepdim=True) + + +class GlobalPool2d(nn.Module): + + def __init__(self, pool_size, output_size, later_max=True): + super().__init__() + self.pool = nn.AvgPool2d(pool_size) + self.max = later_max + self.output_size = output_size + + def forward(self, x): + x = self.pool(x) + if self.max: + x = x.max(dim=-1, keepdim=True)[0] + x = x.max(dim=-2, keepdim=True)[0] + else: + x = x.mean(dim=(-1, -2), keepdims=True) + x = x.expand(-1, -1, self.output_size, self.output_size) + return x + + +class STDet(nn.Module): + + def __init__(self, base_model, input_tensor): + super(STDet, self).__init__() + self.backbone = base_model.backbone + self.bbox_roi_extractor = base_model.roi_head.bbox_roi_extractor + self.bbox_head = base_model.roi_head.bbox_head + + output_size = self.bbox_roi_extractor.global_pool.output_size + pool_size = min(input_tensor.shape[-2:]) // 16 // output_size + + if isinstance(self.bbox_head.temporal_pool, nn.AdaptiveAvgPool3d): + self.bbox_head.temporal_pool = TemporalAvgPool3d() + else: + self.bbox_head.temporal_pool = TemporalMaxPool3d() + if isinstance(self.bbox_head.spatial_pool, nn.AdaptiveAvgPool3d): + self.bbox_head.spatial_pool = SpatialAvgPool() + self.bbox_roi_extractor.global_pool = GlobalPool2d( + pool_size, output_size, later_max=False) + else: + self.bbox_head.spatial_pool = SpatialMaxPool3d() + self.bbox_roi_extractor.global_pool = GlobalPool2d( + pool_size, output_size, later_max=True) + + def forward(self, input_tensor, rois): + feat = self.backbone(input_tensor) + bbox_feats, _ = self.bbox_roi_extractor(feat, rois) + cls_score = self.bbox_head(bbox_feats) + return cls_score + + +def main(): + args = parse_args() + register_all_modules() + config = Config.fromfile(args.config) + + base_model = MODELS.build(config.model) + load_checkpoint(base_model, args.checkpoint, map_location='cpu') + base_model.to(args.device) + + if len(args.shape) == 1: + input_shape = (args.shape[0], args.shape[0]) + elif 
len(args.shape) == 2: + input_shape = tuple(args.shape) + else: + raise ValueError('invalid input shape') + + input_tensor = torch.randn(1, 3, args.num_frames, *input_shape) + input_tensor = input_tensor.clamp(-3, 3).to(args.device) + proposal = torch.Tensor([[22., 59., 67., 157.], [186., 73., 217., 159.], + [407., 95., 431., 168.]]) + + rois = bbox2roi([proposal]).to(args.device) + + model = STDet(base_model, input_tensor).to(args.device) + model.eval() + cls_score = model(input_tensor, rois) + print(f'Model output shape: {cls_score.shape}') + + torch.onnx.export( + model, (input_tensor, rois), + args.output_file, + input_names=['input_tensor', 'rois'], + output_names=['cls_score'], + export_params=False, + do_constant_folding=True, + verbose=True, + opset_version=11, + dynamic_axes={ + 'input_tensor': { + 0: 'batch_size', + 3: 'height', + 4: 'width' + }, + 'rois': { + 0: 'total_num_bbox_for_the_batch' + }, + 'cls_score': { + 0: 'total_num_bbox_for_the_batch' + } + }) + + print(f'Successfully export the onnx file to {args.output_file}') + + # Test exported file + session = onnxruntime.InferenceSession(args.output_file) + input_feed = { + 'input_tensor': input_tensor.cpu().data.numpy(), + 'rois': rois.cpu().data.numpy() + } + outputs = session.run(['cls_score'], input_feed=input_feed) + outputs = outputs[0] + diff = abs(cls_score.cpu().data.numpy() - outputs).max() + if diff < 1e-5: + print('The output difference is smaller than 1e-5.') + + +if __name__ == '__main__': + main() From e881686192eaab885dbbb6abb41c6cbd7cc1e3e9 Mon Sep 17 00:00:00 2001 From: Kai Hu Date: Mon, 9 Jan 2023 03:42:04 -0500 Subject: [PATCH 54/57] [Feature] Support Omni-source training on ImageNet and Kinetics dataset. (#2143) --- ...b16-8x8x1-256e_imagenet-kinetics400-rgb.py | 171 ++++++++++++ mmaction/datasets/transforms/__init__.py | 38 +-- mmaction/datasets/transforms/loading.py | 86 ++++++ mmaction/engine/__init__.py | 1 + mmaction/engine/runner/__init__.py | 4 + mmaction/engine/runner/multi_loop.py | 85 ++++++ mmaction/models/backbones/__init__.py | 12 +- mmaction/models/backbones/resnet_omni.py | 255 ++++++++++++++++++ .../data_preprocessors/data_preprocessor.py | 56 +++- mmaction/models/heads/__init__.py | 7 +- mmaction/models/heads/omni_head.py | 125 +++++++++ mmaction/models/recognizers/__init__.py | 3 +- .../models/recognizers/recognizer_omni.py | 183 +++++++++++++ tests/datasets/transforms/test_loading.py | 42 ++- tests/models/backbones/test_resnet_omni.py | 26 ++ tests/models/heads/test_omni_head.py | 46 ++++ tests/models/recognizers/recognizer_omni.py | 60 +++++ 17 files changed, 1158 insertions(+), 42 deletions(-) create mode 100644 configs/recognition/omnisource/slowonly_r50_16xb16-8x8x1-256e_imagenet-kinetics400-rgb.py create mode 100644 mmaction/engine/runner/__init__.py create mode 100644 mmaction/engine/runner/multi_loop.py create mode 100644 mmaction/models/backbones/resnet_omni.py create mode 100644 mmaction/models/heads/omni_head.py create mode 100644 mmaction/models/recognizers/recognizer_omni.py create mode 100644 tests/models/backbones/test_resnet_omni.py create mode 100644 tests/models/heads/test_omni_head.py create mode 100644 tests/models/recognizers/recognizer_omni.py diff --git a/configs/recognition/omnisource/slowonly_r50_16xb16-8x8x1-256e_imagenet-kinetics400-rgb.py b/configs/recognition/omnisource/slowonly_r50_16xb16-8x8x1-256e_imagenet-kinetics400-rgb.py new file mode 100644 index 0000000000..05feb2710a --- /dev/null +++ 
b/configs/recognition/omnisource/slowonly_r50_16xb16-8x8x1-256e_imagenet-kinetics400-rgb.py @@ -0,0 +1,171 @@ +_base_ = ['../../_base_/default_runtime.py'] + +# model settings +model = dict( + type='RecognizerOmni', + backbone=dict(type='OmniResNet'), + cls_head=dict( + type='OmniHead', + image_classes=1000, + video_classes=400, + in_channels=2048, + average_clips='prob'), + data_preprocessor=dict( + type='ActionDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + format_shape='MIX2d3d')) + +# dataset settings +image_root = 'data/imagenet/' +image_ann_train = 'meta/train.txt' + +video_root = 'data/kinetics400/videos_train' +video_root_val = 'data/kinetics400/videos_val' +video_ann_train = 'data/kinetics400/kinetics400_train_list_videos.txt' +video_ann_val = 'data/kinetics400/kinetics400_val_list_videos.txt' + +num_images = 1281167 # number of training samples in the ImageNet dataset +num_videos = 240435 # number of training samples in the Kinetics400 dataset +batchsize_video = 16 +num_gpus = 8 +num_iter = num_videos // (batchsize_video * num_gpus) +batchsize_image = num_images // (num_iter * num_gpus) + +train_pipeline = [ + dict(type='DecordInit'), + dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='RandomResizedCrop'), + dict(type='Resize', scale=(224, 224), keep_ratio=False), + dict(type='Flip', flip_ratio=0.5), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] + +val_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=1, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='CenterCrop', crop_size=224), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] + +test_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', + clip_len=8, + frame_interval=8, + num_clips=10, + test_mode=True), + dict(type='DecordDecode'), + dict(type='Resize', scale=(-1, 256)), + dict(type='ThreeCrop', crop_size=256), + dict(type='FormatShape', input_format='NCTHW'), + dict(type='PackActionInputs') +] + +train_dataloader = dict( + batch_size=batchsize_video, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='VideoDataset', + ann_file=video_ann_train, + data_prefix=dict(video=video_root), + pipeline=train_pipeline)) + +val_dataloader = dict( + batch_size=16, + num_workers=4, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type='VideoDataset', + ann_file=video_ann_val, + data_prefix=dict(video=video_root_val), + pipeline=val_pipeline, + test_mode=True)) + +test_dataloader = dict( + batch_size=1, + num_workers=8, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type='VideoDataset', + ann_file=video_ann_val, + data_prefix=dict(video=video_root_val), + pipeline=test_pipeline, + test_mode=True)) + +imagenet_pipeline = [ + dict(type='LoadRGBFromFile'), + dict(type='mmcls.RandomResizedCrop', scale=224), + dict(type='mmcls.RandomFlip', prob=0.5, direction='horizontal'), + dict(type='mmcls.PackClsInputs'), +] + +image_dataloader = dict( + batch_size=batchsize_image, + num_workers=8, + dataset=dict( + type='mmcls.ImageNet', + data_root=image_root, + ann_file=image_ann_train, + data_prefix='train', + pipeline=imagenet_pipeline), + 
sampler=dict(type='DefaultSampler', shuffle=True), +) + +val_evaluator = dict(type='AccMetric') +test_evaluator = val_evaluator + +train_cfg = dict( + type='MultiLoaderEpochBasedTrainLoop', + other_loaders=[image_dataloader], + max_epochs=256, + val_interval=4) + +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +# learning policy +param_scheduler = [ + dict( + type='LinearLR', + start_factor=0.1, + by_epoch=True, + begin=0, + end=34, + convert_to_iter_based=True), + dict( + type='CosineAnnealingLR', + T_max=222, + eta_min=0, + by_epoch=True, + begin=34, + end=256, + convert_to_iter_based=True) +] +""" +The learning rate is for total_batch_size = 16 x 16 (num_gpus x batch_size) +If you want to use other batch size or number of GPU settings, please update +the learning rate with the linear scaling rule. +""" +optim_wrapper = dict( + optimizer=dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001), + clip_grad=dict(max_norm=40, norm_type=2)) + +# runtime settings +default_hooks = dict(checkpoint=dict(interval=4, max_keep_ckpts=3)) diff --git a/mmaction/datasets/transforms/__init__.py b/mmaction/datasets/transforms/__init__.py index 8d0648c435..198bd8c781 100644 --- a/mmaction/datasets/transforms/__init__.py +++ b/mmaction/datasets/transforms/__init__.py @@ -6,9 +6,9 @@ DecordInit, DenseSampleFrames, GenerateLocalizationLabels, ImageDecode, LoadAudioFeature, LoadHVULabel, LoadLocalizationFeature, - LoadProposals, OpenCVDecode, OpenCVInit, PIMSDecode, - PIMSInit, PyAVDecode, PyAVDecodeMotionVector, PyAVInit, - RawFrameDecode, SampleAVAFrames, SampleFrames, + LoadProposals, LoadRGBFromFile, OpenCVDecode, OpenCVInit, + PIMSDecode, PIMSInit, PyAVDecode, PyAVDecodeMotionVector, + PyAVInit, RawFrameDecode, SampleAVAFrames, SampleFrames, UniformSample, UntrimmedSampleFrames) from .pose_transforms import (GeneratePoseTarget, GenSkeFeat, JointToBone, LoadKineticsPose, MergeSkeFeat, PadTo, @@ -21,20 +21,20 @@ from .wrappers import ImgAug, PytorchVideoWrapper, TorchVisionWrapper __all__ = [ - 'SampleFrames', 'PyAVDecode', 'DecordDecode', 'DenseSampleFrames', - 'OpenCVDecode', 'MultiScaleCrop', 'RandomResizedCrop', 'RandomCrop', - 'Resize', 'Flip', 'Fuse', 'ThreeCrop', 'CenterCrop', 'TenCrop', - 'Transpose', 'FormatShape', 'GenerateLocalizationLabels', - 'LoadLocalizationFeature', 'LoadProposals', 'DecordInit', 'OpenCVInit', - 'PyAVInit', 'UntrimmedSampleFrames', 'RawFrameDecode', 'DecordInit', - 'OpenCVInit', 'PyAVInit', 'ColorJitter', 'LoadHVULabel', 'SampleAVAFrames', - 'AudioAmplify', 'MelSpectrogram', 'AudioDecode', 'FormatAudioShape', - 'LoadAudioFeature', 'AudioFeatureSelector', 'AudioDecodeInit', - 'ImageDecode', 'BuildPseudoClip', 'RandomRescale', 'PIMSDecode', - 'PyAVDecodeMotionVector', 'UniformSampleFrames', 'PoseDecode', - 'LoadKineticsPose', 'GeneratePoseTarget', 'PIMSInit', 'FormatGCNInput', - 'PadTo', 'ArrayDecode', 'JointToBone', 'PackActionInputs', - 'PackLocalizationInputs', 'ImgAug', 'TorchVisionWrapper', - 'PytorchVideoWrapper', 'PoseCompact', 'PreNormalize3D', 'ToMotion', - 'MergeSkeFeat', 'GenSkeFeat', 'PreNormalize2D', 'UniformSample' + 'ArrayDecode', 'AudioAmplify', 'AudioDecode', 'AudioDecodeInit', + 'AudioFeatureSelector', 'BuildPseudoClip', 'CenterCrop', 'ColorJitter', + 'DecordDecode', 'DecordInit', 'DecordInit', 'DenseSampleFrames', 'Flip', + 'FormatAudioShape', 'FormatGCNInput', 'FormatShape', 'Fuse', 'GenSkeFeat', + 'GenerateLocalizationLabels', 'GeneratePoseTarget', 'ImageDecode', + 'ImgAug', 'JointToBone', 'LoadAudioFeature', 'LoadHVULabel', + 
'LoadKineticsPose', 'LoadLocalizationFeature', 'LoadProposals', + 'LoadRGBFromFile', 'MelSpectrogram', 'MergeSkeFeat', 'MultiScaleCrop', + 'OpenCVDecode', 'OpenCVInit', 'OpenCVInit', 'PIMSDecode', 'PIMSInit', + 'PackActionInputs', 'PackLocalizationInputs', 'PadTo', 'PoseCompact', + 'PoseDecode', 'PreNormalize2D', 'PreNormalize3D', 'PyAVDecode', + 'PyAVDecodeMotionVector', 'PyAVInit', 'PyAVInit', 'PytorchVideoWrapper', + 'RandomCrop', 'RandomRescale', 'RandomResizedCrop', 'RawFrameDecode', + 'Resize', 'SampleAVAFrames', 'SampleFrames', 'TenCrop', 'ThreeCrop', + 'ToMotion', 'TorchVisionWrapper', 'Transpose', 'UniformSample', + 'UniformSampleFrames', 'UntrimmedSampleFrames' ] diff --git a/mmaction/datasets/transforms/loading.py b/mmaction/datasets/transforms/loading.py index 523f283058..558579b87f 100644 --- a/mmaction/datasets/transforms/loading.py +++ b/mmaction/datasets/transforms/loading.py @@ -16,6 +16,92 @@ from mmaction.utils import get_random_string, get_shm_dir, get_thread_id +@TRANSFORMS.register_module() +class LoadRGBFromFile(BaseTransform): + """Load a RGB image from file. + + Required Keys: + + - img_path + + Modified Keys: + + - img + - img_shape + - ori_shape + + Args: + to_float32 (bool): Whether to convert the loaded image to a float32 + numpy array. If set to False, the loaded image is an uint8 array. + Defaults to False. + color_type (str): The flag argument for :func:``mmcv.imfrombytes``. + Defaults to 'color'. + imdecode_backend (str): The image decoding backend type. The backend + argument for :func:``mmcv.imfrombytes``. + See :func:``mmcv.imfrombytes`` for details. + Defaults to 'cv2'. + io_backend (str): io backend where frames are store. + Default: 'disk'. + ignore_empty (bool): Whether to allow loading empty image or file path + not existent. Defaults to False. + kwargs (dict): Args for file client. + """ + + def __init__(self, + to_float32: bool = False, + color_type: str = 'color', + imdecode_backend: str = 'cv2', + io_backend: str = 'disk', + ignore_empty: bool = False, + **kwargs) -> None: + self.ignore_empty = ignore_empty + self.to_float32 = to_float32 + self.color_type = color_type + self.imdecode_backend = imdecode_backend + self.file_client = FileClient(io_backend, **kwargs) + self.io_backend = io_backend + + def transform(self, results: dict) -> dict: + """Functions to load image. + + Args: + results (dict): Result dict from :obj:``mmcv.BaseDataset``. + + Returns: + dict: The dict contains loaded image and meta information. + """ + + filename = results['img_path'] + try: + img_bytes = self.file_client.get(filename) + img = mmcv.imfrombytes( + img_bytes, + flag=self.color_type, + channel_order='rgb', + backend=self.imdecode_backend) + except Exception as e: + if self.ignore_empty: + return None + else: + raise e + if self.to_float32: + img = img.astype(np.float32) + + results['img'] = img + results['img_shape'] = img.shape[:2] + results['ori_shape'] = img.shape[:2] + return results + + def __repr__(self): + repr_str = (f'{self.__class__.__name__}(' + f'ignore_empty={self.ignore_empty}, ' + f'to_float32={self.to_float32}, ' + f"color_type='{self.color_type}', " + f"imdecode_backend='{self.imdecode_backend}', " + f"io_backend='{self.io_backend}')") + return repr_str + + @TRANSFORMS.register_module() class LoadHVULabel(BaseTransform): """Convert the HVU label from dictionaries to torch tensors. 
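(A minimal usage sketch for the `LoadRGBFromFile` transform added above; not part of the patch, and the `./demo.jpg` path is a placeholder. In the Omni-source config earlier in this patch the same transform is driven through the registry as `dict(type='LoadRGBFromFile')` at the head of `imagenet_pipeline`.)

from mmaction.datasets.transforms import LoadRGBFromFile

# `BaseTransform` makes the object callable, so it can be tried outside a pipeline.
loader = LoadRGBFromFile(to_float32=True, io_backend='disk')
results = loader(dict(img_path='./demo.jpg'))  # placeholder image path
# The image is decoded in RGB channel order; the shape keys are (H, W).
print(results['img'].dtype, results['img_shape'], results['ori_shape'])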
diff --git a/mmaction/engine/__init__.py b/mmaction/engine/__init__.py index 4d45a40230..be2ad518d1 100644 --- a/mmaction/engine/__init__.py +++ b/mmaction/engine/__init__.py @@ -2,3 +2,4 @@ from .hooks import * # noqa: F401, F403 from .model import * # noqa: F401, F403 from .optimizers import * # noqa: F401, F403 +from .runner import * # noqa: F401, F403 diff --git a/mmaction/engine/runner/__init__.py b/mmaction/engine/runner/__init__.py new file mode 100644 index 0000000000..c7dc511ea8 --- /dev/null +++ b/mmaction/engine/runner/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from .multi_loop import MultiLoaderEpochBasedTrainLoop + +__all__ = ['MultiLoaderEpochBasedTrainLoop'] diff --git a/mmaction/engine/runner/multi_loop.py b/mmaction/engine/runner/multi_loop.py new file mode 100644 index 0000000000..37c7053c41 --- /dev/null +++ b/mmaction/engine/runner/multi_loop.py @@ -0,0 +1,85 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import gc +from typing import Dict, List, Union + +from mmengine.runner import EpochBasedTrainLoop +from torch.utils.data import DataLoader + +from mmaction.registry import LOOPS + + +class EpochMultiLoader: + """Multi loaders based on epoch.""" + + def __init__(self, dataloaders: List[DataLoader]): + self._dataloaders = dataloaders + self.iter_loaders = [iter(loader) for loader in self._dataloaders] + + @property + def num_loaders(self): + """The number of dataloaders.""" + return len(self._dataloaders) + + def __iter__(self): + """Return self when executing __iter__.""" + return self + + def __next__(self): + """Get the next iter's data of multiple loaders.""" + data = tuple([next(loader) for loader in self.iter_loaders]) + return data + + def __len__(self): + """Get the length of loader.""" + return min([len(loader) for loader in self._dataloaders]) + + +@LOOPS.register_module() +class MultiLoaderEpochBasedTrainLoop(EpochBasedTrainLoop): + """EpochBasedTrainLoop with multiple dataloaders. + + Args: + runner (Runner): A reference of runner. + dataloader (Dataloader or Dict): A dataloader object or a dict to + build a dataloader for training the model. + other_loaders (List of Dataloader or Dict): A list of other loaders. + Each item in the list is a dataloader object or a dict to build + a dataloader. + max_epochs (int): Total training epochs. + val_begin (int): The epoch that begins validating. Defaults to 1. + val_interval (int): Validation interval. Defaults to 1. 
+ """ + + def __init__(self, + runner, + dataloader: Union[Dict, DataLoader], + other_loaders: List[Union[Dict, DataLoader]], + max_epochs: int, + val_begin: int = 1, + val_interval: int = 1) -> None: + super().__init__(runner, dataloader, max_epochs, val_begin, + val_interval) + multi_loaders = [self.dataloader] + for loader in other_loaders: + if isinstance(loader, dict): + loader = runner.build_dataloader(loader, seed=runner.seed) + multi_loaders.append(loader) + + self.multi_loaders = multi_loaders + + def run_epoch(self) -> None: + """Iterate one epoch.""" + self.runner.call_hook('before_train_epoch') + self.runner.model.train() + + gc.collect() + for loader in self.multi_loaders: + if hasattr(loader, 'sampler') and hasattr(loader.sampler, + 'set_epoch'): + loader.sampler.set_epoch(self._epoch) + + for idx, data_batch in enumerate(EpochMultiLoader(self.multi_loaders)): + self.run_iter(idx, data_batch) + + self.runner.call_hook('after_train_epoch') + self._epoch += 1 diff --git a/mmaction/models/backbones/__init__.py b/mmaction/models/backbones/__init__.py index 741d652e01..d634099cb6 100644 --- a/mmaction/models/backbones/__init__.py +++ b/mmaction/models/backbones/__init__.py @@ -1,5 +1,4 @@ # Copyright (c) OpenMMLab. All rights reserved. -# from .aagcn import AAGCN from .aagcn import AAGCN from .c2d import C2D from .c3d import C3D @@ -13,6 +12,7 @@ from .resnet3d_slowfast import ResNet3dSlowFast from .resnet3d_slowonly import ResNet3dSlowOnly from .resnet_audio import ResNetAudio +from .resnet_omni import OmniResNet from .resnet_tin import ResNetTIN from .resnet_tsm import ResNetTSM from .stgcn import STGCN @@ -23,9 +23,9 @@ from .x3d import X3D __all__ = [ - 'C2D', 'C3D', 'ResNet', 'ResNet3d', 'ResNetTSM', 'ResNet2Plus1d', - 'ResNet3dSlowFast', 'ResNet3dSlowOnly', 'ResNet3dCSN', 'ResNetTIN', 'X3D', - 'ResNet3dLayer', 'MobileNetV2TSM', 'MobileNetV2', 'TANet', 'TimeSformer', - 'STGCN', 'AAGCN', 'ResNetAudio', 'SwinTransformer3D', 'VisionTransformer', - 'MViT' + 'AAGCN', 'C2D', 'C3D', 'MViT', 'MobileNetV2', 'MobileNetV2TSM', + 'OmniResNet', 'ResNet', 'ResNet2Plus1d', 'ResNet3d', 'ResNet3dCSN', + 'ResNet3dLayer', 'ResNet3dSlowFast', 'ResNet3dSlowOnly', 'ResNetAudio', + 'ResNetTIN', 'ResNetTSM', 'STGCN', 'SwinTransformer3D', 'TANet', + 'TimeSformer', 'VisionTransformer', 'X3D' ] diff --git a/mmaction/models/backbones/resnet_omni.py b/mmaction/models/backbones/resnet_omni.py new file mode 100644 index 0000000000..76fbb1eb4e --- /dev/null +++ b/mmaction/models/backbones/resnet_omni.py @@ -0,0 +1,255 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import List, Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F +from mmengine.model import BaseModel, BaseModule +from mmengine.runner import CheckpointLoader + +from mmaction.registry import MODELS +from mmaction.utils import OptConfigType + + +def batch_norm(inputs: torch.Tensor, + module: nn.modules.batchnorm, + training: Optional[bool] = None) -> torch.Tensor: + """Applies Batch Normalization for each channel across a batch of data + using params from the given batch normalization module. + + Args: + inputs (Tensor): The input data. + module (nn.modules.batchnorm): a batch normalization module. Will use + params from this batch normalization module to do the operation. + training (bool, optional): if true, apply the train mode batch + normalization. Defaults to None and will use the training mode of + the module. 
+ """ + if training is None: + training = module.training + return F.batch_norm( + input=inputs, + running_mean=None if training else module.running_mean, + running_var=None if training else module.running_var, + weight=module.weight, + bias=module.bias, + training=training, + momentum=module.momentum, + eps=module.eps) + + +class BottleNeck(BaseModule): + """Building block for Omni-ResNet. + + Args: + inplanes (int): Number of channels for the input in first conv layer. + planes (int): Number of channels for the input in second conv layer. + temporal_kernel (int): Temporal kernel in the conv layer. Should be + either 1 or 3. Defaults to 1. + spatial_stride (int): Spatial stride in the conv layer. Defaults to 1. + init_cfg (dict or ConfigDict, optional): The Config for initialization. + Defaults to None. + """ + + def __init__(self, + inplanes: int, + planes: int, + temporal_kernel: int = 3, + spatial_stride: int = 1, + init_cfg: OptConfigType = None, + **kwargs) -> None: + super(BottleNeck, self).__init__(init_cfg=init_cfg) + assert temporal_kernel in [1, 3] + + self.conv1 = nn.Conv3d( + inplanes, + planes, + kernel_size=(temporal_kernel, 1, 1), + padding=(temporal_kernel // 2, 0, 0), + bias=False) + self.conv2 = nn.Conv3d( + planes, + planes, + stride=(1, spatial_stride, spatial_stride), + kernel_size=(1, 3, 3), + padding=(0, 1, 1), + bias=False) + + self.conv3 = nn.Conv3d(planes, planes * 4, kernel_size=1, bias=False) + + self.bn1 = nn.BatchNorm3d(planes, momentum=0.01) + self.bn2 = nn.BatchNorm3d(planes, momentum=0.01) + self.bn3 = nn.BatchNorm3d(planes * 4, momentum=0.01) + + if inplanes != planes * 4 or spatial_stride != 1: + downsample = [ + nn.Conv3d( + inplanes, + planes * 4, + kernel_size=1, + stride=(1, spatial_stride, spatial_stride), + bias=False), + nn.BatchNorm3d(planes * 4, momentum=0.01) + ] + self.downsample = nn.Sequential(*downsample) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Defines the computation performed at every call. + + Accept both 3D (BCTHW for videos) and 2D (BCHW for images) tensors. + """ + if x.ndim == 4: + return self.forward_2d(x) + + # Forward call for 3D tensors. + out = self.conv1(x) + out = self.bn1(out).relu_() + + out = self.conv2(out) + out = self.bn2(out).relu_() + + out = self.conv3(out) + out = self.bn3(out) + + if hasattr(self, 'downsample'): + x = self.downsample(x) + + return out.add_(x).relu_() + + def forward_2d(self, x: torch.Tensor) -> torch.Tensor: + """Forward call for 2D tensors.""" + out = F.conv2d(x, self.conv1.weight.sum(2)) + out = batch_norm(out, self.bn1).relu_() + + out = F.conv2d( + out, + self.conv2.weight.squeeze(2), + stride=self.conv2.stride[-1], + padding=1) + out = batch_norm(out, self.bn2).relu_() + + out = F.conv2d(out, self.conv3.weight.squeeze(2)) + out = batch_norm(out, self.bn3) + + if hasattr(self, 'downsample'): + x = F.conv2d( + x, + self.downsample[0].weight.squeeze(2), + stride=self.downsample[0].stride[-1]) + x = batch_norm(x, self.downsample[1]) + + return out.add_(x).relu_() + + +@MODELS.register_module() +class OmniResNet(BaseModel): + """Omni-ResNet that accepts both image and video inputs. + + Args: + layers (List[int]): number of layers in each residual stages. Defaults + to [3, 4, 6, 3]. + pretrain_2d (str, optional): path to the 2D pretraining checkpoints. + Defaults to None. + init_cfg (dict or ConfigDict, optional): The Config for initialization. + Defaults to None. 
+ """ + + def __init__(self, + layers: List[int] = [3, 4, 6, 3], + pretrain_2d: Optional[str] = None, + init_cfg: OptConfigType = None) -> None: + super(OmniResNet, self).__init__(init_cfg=init_cfg) + + self.inplanes = 64 + self.conv1 = nn.Conv3d( + 3, + self.inplanes, + kernel_size=(1, 7, 7), + stride=(1, 2, 2), + padding=(0, 3, 3), + bias=False) + self.bn1 = nn.BatchNorm3d(self.inplanes, momentum=0.01) + + self.pool3d = nn.MaxPool3d((1, 3, 3), (1, 2, 2), (0, 1, 1)) + self.pool2d = nn.MaxPool2d(3, 2, 1) + + self.temporal_kernel = 1 + self.layer1 = self._make_layer(64, layers[0]) + self.layer2 = self._make_layer(128, layers[1], stride=2) + self.temporal_kernel = 3 + self.layer3 = self._make_layer(256, layers[2], stride=2) + self.layer4 = self._make_layer(512, layers[3], stride=2) + + if pretrain_2d is not None: + self.init_from_2d(pretrain_2d) + + def _make_layer(self, + planes: int, + num_blocks: int, + stride: int = 1) -> nn.Module: + layers = [ + BottleNeck( + self.inplanes, + planes, + spatial_stride=stride, + temporal_kernel=self.temporal_kernel) + ] + self.inplanes = planes * 4 + for _ in range(1, num_blocks): + layers.append( + BottleNeck( + self.inplanes, + planes, + temporal_kernel=self.temporal_kernel)) + return nn.Sequential(*layers) + + def init_from_2d(self, pretrain: str) -> None: + param2d = CheckpointLoader.load_checkpoint( + pretrain, map_location='cpu') + param3d = self.state_dict() + for key in param3d: + if key in param2d: + weight = param2d[key] + if weight.ndim == 4: + t = param3d[key].shape[2] + weight = weight.unsqueeze(2) + weight = weight.expand(-1, -1, t, -1, -1) + weight = weight / t + param3d[key] = weight + self.load_state_dict(param3d) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Defines the computation performed at every call. + + Accept both 3D (BCTHW for videos) and 2D (BCHW for images) tensors. + """ + if x.ndim == 4: + return self.forward_2d(x) + + # Forward call for 3D tensors. + x = self.conv1(x) + x = self.bn1(x).relu_() + x = self.pool3d(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + + return x + + def forward_2d(self, x: torch.Tensor) -> torch.Tensor: + """Forward call for 2D tensors.""" + x = F.conv2d( + x, + self.conv1.weight.squeeze(2), + stride=self.conv1.stride[-1], + padding=self.conv1.padding[-1]) + x = batch_norm(x, self.bn1).relu_() + x = self.pool2d(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + return x diff --git a/mmaction/models/data_preprocessors/data_preprocessor.py b/mmaction/models/data_preprocessors/data_preprocessor.py index e41f32be74..d2641bb6ab 100644 --- a/mmaction/models/data_preprocessors/data_preprocessor.py +++ b/mmaction/models/data_preprocessors/data_preprocessor.py @@ -1,5 +1,5 @@ # Copyright (c) OpenMMLab. All rights reserved. 
-from typing import Optional, Sequence, Union +from typing import Optional, Sequence, Tuple, Union import torch from mmengine.model import BaseDataPreprocessor, stack_batch @@ -49,7 +49,7 @@ def __init__(self, self._enable_normalize = True if self.format_shape == 'NCHW': normalizer_shape = (-1, 1, 1) - elif self.format_shape == 'NCTHW' or self.format_shape == 'NCTVM': + elif self.format_shape in ['NCTHW', 'NCTVM', 'MIX2d3d']: normalizer_shape = (-1, 1, 1, 1) else: raise ValueError(f'Invalid format shape: {format_shape}') @@ -70,16 +70,41 @@ def __init__(self, else: self.blending = None - def forward(self, data: Sequence[dict], training: bool = False) -> tuple: + def forward(self, + data: Union[dict, Tuple[dict]], + training: bool = False) -> Union[dict, Tuple[dict]]: """Perform normalization, padding, bgr2rgb conversion and batch augmentation based on ``BaseDataPreprocessor``. Args: - data (Sequence[dict]): data sampled from dataloader. + data (dict or Tuple[dict]): data sampled from dataloader. training (bool): Whether to enable training time augmentation. Returns: - Tuple[torch.Tensor, list]: Data in the same format as the model + dict or Tuple[dict]: Data in the same format as the model + input. + """ + if isinstance(data, dict): + return self.forward_onesample(data, training) + elif isinstance(data, tuple): + outputs = [] + for data_sample in data: + output = self.forward_onesample(data_sample, training) + outputs.append(output) + return tuple(outputs) + else: + raise TypeError('Unsupported data type for `data`!') + + def forward_onesample(self, data: dict, training: bool = False) -> dict: + """Perform normalization, padding, bgr2rgb conversion and batch + augmentation on one data sample. + + Args: + data (dict): data sampled from dataloader. + training (bool): Whether to enable training time augmentation. + + Returns: + dict: Data in the same format as the model input. 
""" data = self.cast_data(data) @@ -89,18 +114,31 @@ def forward(self, data: Sequence[dict], training: bool = False) -> tuple: batch_inputs = stack_batch(inputs, self.pad_size_divisor, self.pad_value) + if self.format_shape == 'MIX2d3d': + if batch_inputs.ndim == 4: + format_shape, view_shape = 'NCHW', (-1, 1, 1) + else: + format_shape, view_shape = 'NCTHW', None + else: + format_shape, view_shape = self.format_shape, None + # ------ To RGB ------ if self.to_rgb: - if self.format_shape == 'NCHW': + if format_shape == 'NCHW': batch_inputs = batch_inputs[..., [2, 1, 0], :, :] - elif self.format_shape == 'NCTHW': + elif format_shape == 'NCTHW': batch_inputs = batch_inputs[..., [2, 1, 0], :, :, :] else: - raise ValueError(f'Invalid format shape: {self.format_shape}') + raise ValueError(f'Invalid format shape: {format_shape}') # -- Normalization --- if self._enable_normalize: - batch_inputs = (batch_inputs - self.mean) / self.std + if view_shape is None: + batch_inputs = (batch_inputs - self.mean) / self.std + else: + mean = self.mean.view(view_shape) + std = self.std.view(view_shape) + batch_inputs = (batch_inputs - mean) / std else: batch_inputs = batch_inputs.to(torch.float32) diff --git a/mmaction/models/heads/__init__.py b/mmaction/models/heads/__init__.py index 9890d5aa5a..964f7b45e4 100644 --- a/mmaction/models/heads/__init__.py +++ b/mmaction/models/heads/__init__.py @@ -3,6 +3,7 @@ from .gcn_head import GCNHead from .i3d_head import I3DHead from .mvit_head import MViTHead +from .omni_head import OmniHead from .slowfast_head import SlowFastHead from .timesformer_head import TimeSformerHead from .tpn_head import TPNHead @@ -13,7 +14,7 @@ from .x3d_head import X3DHead __all__ = [ - 'TSNHead', 'I3DHead', 'BaseHead', 'TSMHead', 'SlowFastHead', 'TPNHead', - 'X3DHead', 'TRNHead', 'TimeSformerHead', 'GCNHead', 'TSNAudioHead', - 'MViTHead' + 'BaseHead', 'GCNHead', 'I3DHead', 'MViTHead', 'OmniHead', 'SlowFastHead', + 'TPNHead', 'TRNHead', 'TSMHead', 'TSNAudioHead', 'TSNHead', + 'TimeSformerHead', 'X3DHead' ] diff --git a/mmaction/models/heads/omni_head.py b/mmaction/models/heads/omni_head.py new file mode 100644 index 0000000000..f5084dde06 --- /dev/null +++ b/mmaction/models/heads/omni_head.py @@ -0,0 +1,125 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Tuple, Union + +import torch +import torch.nn.functional as F +from torch import Tensor, nn + +from mmaction.evaluation import top_k_accuracy +from mmaction.registry import MODELS +from mmaction.utils import ConfigType, SampleList +from .base import BaseHead + + +@MODELS.register_module() +class OmniHead(BaseHead): + """Classification head for OmniResNet that accepts both image and video + inputs. + + Args: + image_classes (int): Number of image classes to be classified. + video_classes (int): Number of video classes to be classified. + in_channels (int): Number of channels in input feature. + loss_cls (dict or ConfigDict): Config for building loss. + Default: dict(type='CrossEntropyLoss') + image_dropout_ratio (float): Probability of dropout layer for the image + head. Defaults to 0.2. + video_dropout_ratio (float): Probability of dropout layer for the video + head. Defaults to 0.5. + video_nl_head (bool): if true, use a non-linear head for the video + head. Defaults to True. 
+ """ + + def __init__(self, + image_classes: int, + video_classes: int, + in_channels: int, + loss_cls: ConfigType = dict(type='CrossEntropyLoss'), + image_dropout_ratio: float = 0.2, + video_dropout_ratio: float = 0.5, + video_nl_head: bool = True, + **kwargs) -> None: + super().__init__(image_classes, in_channels, loss_cls, **kwargs) + + self.fc2d = nn.Sequential( + nn.AdaptiveAvgPool2d(1), nn.Flatten(), nn.BatchNorm1d(in_channels), + nn.Dropout(image_dropout_ratio), + nn.Linear(in_channels, image_classes)) + + if video_nl_head: + self.fc3d = nn.Sequential( + nn.AdaptiveAvgPool3d(1), nn.Flatten(), + nn.Linear(in_channels, video_classes * 2), + nn.BatchNorm1d(video_classes * 2), nn.ReLU(inplace=True), + nn.Dropout(video_dropout_ratio), + nn.Linear(video_classes * 2, video_classes)) + else: + self.fc3d = nn.Sequential( + nn.AdaptiveAvgPool3d(1), nn.Flatten(), + nn.BatchNorm1d(in_channels), nn.Dropout(video_dropout_ratio), + nn.Linear(in_channels, video_classes)) + + def forward(self, x: Tensor, **kwargs) -> Tensor: + """Defines the computation performed at every call. + + Args: + x (Tensor): The input data. + + Returns: + Tensor: The classification scores for input samples. + """ + if len(x.shape) == 4: + cls_score = self.fc2d(x) + else: + cls_score = self.fc3d(x) + return cls_score + + def loss_by_feat(self, cls_scores: Union[Tensor, Tuple[Tensor]], + data_samples: SampleList) -> dict: + """Calculate the loss based on the features extracted by the head. + + Args: + cls_scores (Tensor): Classification prediction results of + all class, has shape (batch_size, num_classes). + data_samples (List[:obj:`ActionDataSample`]): The batch + data samples. + + Returns: + dict: A dictionary of loss components. + """ + if hasattr(data_samples[0], 'gt_labels'): + labels = [x.gt_labels.item for x in data_samples] + else: + labels = [x.gt_label.label for x in data_samples] + labels = torch.stack(labels).to(cls_scores.device) + labels = labels.squeeze() + + losses = dict() + if labels.shape == torch.Size([]): + labels = labels.unsqueeze(0) + elif labels.dim() == 1 and cls_scores.size()[0] == 1: + # Fix a bug when training with soft labels and batch size is 1. + # When using soft labels, `labels` and `cls_socre` share the same + # shape. 
+ labels = labels.unsqueeze(0) + + if cls_scores.size() != labels.size(): + top_k_acc = top_k_accuracy(cls_scores.detach().cpu().numpy(), + labels.detach().cpu().numpy(), + self.topk) + for k, a in zip(self.topk, top_k_acc): + losses[f'top{k}_acc'] = torch.tensor( + a, device=cls_scores.device) + if self.label_smooth_eps != 0: + if cls_scores.size() != labels.size(): + labels = F.one_hot(labels, num_classes=self.num_classes) + labels = ((1 - self.label_smooth_eps) * labels + + self.label_smooth_eps / self.num_classes) + + loss_cls = self.loss_cls(cls_scores, labels) + # loss_cls may be dictionary or single tensor + if isinstance(loss_cls, dict): + losses.update(loss_cls) + else: + losses['loss_cls'] = loss_cls + return losses diff --git a/mmaction/models/recognizers/__init__.py b/mmaction/models/recognizers/__init__.py index 61242b7962..1b7db21451 100644 --- a/mmaction/models/recognizers/__init__.py +++ b/mmaction/models/recognizers/__init__.py @@ -4,8 +4,9 @@ from .recognizer3d import Recognizer3D from .recognizer_audio import RecognizerAudio from .recognizer_gcn import RecognizerGCN +from .recognizer_omni import RecognizerOmni __all__ = [ 'BaseRecognizer', 'RecognizerGCN', 'Recognizer2D', 'Recognizer3D', - 'RecognizerAudio' + 'RecognizerAudio', 'RecognizerOmni' ] diff --git a/mmaction/models/recognizers/recognizer_omni.py b/mmaction/models/recognizers/recognizer_omni.py new file mode 100644 index 0000000000..69c6c8de81 --- /dev/null +++ b/mmaction/models/recognizers/recognizer_omni.py @@ -0,0 +1,183 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from typing import Dict, Sequence, Union + +import torch +from mmengine.model import BaseModel + +from mmaction.registry import MODELS +from mmaction.utils import ConfigType, ForwardResults, SampleList + + +@MODELS.register_module() +class RecognizerOmni(BaseModel): + """An Omni-souce recognizer model framework for joint-training of image and + video recognition tasks. + + The `backbone` and `cls_head` should be able to accept both images and + videos as inputs. + """ + + def __init__(self, backbone: ConfigType, cls_head: ConfigType, + data_preprocessor: ConfigType) -> None: + super().__init__(data_preprocessor=data_preprocessor) + self.backbone = MODELS.build(backbone) + self.cls_head = MODELS.build(cls_head) + + def forward(self, *data_samples, mode: str, **kwargs) -> ForwardResults: + """The unified entry for a forward process in both training and test. + + The method should accept three modes: + + - ``tensor``: Forward the whole network and return tensor or tuple of + tensor without any post-processing, same as a common nn.Module. + - ``predict``: Forward and return the predictions, which are fully + processed to a list of :obj:`ActionDataSample`. + - ``loss``: Forward and return a dict of losses according to the given + inputs and data samples. + + Note that this method doesn't handle neither back propagation nor + optimizer updating, which are done in the :meth:`train_step`. + + Args: + data_samples: should be a sequence of ``SampleList`` if + ``mode="predict"`` or ``mode="loss"``. Each ``SampleList`` is + the annotation data of one data source. + It should be a single torch tensor if ``mode="tensor"``. + mode (str): Return what kind of value. Defaults to ``tensor``. + + Returns: + The return type depends on ``mode``. + + - If ``mode="tensor"``, return a tensor or a tuple of tensor. + - If ``mode="predict"``, return a list of ``ActionDataSample``. + - If ``mode="loss"``, return a dict of tensor. 
+ """ + + if mode == 'loss' or mode == 'predict': + if mode == 'loss': + return self.loss(data_samples) + return self.predict(data_samples) + + elif mode == 'tensor': + + assert isinstance(data_samples, torch.Tensor) + + data_ndim = data_samples.ndim + if data_ndim not in [4, 5]: + info = f'Input is a {data_ndim}D tensor. ' + info += 'Only 4D (BCHW) or 5D (BCTHW) tensors are supported!' + raise ValueError(info) + + return self._forward(data_samples, **kwargs) + + def loss(self, data_samples: Sequence[SampleList]) -> dict: + """Calculate losses from a batch of inputs and data samples. + + Args: + data_samples (Sequence[SampleList]): a sequence of SampleList. Each + SampleList contains data samples from the same data source. + + Returns: + dict: A dictionary of loss components. + """ + loss_dict = {} + for idx, data in enumerate(data_samples): + inputs, data_samples = data['inputs'], data['data_samples'] + feats = self.extract_feat(inputs) + loss_cls = self.cls_head.loss(feats, data_samples) + for key in loss_cls: + loss_dict[key + f'_{idx}'] = loss_cls[key] + return loss_dict + + def predict(self, data_samples: Sequence[SampleList]) -> SampleList: + """Predict results from a batch of inputs and data samples with post- + processing. + + Args: + data_samples (Sequence[SampleList]): a sequence of SampleList. Each + SampleList contains data samples from the same data source. + + Returns: + List[``ActionDataSample``]: Return the recognition results. + The returns value is ``ActionDataSample``, which usually contains + ``pred_scores``. And the ``pred_scores`` usually contains + following keys. + + - item (torch.Tensor): Classification scores, has a shape + (num_classes, ) + """ + assert len(data_samples) == 1 + feats = self.extract_feat(data_samples[0]['inputs'], test_mode=True) + predictions = self.cls_head.predict(feats, + data_samples[0]['data_samples']) + return predictions + + def _forward(self, + inputs: torch.Tensor, + stage: str = 'backbone', + **kwargs) -> ForwardResults: + """Network forward process. Usually includes backbone, neck and head + forward without any post-processing. + + Args: + inputs (torch.Tensor): Raw Inputs of the recognizer. + stage (str): Which stage to output the features. + + Returns: + Union[tuple, torch.Tensor]: Features from ``backbone`` or ``head`` + forward. + """ + feats, _ = self.extract_feat(inputs, stage=stage) + return feats + + def _run_forward(self, data: Union[dict, tuple, list], + mode: str) -> Union[Dict[str, torch.Tensor], list]: + """Unpacks data for :meth:`forward` + Args: + data (dict or tuple or list): Data sampled from dataset. + mode (str): Mode of forward. + Returns: + dict or list: Results of training or testing mode. + """ + if isinstance(data, dict): + data = [data] + results = self(*data, mode=mode) + elif isinstance(data, (list, tuple)): + results = self(*data, mode=mode) + else: + raise TypeError + return results + + def extract_feat(self, + inputs: torch.Tensor, + stage: str = 'backbone', + test_mode: bool = False) -> tuple: + """Extract features of different stages. + + Args: + inputs (torch.Tensor): The input data. + stage (str): Which stage to output the feature. + Defaults to ``'backbone'``. + test_mode (bool): Whether in test mode. Defaults to False. + + Returns: + torch.Tensor: The extracted features. + dict: A dict recording the kwargs for downstream + pipeline. These keys are usually included: + ``loss_aux``. 
+ """ + + if len(inputs.shape) == 6: + inputs = inputs.view((-1, ) + inputs.shape[2:]) + + # Check settings of test + if test_mode: + x = self.backbone(inputs) + return x + else: + # Return features extracted through backbone + x = self.backbone(inputs) + if stage == 'backbone': + return x + x = self.cls_head(x) + return x diff --git a/tests/datasets/transforms/test_loading.py b/tests/datasets/transforms/test_loading.py index 4954611b1c..5413475a92 100644 --- a/tests/datasets/transforms/test_loading.py +++ b/tests/datasets/transforms/test_loading.py @@ -15,10 +15,12 @@ GenerateLocalizationLabels, LoadAudioFeature, LoadHVULabel, LoadLocalizationFeature, - LoadProposals, OpenCVDecode, - OpenCVInit, PIMSDecode, PIMSInit, - PyAVDecode, PyAVDecodeMotionVector, - PyAVInit, RawFrameDecode) + LoadProposals, LoadRGBFromFile, + OpenCVDecode, OpenCVInit, PIMSDecode, + PIMSInit, PyAVDecode, + PyAVDecodeMotionVector, PyAVInit) + +from mmaction.datasets.transforms import RawFrameDecode # isort:skip class BaseTestLoading: @@ -747,3 +749,35 @@ def test_generate_localization_label(self): assert_array_almost_equal( generate_localization_labels_result['gt_bbox'], [[0.375, 0.625]], decimal=4) + + +class TestLoadImageFromFile: + + def test_load_img(self): + data_prefix = osp.join(osp.dirname(__file__), '../../data') + + results = dict(img_path=osp.join(data_prefix, 'test.jpg')) + transform = LoadRGBFromFile() + results = transform(copy.deepcopy(results)) + assert results['img_path'] == osp.join(data_prefix, 'test.jpg') + assert results['img'].shape == (240, 320, 3) + assert results['img'].dtype == np.uint8 + assert results['img_shape'] == (240, 320) + assert results['ori_shape'] == (240, 320) + assert repr(transform) == transform.__class__.__name__ + \ + "(ignore_empty=False, to_float32=False, color_type='color', " + \ + "imdecode_backend='cv2', io_backend='disk')" + + # to_float32 + transform = LoadRGBFromFile(to_float32=True) + results = transform(copy.deepcopy(results)) + assert results['img'].dtype == np.float32 + + # test load empty + fake_img_path = osp.join(data_prefix, 'fake.jpg') + results['img_path'] = fake_img_path + transform = LoadRGBFromFile(ignore_empty=False) + with pytest.raises(FileNotFoundError): + transform(copy.deepcopy(results)) + transform = LoadRGBFromFile(ignore_empty=True) + assert transform(copy.deepcopy(results)) is None diff --git a/tests/models/backbones/test_resnet_omni.py b/tests/models/backbones/test_resnet_omni.py new file mode 100644 index 0000000000..2ddbadc73b --- /dev/null +++ b/tests/models/backbones/test_resnet_omni.py @@ -0,0 +1,26 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+import torch
+import torchvision
+
+from mmaction.models import OmniResNet
+from mmaction.testing import generate_backbone_demo_inputs
+
+
+def test_omni_resnet_backbone():
+    """Test OmniResNet backbone."""
+    _ = OmniResNet()
+
+    resnet50 = torchvision.models.resnet50()
+    params = resnet50.state_dict()
+    torch.save(params, './r50.pth')
+    model = OmniResNet(pretrain_2d='./r50.pth')
+
+    input_shape = (2, 3, 8, 64, 64)
+    videos = generate_backbone_demo_inputs(input_shape)
+    feat = model(videos)
+    assert feat.shape == torch.Size([2, 2048, 8, 2, 2])
+
+    input_shape = (2, 3, 64, 64)
+    images = generate_backbone_demo_inputs(input_shape)
+    feat = model(images)
+    assert feat.shape == torch.Size([2, 2048, 2, 2])
diff --git a/tests/models/heads/test_omni_head.py b/tests/models/heads/test_omni_head.py
new file mode 100644
index 0000000000..f9181893af
--- /dev/null
+++ b/tests/models/heads/test_omni_head.py
@@ -0,0 +1,46 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+
+from mmaction.models import OmniHead
+
+
+class obj():
+
+    def __init__(self, name, value):
+        super(obj, self).__init__()
+        setattr(self, name, value)
+
+
+def testOmniHead():
+    head = OmniHead(image_classes=100, video_classes=200, in_channels=400)
+
+    image_feat = torch.randn(2, 400, 8, 8)
+    image_score = head(image_feat)
+    assert image_score.shape == torch.Size([2, 100])
+
+    video_feat = torch.randn(2, 400, 8, 8, 8)
+    video_score = head(video_feat)
+    assert video_score.shape == torch.Size([2, 200])
+
+    head = OmniHead(
+        image_classes=100,
+        video_classes=200,
+        in_channels=400,
+        video_nl_head=True)
+
+    video_feat = torch.randn(2, 400, 8, 8, 8)
+    video_score = head(video_feat)
+    assert video_score.shape == torch.Size([2, 200])
+    data_samples = [
+        obj('gt_label', obj('label', torch.tensor(1))) for _ in range(2)
+    ]
+    losses = head.loss_by_feat(video_score, data_samples)
+    assert 'loss_cls' in losses
+
+    image_feat = torch.randn(1, 400, 8, 8)
+    head.eval()
+    image_score = head(image_feat)
+    assert image_score.shape == torch.Size([1, 100])
+    data_samples = [obj('gt_labels', obj('item', torch.tensor(1)))]
+    losses = head.loss_by_feat(image_score, data_samples)
+    assert 'loss_cls' in losses
diff --git a/tests/models/recognizers/recognizer_omni.py b/tests/models/recognizers/recognizer_omni.py
new file mode 100644
index 0000000000..23c58748de
--- /dev/null
+++ b/tests/models/recognizers/recognizer_omni.py
@@ -0,0 +1,60 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from unittest.mock import MagicMock + +import torch + +from mmaction.registry import MODELS +from mmaction.structures import ActionDataSample +from mmaction.testing import get_recognizer_cfg +from mmaction.utils import register_all_modules + + +def test_omni_resnet(): + register_all_modules() + config = get_recognizer_cfg( + 'omnisource/slowonly_r50_16xb16-8x8x1-256e_imagenet-kinetics400-rgb.py' + ) + recognizer = MODELS.build(config.model) + + # test train_step + + video_sample = { + 'inputs': [ + torch.randint(0, 255, (1, 3, 8, 224, 224)), + torch.randint(0, 255, (1, 3, 8, 224, 224)) + ], + 'data_samples': [ + ActionDataSample().set_gt_labels(2), + ActionDataSample().set_gt_labels(2) + ] + } + + image_sample = { + 'inputs': [ + torch.randint(0, 255, (1, 3, 224, 224)), + torch.randint(0, 255, (1, 3, 224, 224)) + ], + 'data_samples': [ + ActionDataSample().set_gt_labels(2), + ActionDataSample().set_gt_labels(2) + ] + } + + optim_wrapper = MagicMock() + loss_vars = recognizer.train_step([video_sample, image_sample], + optim_wrapper) + assert 'loss_cls_0' in loss_vars + assert 'loss_cls_1' in loss_vars + + loss_vars = recognizer.train_step([image_sample, video_sample], + optim_wrapper) + assert 'loss_cls_0' in loss_vars + assert 'loss_cls_1' in loss_vars + + # test test_step + with torch.no_grad(): + predictions = recognizer.test_step(video_sample) + score = predictions[0].pred_scores.item + assert len(predictions) == 1 + assert torch.min(score) >= 0 + assert torch.max(score) <= 1 From baa264bb687d4bf8c440b9a8b86876ceea482ee6 Mon Sep 17 00:00:00 2001 From: Kai Hu Date: Tue, 10 Jan 2023 04:35:49 -0500 Subject: [PATCH 55/57] [Feature] support repeat_aug (#2170) --- mmaction/datasets/__init__.py | 12 +- mmaction/datasets/repeat_aug_dataset.py | 145 +++++++++++++++++++++++ tests/datasets/test_repeataug_dataset.py | 77 ++++++++++++ 3 files changed, 226 insertions(+), 8 deletions(-) create mode 100644 mmaction/datasets/repeat_aug_dataset.py create mode 100644 tests/datasets/test_repeataug_dataset.py diff --git a/mmaction/datasets/__init__.py b/mmaction/datasets/__init__.py index 9b933f98cf..0b34a72fc0 100644 --- a/mmaction/datasets/__init__.py +++ b/mmaction/datasets/__init__.py @@ -5,16 +5,12 @@ from .base import BaseActionDataset from .pose_dataset import PoseDataset from .rawframe_dataset import RawframeDataset +from .repeat_aug_dataset import RepeatAugDataset, repeat_pseudo_collate from .transforms import * # noqa: F401, F403 from .video_dataset import VideoDataset __all__ = [ - 'VideoDataset', - 'RawframeDataset', - 'AVADataset', - 'AVAKineticsDataset', - 'PoseDataset', - 'BaseActionDataset', - 'ActivityNetDataset', - 'AudioDataset', + 'AVADataset', 'AVAKineticsDataset', 'ActivityNetDataset', 'AudioDataset', + 'BaseActionDataset', 'PoseDataset', 'RawframeDataset', 'RepeatAugDataset', + 'VideoDataset', 'repeat_pseudo_collate' ] diff --git a/mmaction/datasets/repeat_aug_dataset.py b/mmaction/datasets/repeat_aug_dataset.py new file mode 100644 index 0000000000..47f517a916 --- /dev/null +++ b/mmaction/datasets/repeat_aug_dataset.py @@ -0,0 +1,145 @@ +# Copyright (c) OpenMMLab. All rights reserved. 
+from copy import deepcopy +from typing import Any, Callable, List, Optional, Sequence, Union + +import numpy as np +from mmengine.dataset import COLLATE_FUNCTIONS, pseudo_collate + +from mmaction.registry import DATASETS +from mmaction.utils import ConfigType +from .video_dataset import VideoDataset + + +def get_type(transform: Union[dict, Callable]) -> str: + """get the type of the transform.""" + if isinstance(transform, dict) and 'type' in transform: + return transform['type'] + elif callable(transform): + return transform.__repr__().split('(')[0] + else: + raise TypeError + + +@DATASETS.register_module() +class RepeatAugDataset(VideoDataset): + """Video dataset for action recognition. + + The dataset loads raw videos and apply specified transforms to return a + dict containing the frame tensors and other information. + + The ann_file is a text file with multiple lines, and each line indicates + a sample video with the filepath and label, which are split with a + whitespace. Example of a annotation file: + + .. code-block:: txt + + some/path/000.mp4 1 + some/path/001.mp4 1 + some/path/002.mp4 2 + some/path/003.mp4 2 + some/path/004.mp4 3 + some/path/005.mp4 3 + + + Args: + ann_file (str): Path to the annotation file. + pipeline (List[Union[dict, ConfigDict, Callable]]): A sequence of + data transforms. + data_prefix (dict or ConfigDict): Path to a directory where videos + are held. Defaults to ``dict(video='')``. + multi_class (bool): Determines whether the dataset is a multi-class + dataset. Defaults to False. + num_classes (int, optional): Number of classes of the dataset, used in + multi-class datasets. Defaults to None. + start_index (int): Specify a start index for frames in consideration of + different filename format. However, when taking videos as input, + it should be set to 0, since frames loaded from videos count + from 0. Defaults to 0. + modality (str): Modality of data. Support ``RGB``, ``Flow``. + Defaults to ``RGB``. + test_mode (bool): Store True when building test or validation dataset. + Defaults to False. + """ + + def __init__(self, + ann_file: str, + pipeline: List[Union[dict, Callable]], + data_prefix: ConfigType = dict(video=''), + num_repeats: int = 4, + multi_class: bool = False, + num_classes: Optional[int] = None, + start_index: int = 0, + modality: str = 'RGB', + **kwargs) -> None: + + use_decord = get_type(pipeline[0]) == 'DecordInit' and \ + get_type(pipeline[2]) == 'DecordDecode' + + assert use_decord, ( + 'RepeatAugDataset requires decord as the video ' + 'loading backend, will support more backends in the ' + 'future') + + super().__init__( + ann_file, + pipeline=pipeline, + data_prefix=data_prefix, + multi_class=multi_class, + num_classes=num_classes, + start_index=start_index, + modality=modality, + test_mode=False, + **kwargs) + self.num_repeats = num_repeats + + def prepare_data(self, idx) -> List[dict]: + """Get data processed by ``self.pipeline``. + + Reduce the video loading and decompressing. + Args: + idx (int): The index of ``data_info``. + Returns: + List[dict]: A list of length num_repeats. 
+ """ + transforms = self.pipeline.transforms + + data_info = self.get_data_info(idx) + data_info = transforms[0](data_info) # DecordInit + + frame_inds_list, frame_inds_length = [], [0] + + fake_data_info = dict( + total_frames=data_info['total_frames'], + start_index=data_info['start_index']) + + for repeat in range(self.num_repeats): + data_info_ = transforms[1](fake_data_info) # SampleFrames + frame_inds = data_info_['frame_inds'] + frame_inds_list.append(frame_inds.reshape(-1)) + frame_inds_length.append(frame_inds.size + frame_inds_length[-1]) + + for key in data_info_: + data_info[key] = data_info_[key] + + data_info['frame_inds'] = np.concatenate(frame_inds_list) + + data_info = transforms[2](data_info) # DecordDecode + imgs = data_info.pop('imgs') + + data_info_list = [] + for repeat in range(self.num_repeats): + data_info_ = deepcopy(data_info) + start = frame_inds_length[repeat] + end = frame_inds_length[repeat + 1] + data_info_['imgs'] = imgs[start:end] + for transform in transforms[3:]: + data_info_ = transform(data_info_) + data_info_list.append(data_info_) + del imgs + return data_info_list + + +@COLLATE_FUNCTIONS.register_module() +def repeat_pseudo_collate(data_batch: Sequence) -> Any: + data_batch = [i for j in data_batch for i in j] + return pseudo_collate(data_batch) diff --git a/tests/datasets/test_repeataug_dataset.py b/tests/datasets/test_repeataug_dataset.py new file mode 100644 index 0000000000..b011e6125d --- /dev/null +++ b/tests/datasets/test_repeataug_dataset.py @@ -0,0 +1,77 @@ +# Copyright (c) OpenMMLab. All rights reserved. +import pytest +from mmengine.testing import assert_dict_has_keys + +from mmaction.datasets import RepeatAugDataset +from mmaction.utils import register_all_modules +from .base import BaseTestDataset + + +class TestVideoDataset(BaseTestDataset): + register_all_modules() + + def test_video_dataset(self): + with pytest.raises(AssertionError): + # Currently only support decord backend + video_dataset = RepeatAugDataset( + self.video_ann_file, + self.video_pipeline, + data_prefix={'video': self.data_prefix}, + start_index=3) + + video_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', clip_len=4, frame_interval=2, + num_clips=1), + dict(type='DecordDecode') + ] + + video_dataset = RepeatAugDataset( + self.video_ann_file, + video_pipeline, + data_prefix={'video': self.data_prefix}, + start_index=3) + assert len(video_dataset) == 2 + assert video_dataset.start_index == 3 + + video_dataset = RepeatAugDataset( + self.video_ann_file, + video_pipeline, + data_prefix={'video': self.data_prefix}) + assert video_dataset.start_index == 0 + + def test_video_dataset_multi_label(self): + video_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', clip_len=4, frame_interval=2, + num_clips=1), + dict(type='DecordDecode') + ] + video_dataset = RepeatAugDataset( + self.video_ann_file_multi_label, + video_pipeline, + data_prefix={'video': self.data_prefix}, + multi_class=True, + num_classes=100) + assert video_dataset.start_index == 0 + + def test_video_pipeline(self): + video_pipeline = [ + dict(type='DecordInit'), + dict( + type='SampleFrames', clip_len=4, frame_interval=2, + num_clips=1), + dict(type='DecordDecode') + ] + target_keys = ['filename', 'label', 'start_index', 'modality'] + + # RepeatAugDataset not in test mode + video_dataset = RepeatAugDataset( + self.video_ann_file, + video_pipeline, + data_prefix={'video': self.data_prefix}) + result = video_dataset[0] + assert isinstance(result, (list, tuple)) + assert 
assert_dict_has_keys(result[0], target_keys)

From fec3aed6ff24ff242a69d05f4808bf30cac2ef9f Mon Sep 17 00:00:00 2001
From: cir7 <33249023+cir7@users.noreply.github.com>
Date: Tue, 10 Jan 2023 17:53:20 +0800
Subject: [PATCH 56/57] Bump version to 1.0.0rc2 (#2169)

---
 .circleci/test.yml                     |  2 +-
 .github/workflows/merge_stage_test.yml |  2 +-
 README.md                              | 16 ++++---
 docs/en/get_started.md                 |  2 +-
 docs/en/notes/changelog.md             | 63 +++++++++++++++++++++++++-
 docs/zh_cn/get_started.md              |  2 +-
 mmaction/__init__.py                   |  2 +-
 mmaction/version.py                    |  2 +-
 setup.py                               |  1 -
 9 files changed, 77 insertions(+), 15 deletions(-)

diff --git a/.circleci/test.yml b/.circleci/test.yml
index 20575636c0..3984767a12 100644
--- a/.circleci/test.yml
+++ b/.circleci/test.yml
@@ -177,7 +177,7 @@ workflows:
           name: minimum_version_cpu
           torch: 1.6.0
           torchvision: 0.7.0
-          python: 3.6.9 # The lowest python 3.6.x version available on CircleCI images
+          python: 3.7.4
           requires:
             - lint
       - build_cpu:
diff --git a/.github/workflows/merge_stage_test.yml b/.github/workflows/merge_stage_test.yml
index 22a6390700..8c9862d049 100644
--- a/.github/workflows/merge_stage_test.yml
+++ b/.github/workflows/merge_stage_test.yml
@@ -21,7 +21,7 @@ jobs:
     runs-on: ubuntu-18.04
     strategy:
       matrix:
-        python-version: [3.6, 3.8, 3.9]
+        python-version: [3.8, 3.9]
         torch: [1.8.1]
         include:
           - torch: 1.8.1
diff --git a/README.md b/README.md
index cadcd8b9e8..4475b1a508 100644
--- a/README.md
+++ b/README.md
@@ -70,13 +70,15 @@ The 1.x branch works with **PyTorch 1.6+**.
 
 ## What's New
 
-- (2023-1-5) We support STGCN++ on NTU-RGB+D.
-- (2022-11-30) We refine our skeleton-based pipelines and support the joint training of multi-stream skeleton information, including **joint, bone, joint-motion, and bone-motion**.
-- (2022-10-11) We support **Video Swin Transformer** on Kinetics400 and additionally train a Swin-L model on Kinetics700 to extract video features for downstream tasks.
-- (2022-10-25) We support **VideoMAE** on Kinetics400.
-- (2022-10-28) We support **C2D** on Kinetics400, achieve 73.57% Top-1 accuracy (higher than 71.8% in the [paper](https://arxiv.org/abs/1711.07971)).
-
-**Release**: v1.0.0rc1 was released in 14/10/2022. Please refer to [changelog.md](docs/en/notes/changelog.md) for details and release history.
+**Release**: v1.0.0rc2 with the following new features:
+
+- We support Omni-Source training on ImageNet and Kinetics datasets.
+- We support exporting spatial-temporal detection models to ONNX.
+- We support **STGCN++** on NTU-RGB+D.
+- We support **MViT V2** on Kinetics 400 and something-V2.
+- We refine our skeleton-based pipelines and support the joint training of multi-stream skeleton information, including **joint, bone, joint-motion, and bone-motion**.
+- We support **VideoMAE** on Kinetics400.
+- We support **C2D** on Kinetics400, achieving 73.57% Top-1 accuracy (higher than 71.8% in the [paper](https://arxiv.org/abs/1711.07971)).
 
 ## Installation
 
diff --git a/docs/en/get_started.md b/docs/en/get_started.md
index 7ae03f9774..cd506623f0 100644
--- a/docs/en/get_started.md
+++ b/docs/en/get_started.md
@@ -2,7 +2,7 @@
 
 In this section we demonstrate how to prepare an environment with PyTorch.
 
-MMAction2 works on Linux, Windows and macOS. It requires Python 3.6+, CUDA 9.2+ and PyTorch 1.6+.
+MMAction2 works on Linux, Windows and macOS. It requires Python 3.7+, CUDA 9.2+ and PyTorch 1.6+.
 
 ```{note}
 If you are experienced with PyTorch and have already installed it, just skip this part and jump to the [next section](#installation).
Otherwise, you can follow these steps for the preparation.
diff --git a/docs/en/notes/changelog.md b/docs/en/notes/changelog.md
index 1c96e5161d..0487ca0cc7 100644
--- a/docs/en/notes/changelog.md
+++ b/docs/en/notes/changelog.md
@@ -1,6 +1,67 @@
 # Changelog
 
-## 1.0.0rc1 (14/10/2022)
+## 1.0.0rc2 (1/6/2023)
+
+**Highlights**
+
+**New Features**
+
+- Support VideoMAE ([#1942](https://github.com/open-mmlab/mmaction2/pull/1942))
+- Support MViT V2 ([#2007](https://github.com/open-mmlab/mmaction2/pull/2007))
+- Support C2D ([#2022](https://github.com/open-mmlab/mmaction2/pull/2022))
+- Support AVA-Kinetics dataset ([#2080](https://github.com/open-mmlab/mmaction2/pull/2080))
+- Support STGCN++ ([#2156](https://github.com/open-mmlab/mmaction2/pull/2156))
+- Support exporting spatial-temporal detection models to ONNX ([#2148](https://github.com/open-mmlab/mmaction2/pull/2148))
+- Support Omni-Source training on ImageNet and Kinetics datasets ([#2143](https://github.com/open-mmlab/mmaction2/pull/2143))
+
+**Improvements**
+
+- Support repeat batch data augmentation ([#2170](https://github.com/open-mmlab/mmaction2/pull/2170))
+- Support a FLOPs calculation tool powered by fvcore ([#1997](https://github.com/open-mmlab/mmaction2/pull/1997))
+- Support spatial-temporal detection demo ([#2019](https://github.com/open-mmlab/mmaction2/pull/2019))
+- Add SyncBufferHook and add randomness config in train.py ([#2044](https://github.com/open-mmlab/mmaction2/pull/2044))
+- Refactor gradcam ([#2049](https://github.com/open-mmlab/mmaction2/pull/2049))
+- Support init_cfg in Swin and ViTMAE ([#2055](https://github.com/open-mmlab/mmaction2/pull/2055))
+- Refactor STGCN and related pipelines ([#2087](https://github.com/open-mmlab/mmaction2/pull/2087))
+- Refactor visualization tools ([#2092](https://github.com/open-mmlab/mmaction2/pull/2092))
+- Update `SampleFrames` transform and improve most models' performance ([#1942](https://github.com/open-mmlab/mmaction2/pull/1942))
+- Support real-time webcam demo ([#2152](https://github.com/open-mmlab/mmaction2/pull/2152))
+- Refactor and enhance 2s-AGCN ([#2130](https://github.com/open-mmlab/mmaction2/pull/2130))
+- Support adjusting fps in `SampleFrame` ([#2157](https://github.com/open-mmlab/mmaction2/pull/2157))
+
+**Bug Fixes**
+
+- Fix CI upstream library dependency ([#2000](https://github.com/open-mmlab/mmaction2/pull/2000))
+- Fix SlowOnly readme typos and results ([#2006](https://github.com/open-mmlab/mmaction2/pull/2006))
+- Fix VideoSwin readme ([#2010](https://github.com/open-mmlab/mmaction2/pull/2010))
+- Fix tools and mim error ([#2028](https://github.com/open-mmlab/mmaction2/pull/2028))
+- Fix Imgaug wrapper ([#2024](https://github.com/open-mmlab/mmaction2/pull/2024))
+- Remove useless scripts ([#2032](https://github.com/open-mmlab/mmaction2/pull/2032))
+- Fix multi-view inference ([#2045](https://github.com/open-mmlab/mmaction2/pull/2045))
+- Update mmcv maximum version to 1.8.0 ([#2047](https://github.com/open-mmlab/mmaction2/pull/2047))
+- Fix torchserver dependency ([#2053](https://github.com/open-mmlab/mmaction2/pull/2053))
+- Fix `gen_ntu_rgbd_raw` script ([#2076](https://github.com/open-mmlab/mmaction2/pull/2076))
+- Update AVA-Kinetics experiment configs and results ([#2099](https://github.com/open-mmlab/mmaction2/pull/2099))
+- Add `joint.pkl` and `bone.pkl` used in multi-stream fusion tool ([#2106](https://github.com/open-mmlab/mmaction2/pull/2106))
+- Fix lint CI config ([#2110](https://github.com/open-mmlab/mmaction2/pull/2110))
+- Update testing accuracy for modified `SampleFrames` ([#2117](https://github.com/open-mmlab/mmaction2/pull/2117)), ([#2121](https://github.com/open-mmlab/mmaction2/pull/2121)), ([#2122](https://github.com/open-mmlab/mmaction2/pull/2122)), ([#2124](https://github.com/open-mmlab/mmaction2/pull/2124)), ([#2125](https://github.com/open-mmlab/mmaction2/pull/2125)), ([#2126](https://github.com/open-mmlab/mmaction2/pull/2126)), ([#2129](https://github.com/open-mmlab/mmaction2/pull/2129)), ([#2128](https://github.com/open-mmlab/mmaction2/pull/2128))
+- Fix timm related bug ([#1976](https://github.com/open-mmlab/mmaction2/pull/1976))
+- Fix `check_videos.py` script ([#2134](https://github.com/open-mmlab/mmaction2/pull/2134))
+- Update CI maximum torch version to 1.13.0 ([#2118](https://github.com/open-mmlab/mmaction2/pull/2118))
+
+**Documentation**
+
+- Add MMYOLO description in README ([#2011](https://github.com/open-mmlab/mmaction2/pull/2011))
+- Add v1.x introduction in README ([#2023](https://github.com/open-mmlab/mmaction2/pull/2023))
+- Fix link in README ([#2035](https://github.com/open-mmlab/mmaction2/pull/2035))
+- Refine some docs ([#2038](https://github.com/open-mmlab/mmaction2/pull/2038)), ([#2040](https://github.com/open-mmlab/mmaction2/pull/2040)), ([#2058](https://github.com/open-mmlab/mmaction2/pull/2058))
+- Update TSN/TSM Readme ([#2082](https://github.com/open-mmlab/mmaction2/pull/2082))
+- Add Chinese documentation ([#2083](https://github.com/open-mmlab/mmaction2/pull/2083))
+- Adjust document structure ([#2088](https://github.com/open-mmlab/mmaction2/pull/2088))
+- Fix Sth-Sth and Jester dataset links ([#2103](https://github.com/open-mmlab/mmaction2/pull/2103))
+- Fix doc link ([#2131](https://github.com/open-mmlab/mmaction2/pull/2131))
+
+## 1.0.0rc1 (10/14/2022)
 
 **Highlights**
 
diff --git a/docs/zh_cn/get_started.md b/docs/zh_cn/get_started.md
index 57d1ba97d8..df0851235a 100644
--- a/docs/zh_cn/get_started.md
+++ b/docs/zh_cn/get_started.md
@@ -2,7 +2,7 @@
 
 在本节中,我们将演示如何准备 PyTorch 相关的依赖环境。
 
-MMAction2 适用于 Linux、Windows 和 MacOS。它需要 Python 3.6+,CUDA 9.2+ 和 PyTorch 1.6+。
+MMAction2 适用于 Linux、Windows 和 MacOS。它需要 Python 3.7+,CUDA 9.2+ 和 PyTorch 1.6+。
 
 ```
 如果你对配置 PyTorch 环境已经很熟悉,并且已经完成了配置,可以直接进入[下一节](#安装)。
diff --git a/mmaction/__init__.py b/mmaction/__init__.py
index ddb95b18ab..dac9a6dacd 100644
--- a/mmaction/__init__.py
+++ b/mmaction/__init__.py
@@ -9,7 +9,7 @@
 mmcv_maximum_version = '2.1.0'
 mmcv_version = digit_version(mmcv.__version__)
 
-mmengine_minimum_version = '0.1.0'
+mmengine_minimum_version = '0.3.0'
 mmengine_maximum_version = '1.0.0'
 mmengine_version = digit_version(mmengine.__version__)
 
diff --git a/mmaction/version.py b/mmaction/version.py
index 61a380c424..be3f0959a7 100644
--- a/mmaction/version.py
+++ b/mmaction/version.py
@@ -1,6 +1,6 @@
 # Copyright (c) Open-MMLab. All rights reserved.
-__version__ = '1.0.0rc1' +__version__ = '1.0.0rc2' def parse_version_info(version_str: str): diff --git a/setup.py b/setup.py index ce4f1be67f..b248eb13d6 100644 --- a/setup.py +++ b/setup.py @@ -179,7 +179,6 @@ def add_mim_extension(): 'License :: OSI Approved :: Apache Software License', 'Operating System :: OS Independent', 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', From 215378c1a2188dadfbf77dda167dcaccb8388c50 Mon Sep 17 00:00:00 2001 From: ly015 Date: Tue, 10 Jan 2023 18:27:32 +0800 Subject: [PATCH 57/57] Add mminstall --- .github/workflows/test_mim.yml | 47 ---------------------------------- docs/en/notes/changelog.md | 2 +- requirements/mminstall.txt | 1 + setup.py | 1 + 4 files changed, 3 insertions(+), 48 deletions(-) delete mode 100644 .github/workflows/test_mim.yml diff --git a/.github/workflows/test_mim.yml b/.github/workflows/test_mim.yml deleted file mode 100644 index 88594d0e77..0000000000 --- a/.github/workflows/test_mim.yml +++ /dev/null @@ -1,47 +0,0 @@ -name: test-mim - -on: - push: - paths: - - 'model-index.yml' - - 'configs/**' - - pull_request: - paths: - - 'model-index.yml' - - 'configs/**' - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -jobs: - build_cpu: - runs-on: ubuntu-18.04 - strategy: - matrix: - python-version: [3.7] - torch: [1.8.0] - include: - - torch: 1.8.0 - torch_version: torch1.8 - torchvision: 0.9.0 - steps: - - uses: actions/checkout@v2 - - name: Set up Python ${{ matrix.python-version }} - uses: actions/setup-python@v2 - with: - python-version: ${{ matrix.python-version }} - - name: Upgrade pip - run: pip install pip --upgrade - - name: Install Pillow - run: pip install Pillow==6.2.2 - if: ${{matrix.torchvision == '0.4.2'}} - - name: Install PyTorch - run: pip install torch==${{matrix.torch}}+cpu torchvision==${{matrix.torchvision}}+cpu -f https://download.pytorch.org/whl/torch_stable.html - - name: Install openmim - run: pip install openmim - - name: Build and install - run: rm -rf .eggs && mim install -e . - - name: test commands of mim - run: mim search mmaction2 diff --git a/docs/en/notes/changelog.md b/docs/en/notes/changelog.md index 0487ca0cc7..2421324146 100644 --- a/docs/en/notes/changelog.md +++ b/docs/en/notes/changelog.md @@ -1,6 +1,6 @@ # Changelog -## 1.0.0rc2 (1/6/2023) +## 1.0.0rc2 (1/10/2023) **Highlights** diff --git a/requirements/mminstall.txt b/requirements/mminstall.txt index b15de7a496..cc624e4490 100644 --- a/requirements/mminstall.txt +++ b/requirements/mminstall.txt @@ -1 +1,2 @@ mmcv>=2.0.0rc0,<2.1.0 +mmengine>=0.3.0 diff --git a/setup.py b/setup.py index b248eb13d6..c8a8f8e0f2 100644 --- a/setup.py +++ b/setup.py @@ -190,5 +190,6 @@ def add_mim_extension(): 'all': parse_requirements('requirements.txt'), 'tests': parse_requirements('requirements/tests.txt'), 'optional': parse_requirements('requirements/optional.txt'), + 'mim': parse_requirements('requirements/mminstall.txt'), }, zip_safe=False)