Columns:
python_code: string (length 0-992k)
repo_name: string (length 8-46)
file_path: string (length 5-162)
import glob
import os
import os.path as osp

import mmcv
import torch.nn as nn

from mmaction.models import build_localizer, build_recognizer


def _get_config_path():
    """Find the predefined recognizer config paths."""
    repo_dir = osp.dirname(osp.dirname(osp.dirname(__file__)))
    config_dpath = osp.join(repo_dir, 'configs')
    if not osp.exists(config_dpath):
        raise Exception('Cannot find config path')
    config_fpaths = list(glob.glob(osp.join(config_dpath, '*.py')))
    config_names = [os.path.relpath(p, config_dpath) for p in config_fpaths]
    print(f'Using {len(config_names)} config files')
    config_fpaths = [
        osp.join(config_dpath, config_name) for config_name in config_names
    ]
    return config_fpaths


def test_config_build_recognizer():
    """Test that all mmaction recognizers defined in the configs can be
    initialized."""
    repo_dir = osp.dirname(osp.dirname(osp.dirname(__file__)))
    config_dpath = osp.join(repo_dir, 'configs/recognition')
    if not osp.exists(config_dpath):
        raise Exception('Cannot find config path')
    config_fpaths = list(glob.glob(osp.join(config_dpath, '*.py')))

    # test all config files in the `configs/recognition` directory
    for config_fpath in config_fpaths:
        config_mod = mmcv.Config.fromfile(config_fpath)
        print(f'Building recognizer, config_fpath = {config_fpath!r}')

        # Remove pretrained keys to allow for testing in an offline
        # environment
        if 'pretrained' in config_mod.model['backbone']:
            config_mod.model['backbone']['pretrained'] = None

        recognizer = build_recognizer(
            config_mod.model,
            train_cfg=config_mod.train_cfg,
            test_cfg=config_mod.test_cfg)
        assert isinstance(recognizer, nn.Module)


def _get_config_path_for_localizer():
    """Find the predefined localizer config paths."""
    repo_dir = osp.dirname(osp.dirname(osp.dirname(__file__)))
    config_dpath = osp.join(repo_dir, 'configs/localization')
    if not osp.exists(config_dpath):
        raise Exception('Cannot find config path')
    config_fpaths = list(glob.glob(osp.join(config_dpath, '*.py')))
    config_names = [os.path.relpath(p, config_dpath) for p in config_fpaths]
    print(f'Using {len(config_names)} config files')
    config_fpaths = [
        osp.join(config_dpath, config_name) for config_name in config_names
    ]
    return config_fpaths


def test_config_build_localizer():
    """Test that all mmaction localizers defined in the configs can be
    initialized."""
    config_fpaths = _get_config_path_for_localizer()

    # test all config files in the `configs/localization` directory
    for config_fpath in config_fpaths:
        config_mod = mmcv.Config.fromfile(config_fpath)
        print(f'Building localizer, config_fpath = {config_fpath!r}')
        if config_mod.get('model', None):
            localizer = build_localizer(config_mod.model)
            assert isinstance(localizer, nn.Module)
InternVideo-main
Downstream/Open-Set-Action-Recognition/tests/test_runtime/test_config.py
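For reference, a minimal standalone sketch of the same build path, pointed at one of the configs reproduced below; this snippet is illustrative and not part of the test file:

import mmcv
from mmaction.models import build_recognizer

cfg = mmcv.Config.fromfile(
    'configs/recognition/tin/tin_r50_1x1x8_40e_sthv1_rgb.py')
cfg.model['backbone']['pretrained'] = None  # stay offline, as in the test
model = build_recognizer(
    cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
print(type(model))  # a torch.nn.Module subclass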
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.

import os
import subprocess
import sys

sys.path.insert(0, os.path.abspath('..'))

# -- Project information -----------------------------------------------------

project = 'MMAction2'
copyright = '2020, OpenMMLab'
author = 'MMAction2 Authors'
version_file = '../mmaction/version.py'


def get_version():
    with open(version_file, 'r') as f:
        exec(compile(f.read(), version_file, 'exec'))
    return locals()['__version__']


# The full version, including alpha/beta/rc tags
release = get_version()

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.viewcode',
    'recommonmark', 'sphinx_markdown_tables'
]

# numpy and torch are required
autodoc_mock_imports = ['mmaction.version', 'cv2', 'PIL']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# -- Options for HTML output -------------------------------------------------

source_suffix = {'.rst': 'restructuredtext', '.md': 'markdown'}

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []

master_doc = 'index'


def builder_inited_handler(app):
    subprocess.run(['./merge_docs.sh'])
    subprocess.run(['./stat.py'])


def setup(app):
    app.connect('builder-inited', builder_inited_handler)
InternVideo-main
Downstream/Open-Set-Action-Recognition/docs/conf.py
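A note on get_version above: it exec-s version.py and reads __version__ back out of locals(). A minimal sketch of the mechanism with stand-in file content (not the real mmaction/version.py), using an explicit namespace, which is the more robust variant:

# Stand-in for the contents of version.py
code = "__version__ = '0.0.1'"
namespace = {}
exec(compile(code, 'version.py', 'exec'), namespace)
print(namespace['__version__'])  # -> 0.0.1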
#!/usr/bin/env python
import functools as func
import glob
import re

files = sorted(glob.glob('*_models.md'))

stats = []

for f in files:
    with open(f, 'r') as content_file:
        content = content_file.read()

    # title
    title = content.split('\n')[0].replace('#', '')

    # count papers
    papers = set(x.lower().strip()
                 for x in re.findall(r'\btitle={(.*)}', content))
    paperlist = '\n'.join(sorted(' - ' + x for x in papers))

    # count configs
    configs = set(x.lower().strip()
                  for x in re.findall(r'https.*configs/.*\.py', content))

    # count ckpts
    ckpts = set(x.lower().strip()
                for x in re.findall(r'https://download.*\.pth', content)
                if 'mmaction' in x)

    statsmsg = f"""
## [{title}]({f})

* Number of checkpoints: {len(ckpts)}
* Number of configs: {len(configs)}
* Number of papers: {len(papers)}
{paperlist}
"""
    stats.append((papers, configs, ckpts, statsmsg))

allpapers = func.reduce(lambda a, b: a.union(b), [p for p, _, _, _ in stats])
allconfigs = func.reduce(lambda a, b: a.union(b), [c for _, c, _, _ in stats])
allckpts = func.reduce(lambda a, b: a.union(b), [c for _, _, c, _ in stats])

msglist = '\n'.join(x for _, _, _, x in stats)

modelzoo = f"""
# Model Zoo Statistics

* Number of checkpoints: {len(allckpts)}
* Number of configs: {len(allconfigs)}
* Number of papers: {len(allpapers)}

{msglist}
"""

with open('modelzoo.md', 'w') as f:
    f.write(modelzoo)
InternVideo-main
Downstream/Open-Set-Action-Recognition/docs/stat.py
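A quick self-contained check of the regexes stat.py relies on; the sample markdown below is made up for illustration:

import re

sample = """# TSN
  title={Temporal Segment Networks},
[config](https://github.com/open-mmlab/mmaction2/blob/master/configs/recognition/tsn/tsn_r50.py)
[ckpt](https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50.pth)
"""
print(re.findall(r'\btitle={(.*)}', sample))           # paper titles
print(re.findall(r'https.*configs/.*\.py', sample))    # config links
print(re.findall(r'https://download.*\.pth', sample))  # checkpoint links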
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNetTIN',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False,
        shift_div=4),
    cls_head=dict(
        type='TSMHead',
        num_classes=174,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.8,
        init_std=0.001,
        is_shift=False))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/sth-v1/rawframes_train/'
data_root_val = 'data/sth-v1/rawframes_val/'
ann_file_train = 'data/sth-v1/sth-v1_train_list.txt'
ann_file_val = 'data/sth-v1/sth-v1_val_list.txt'
ann_file_test = 'data/sth-v1/sth-v1_val_list.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='FrameSelector'),
    dict(type='Resize', scale=(-1, 256)),
    dict(
        type='MultiScaleCrop',
        input_size=224,
        scales=(1, 0.875, 0.75, 0.66),
        random_crop=False,
        max_wh_scale_gap=1),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='FrameSelector'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='FrameSelector'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=6,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        filename_tmpl='{:05}.jpg',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        filename_tmpl='{:05}.jpg',
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        filename_tmpl='{:05}.jpg',
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD',
    constructor='TSMOptimizerConstructor',
    paramwise_cfg=dict(fc_lr5=True),
    lr=0.02,  # this lr is used for 8 gpus
    momentum=0.9,
    weight_decay=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
# learning policy
lr_config = dict(
    policy='CosineAnnealing',
    min_lr_ratio=0.5,
    warmup='linear',
    warmup_ratio=0.1,
    warmup_by_epoch=True,
    warmup_iters=1)
total_epochs = 40
checkpoint_config = dict(interval=1)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tin_r50_1x1x8_40e_sthv1_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tin/tin_r50_1x1x8_40e_sthv1_rgb.py
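The 1x1x8 in the file name encodes clip_len=1, frame_interval=1, num_clips=8. A hedged sketch of the TSN-style segment sampling this implies, simplified from mmaction's SampleFrames rather than the exact implementation:

import numpy as np

def sample_segment_frames(total_frames, num_clips=8, clip_len=1, seed=0):
    # Split the video into num_clips equal segments and draw one
    # clip_len-frame snippet from each (train-time behaviour, simplified).
    rng = np.random.default_rng(seed)
    seg_len = total_frames // num_clips
    offsets = np.arange(num_clips) * seg_len
    jitter = rng.integers(0, max(seg_len - clip_len + 1, 1), size=num_clips)
    return offsets + jitter

print(sample_segment_frames(120))  # eight indices, one per segment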
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNetTIN',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False,
        shift_div=4),
    cls_head=dict(
        type='TSMHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5,
        init_std=0.001,
        is_shift=True))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(
        type='MultiScaleCrop',
        input_size=224,
        scales=(1, 0.875, 0.75, 0.66),
        random_crop=False,
        max_wh_scale_gap=1),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=6,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD',
    constructor='TSMOptimizerConstructor',
    paramwise_cfg=dict(fc_lr5=True),
    lr=0.01,  # this lr is for 8 gpus
    momentum=0.9,
    weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=1)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tin_tsm_finetune_r50_1x1x8_50e_kinetics400_rgb/'
# load_from = 'modelzoo/tsm_r50_1x1x8_50e_kinetics400_rgb_20200607-af7fb746.pth'  # noqa: E501
load_from = 'https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_1x1x8_50e_kinetics400_rgb/tsm_r50_1x1x8_50e_kinetics400_rgb_20200607-af7fb746.pth'  # noqa: E501
resume_from = None
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tin/tin_tsm_finetune_r50_1x1x8_50e_kinetics400_rgb.py
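The "# this lr is for 8 gpus" comment refers to the linear scaling rule: the learning rate should scale with the total batch size. A small helper to adjust it (my own illustration, not part of mmaction):

def scale_lr(base_lr=0.01, base_gpus=8, base_videos_per_gpu=6,
             gpus=8, videos_per_gpu=6):
    # lr is proportional to total batch size = gpus * videos_per_gpu
    base_batch = base_gpus * base_videos_per_gpu
    batch = gpus * videos_per_gpu
    return base_lr * batch / base_batch

print(scale_lr(gpus=4))  # 0.005 when halving the number of GPUs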
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNetTIN',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False,
        shift_div=4),
    cls_head=dict(
        type='TSMHead',
        num_classes=174,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.8,
        init_std=0.001,
        is_shift=False))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/sth-v2/rawframes_train/'
data_root_val = 'data/sth-v2/rawframes_val/'
ann_file_train = 'data/sth-v2/sth-v2_train_list.txt'
ann_file_val = 'data/sth-v2/sth-v2_val_list.txt'
ann_file_test = 'data/sth-v2/sth-v2_val_list.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(
        type='MultiScaleCrop',
        input_size=224,
        scales=(1, 0.875, 0.75, 0.66),
        random_crop=False,
        max_wh_scale_gap=1),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=6,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        filename_tmpl='{:05}.jpg',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        filename_tmpl='{:05}.jpg',
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        filename_tmpl='{:05}.jpg',
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD',
    constructor='TSMOptimizerConstructor',
    paramwise_cfg=dict(fc_lr5=True),
    lr=0.02,  # this lr is used for 8 gpus
    momentum=0.9,
    weight_decay=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
# learning policy
lr_config = dict(
    policy='CosineAnnealing',
    by_epoch=False,
    warmup='linear',
    warmup_iters=1,
    warmup_by_epoch=True,
    min_lr=0)
total_epochs = 40
checkpoint_config = dict(interval=1)
evaluation = dict(
    interval=2, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tin_r50_1x1x8_40e_sthv2_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tin/tin_r50_1x1x8_40e_sthv2_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=200,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'VideoDataset'
# The flag indicates using joint training
omnisource = True
data_root = 'data/OmniSource/kinetics_200_train'
data_root_val = 'data/OmniSource/kinetics_200_val'
web_root = 'data/OmniSource/'
ann_file_train = 'data/OmniSource/annotations/kinetics_200/k200_train.txt'
ann_file_web = ('data/OmniSource/annotations/webimage_200/'
                'tsn_8seg_webimage_200_wodup.txt')
ann_file_val = 'data/OmniSource/annotations/kinetics_200/k200_val.txt'
ann_file_test = 'data/OmniSource/annotations/kinetics_200/k200_val.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
train_web_pipeline = [
    dict(type='ImageDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=25,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=12,
    omni_videos_per_gpu=[12, 64],
    workers_per_gpu=2,
    train=[
        dict(
            type=dataset_type,
            ann_file=ann_file_train,
            data_prefix=data_root,
            pipeline=train_pipeline),
        dict(
            type='ImageDataset',
            ann_file=ann_file_web,
            data_prefix=web_root,
            pipeline=train_web_pipeline,
            num_classes=200,
            sample_by_class=True,
            power=0.5)
    ],
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.00375, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=1)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = ('./work_dirs/omnisource/'
            'tsn_r50_1x1x8_100e_minikinetics_webimage_rgb')
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_webimage_rgb.py
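With omni_videos_per_gpu=[12, 64], each training source gets its own per-GPU batch size (12 video clips, 64 web images per step). A toy sketch of drawing one batch from every source per joint step; the real OmniSource runner is more involved, so treat this purely as an illustration:

from itertools import cycle

def joint_batches(video_loader, image_loader, steps):
    # One joint step draws a batch from each source, cycling the shorter one.
    vids, imgs = cycle(video_loader), cycle(image_loader)
    for _ in range(steps):
        yield next(vids), next(imgs)

video_loader = [f'video_batch_{i} (12 clips)' for i in range(3)]
image_loader = [f'image_batch_{i} (64 images)' for i in range(5)]
for vb, ib in joint_batches(video_loader, image_loader, steps=4):
    print(vb, '+', ib)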
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=200,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'VideoDataset'
# The flag indicates using joint training
omnisource = True
data_root = 'data/OmniSource/kinetics_200_train'
data_root_val = 'data/OmniSource/kinetics_200_val'
kraw_root = 'data/OmniSource/kinetics_raw_200_train'
ann_file_train = 'data/OmniSource/annotations/kinetics_200/k200_train.txt'
ann_file_kraw = ('data/OmniSource/annotations/kinetics_raw_200/'
                 'slowonly_8x8_kinetics_raw_200.json')
ann_file_val = 'data/OmniSource/annotations/kinetics_200/k200_val.txt'
ann_file_test = 'data/OmniSource/annotations/kinetics_200/k200_val.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
train_kraw_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=25,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=12,
    workers_per_gpu=2,
    train=[
        dict(
            type=dataset_type,
            ann_file=ann_file_train,
            data_prefix=data_root,
            pipeline=train_pipeline),
        dict(
            type='RawVideoDataset',
            ann_file=ann_file_kraw,
            data_prefix=kraw_root,
            pipeline=train_kraw_pipeline,
            clipname_tmpl='part_{}.mp4',
            sampling_strategy='positive')
    ],
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.00375, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=1)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = ('./work_dirs/omnisource/'
            'tsn_r50_1x1x8_100e_minikinetics_kineticsraw_rgb')
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_kineticsraw_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=200,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'VideoDataset'
data_root = 'data/OmniSource/kinetics_200_train'
data_root_val = 'data/OmniSource/kinetics_200_val'
ann_file_train = 'data/OmniSource/annotations/kinetics_200/k200_train.txt'
ann_file_val = 'data/OmniSource/annotations/kinetics_200/k200_val.txt'
ann_file_test = 'data/OmniSource/annotations/kinetics_200/k200_val.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=25,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=12,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.00375, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=1)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/omnisource/tsn_r50_1x1x8_100e_minikinetics_rgb'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=200,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'VideoDataset'
# The flag indicates using joint training
omnisource = True
data_root = 'data/OmniSource/kinetics_200_train'
data_root_val = 'data/OmniSource/kinetics_200_val'
gg_root = 'data/OmniSource/googleimage_200'
ann_file_train = 'data/OmniSource/annotations/kinetics_200/k200_train.txt'
ann_file_gg = ('data/OmniSource/annotations/googleimage_200/'
               'tsn_8seg_googleimage_200_wodup.txt')
ann_file_val = 'data/OmniSource/annotations/kinetics_200/k200_val.txt'
ann_file_test = 'data/OmniSource/annotations/kinetics_200/k200_val.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
train_gg_pipeline = [
    dict(type='ImageDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=25,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=12,
    omni_videos_per_gpu=[12, 64],
    workers_per_gpu=2,
    train=[
        dict(
            type=dataset_type,
            ann_file=ann_file_train,
            data_prefix=data_root,
            pipeline=train_pipeline),
        dict(
            type='ImageDataset',
            ann_file=ann_file_gg,
            data_prefix=gg_root,
            pipeline=train_gg_pipeline)
    ],
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.00375, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=1)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = ('./work_dirs/omnisource/'
            'tsn_r50_1x1x8_100e_minikinetics_googleimage_rgb')
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_googleimage_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=200,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'VideoDataset'
# The flag indicates using joint training
omnisource = True
data_root = 'data/OmniSource/kinetics_200_train'
data_root_val = 'data/OmniSource/kinetics_200_val'
web_root = 'data/OmniSource/'
iv_root = 'data/OmniSource/insvideo_200'
kraw_root = 'data/OmniSource/kinetics_raw_200_train'
ann_file_train = 'data/OmniSource/annotations/kinetics_200/k200_train.txt'
ann_file_web = ('data/OmniSource/annotations/webimage_200/'
                'tsn_8seg_webimage_200_wodup.txt')
ann_file_iv = ('data/OmniSource/annotations/insvideo_200/'
               'slowonly_8x8_insvideo_200_wodup.txt')
ann_file_kraw = ('data/OmniSource/annotations/kinetics_raw_200/'
                 'slowonly_8x8_kinetics_raw_200.json')
ann_file_val = 'data/OmniSource/annotations/kinetics_200/k200_val.txt'
ann_file_test = 'data/OmniSource/annotations/kinetics_200/k200_val.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
train_web_pipeline = [
    dict(type='ImageDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
train_iv_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
train_kraw_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=25,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=12,
    omni_videos_per_gpu=[12, 64, 12, 12],
    train_ratio=[2, 1, 1, 1],
    workers_per_gpu=1,
    train=[
        dict(
            type=dataset_type,
            ann_file=ann_file_train,
            data_prefix=data_root,
            pipeline=train_pipeline),
        dict(
            type='ImageDataset',
            ann_file=ann_file_web,
            data_prefix=web_root,
            pipeline=train_web_pipeline,
            num_classes=200,
            sample_by_class=True,
            power=0.5),
        dict(
            type=dataset_type,
            ann_file=ann_file_iv,
            data_prefix=iv_root,
            pipeline=train_iv_pipeline,
            num_classes=200,
            sample_by_class=True,
            power=0.5),
        dict(
            type='RawVideoDataset',
            ann_file=ann_file_kraw,
            data_prefix=kraw_root,
            pipeline=train_kraw_pipeline,
            clipname_tmpl='part_{}.mp4',
            sampling_strategy='positive')
    ],
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.00375, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=1)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = ('./work_dirs/omnisource/'
            'tsn_r50_1x1x8_100e_minikinetics_omnisource_rgb')
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_omnisource_rgb.py
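train_ratio=[2, 1, 1, 1] weights how often each of the four sources is visited during joint training. A toy scheduler showing the resulting step pattern (my own illustration, not mmaction code):

def source_schedule(train_ratio, steps):
    # Emit source indices proportionally to train_ratio, round-robin style.
    order = [i for i, r in enumerate(train_ratio) for _ in range(r)]
    return [order[s % len(order)] for s in range(steps)]

names = ['kinetics', 'webimage', 'insvideo', 'kinetics_raw']
for idx in source_schedule([2, 1, 1, 1], steps=10):
    print(names[idx])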
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=200,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'VideoDataset'
# The flag indicates using joint training
omnisource = True
data_root = 'data/OmniSource/kinetics_200_train'
data_root_val = 'data/OmniSource/kinetics_200_val'
iv_root = 'data/OmniSource/insvideo_200'
ann_file_train = 'data/OmniSource/annotations/kinetics_200/k200_train.txt'
ann_file_iv = ('data/OmniSource/annotations/insvideo_200/'
               'slowonly_8x8_insvideo_200_wodup.txt')
ann_file_val = 'data/OmniSource/annotations/kinetics_200/k200_val.txt'
ann_file_test = 'data/OmniSource/annotations/kinetics_200/k200_val.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
train_iv_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=25,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=12,
    workers_per_gpu=2,
    train=[
        dict(
            type=dataset_type,
            ann_file=ann_file_train,
            data_prefix=data_root,
            pipeline=train_pipeline),
        dict(
            type=dataset_type,
            ann_file=ann_file_iv,
            data_prefix=iv_root,
            pipeline=train_iv_pipeline,
            num_classes=200,
            sample_by_class=True,
            power=0.5)
    ],
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.00375, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=1)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = ('./work_dirs/omnisource/'
            'tsn_r50_1x1x8_100e_minikinetics_insvideo_rgb')
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/tsn_r50_1x1x8_100e_minikinetics/tsn_r50_1x1x8_100e_minikinetics_insvideo_rgb.py
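sample_by_class=True with power=0.5 draws classes with probability roughly proportional to count**0.5, flattening the class imbalance of the web data. A hedged sketch of that weighting (the exact mmaction sampling logic may differ in detail):

import numpy as np

def class_sampling_probs(class_counts, power=0.5):
    # Weight each class by count**power, then normalise; power < 1
    # up-weights rare classes relative to plain frequency sampling.
    w = np.asarray(class_counts, dtype=float) ** power
    return w / w.sum()

counts = [1000, 100, 10]
print(class_sampling_probs(counts))           # power=0.5
print(class_sampling_probs(counts, power=1))  # plain frequency, for contrast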
# model settings
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowOnly',
        depth=50,
        pretrained=None,
        lateral=False,
        conv1_kernel=(1, 7, 7),
        conv1_stride_t=1,
        pool1_stride_t=1,
        inflate=(0, 0, 1, 1),
        norm_eval=False),
    cls_head=dict(
        type='I3DHead',
        in_channels=2048,
        num_classes=200,
        spatial_type='avg',
        dropout_ratio=0.5))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'VideoDataset'
# The flag indicates using joint training
omnisource = True
data_root = 'data/OmniSource/kinetics_200_train'
data_root_val = 'data/OmniSource/kinetics_200_val'
iv_root = 'data/OmniSource/insvideo_200'
ann_file_train = 'data/OmniSource/annotations/kinetics_200/k200_train.txt'
ann_file_iv = ('data/OmniSource/annotations/insvideo_200/'
               'slowonly_8x8_insvideo_200_wodup.txt')
ann_file_val = 'data/OmniSource/annotations/kinetics_200/k200_val.txt'
ann_file_test = 'data/OmniSource/annotations/kinetics_200/k200_val.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
train_iv_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=8,
        frame_interval=8,
        num_clips=1,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=8,
        frame_interval=8,
        num_clips=10,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=12,
    workers_per_gpu=2,
    train=[
        dict(
            type=dataset_type,
            ann_file=ann_file_train,
            data_prefix=data_root,
            pipeline=train_pipeline),
        dict(
            type=dataset_type,
            ann_file=ann_file_iv,
            data_prefix=iv_root,
            pipeline=train_iv_pipeline,
            num_classes=200,
            sample_by_class=True,
            power=0.5)
    ],
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.15, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0)
total_epochs = 256
checkpoint_config = dict(interval=8)
workflow = [('train', 1)]
evaluation = dict(
    interval=8, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = ('./work_dirs/omnisource/'
            'slowonly_r50_8x8x1_256e_minikinetics_insvideo_rgb')
load_from = None
resume_from = None
find_unused_parameters = False
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_insvideo_rgb.py
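8x8x1 in the file name means clip_len=8, frame_interval=8, num_clips=1, so a single training clip spans 8 * 8 = 64 raw frames. A quick index computation:

clip_len, frame_interval, num_clips = 8, 8, 1
span = clip_len * frame_interval
start = 0  # SampleFrames picks this offset randomly at train time
indices = [start + i * frame_interval for i in range(clip_len)]
print(span, indices)  # 64 [0, 8, 16, 24, 32, 40, 48, 56]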
# model settings
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowOnly',
        depth=50,
        pretrained=None,
        lateral=False,
        conv1_kernel=(1, 7, 7),
        conv1_stride_t=1,
        pool1_stride_t=1,
        inflate=(0, 0, 1, 1),
        norm_eval=False),
    cls_head=dict(
        type='I3DHead',
        in_channels=2048,
        num_classes=200,
        spatial_type='avg',
        dropout_ratio=0.5))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'VideoDataset'
# The flag indicates using joint training
omnisource = True
data_root = 'data/OmniSource/kinetics_200_train'
data_root_val = 'data/OmniSource/kinetics_200_val'
kraw_root = 'data/OmniSource/kinetics_raw_200_train'
ann_file_train = 'data/OmniSource/annotations/kinetics_200/k200_train.txt'
ann_file_kraw = ('data/OmniSource/annotations/kinetics_raw_200/'
                 'slowonly_8x8_kinetics_raw_200.json')
ann_file_val = 'data/OmniSource/annotations/kinetics_200/k200_val.txt'
ann_file_test = 'data/OmniSource/annotations/kinetics_200/k200_val.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
train_kraw_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=8,
        frame_interval=8,
        num_clips=1,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=8,
        frame_interval=8,
        num_clips=10,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=12,
    workers_per_gpu=2,
    train=[
        dict(
            type=dataset_type,
            ann_file=ann_file_train,
            data_prefix=data_root,
            pipeline=train_pipeline),
        dict(
            type='RawVideoDataset',
            ann_file=ann_file_kraw,
            data_prefix=kraw_root,
            pipeline=train_kraw_pipeline,
            clipname_tmpl='part_{}.mp4',
            sampling_strategy='positive')
    ],
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.15, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0)
total_epochs = 256
checkpoint_config = dict(interval=8)
workflow = [('train', 1)]
evaluation = dict(
    interval=8, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = ('./work_dirs/omnisource/'
            'slowonly_r50_8x8x1_256e_minikinetics_kineticsraw_rgb')
load_from = None
resume_from = None
find_unused_parameters = False
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_kineticsraw_rgb.py
# model settings
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowOnly',
        depth=50,
        pretrained=None,
        lateral=False,
        conv1_kernel=(1, 7, 7),
        conv1_stride_t=1,
        pool1_stride_t=1,
        inflate=(0, 0, 1, 1),
        norm_eval=False),
    cls_head=dict(
        type='I3DHead',
        in_channels=2048,
        num_classes=200,
        spatial_type='avg',
        dropout_ratio=0.5))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'VideoDataset'
# The flag indicates using joint training
omnisource = True
data_root = 'data/OmniSource/kinetics_200_train'
data_root_val = 'data/OmniSource/kinetics_200_val'
ann_file_train = 'data/OmniSource/annotations/kinetics_200/k200_train.txt'
ann_file_val = 'data/OmniSource/annotations/kinetics_200/k200_val.txt'
ann_file_test = 'data/OmniSource/annotations/kinetics_200/k200_val.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=8,
        frame_interval=8,
        num_clips=1,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=8,
        frame_interval=8,
        num_clips=10,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=12,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.15, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0)
total_epochs = 256
checkpoint_config = dict(interval=8)
workflow = [('train', 1)]
evaluation = dict(
    interval=8, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/omnisource/slowonly_r50_8x8x1_256e_minikinetics_rgb'
load_from = None
resume_from = None
find_unused_parameters = False
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_rgb.py
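At test time the pipeline above evaluates 10 clips x 3 spatial crops = 30 views per video, and average_clips='prob' averages softmax scores across them. A toy averaging sketch (random logits stand in for model output):

import numpy as np

num_clips, num_crops, num_classes = 10, 3, 200
rng = np.random.default_rng(0)
logits = rng.normal(size=(num_clips * num_crops, num_classes))
probs = np.exp(logits) / np.exp(logits).sum(-1, keepdims=True)  # softmax
video_score = probs.mean(axis=0)  # average over all 30 views
print(video_score.shape, video_score.argmax())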
# model settings
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowOnly',
        depth=50,
        pretrained=None,
        lateral=False,
        conv1_kernel=(1, 7, 7),
        conv1_stride_t=1,
        pool1_stride_t=1,
        inflate=(0, 0, 1, 1),
        norm_eval=False),
    cls_head=dict(
        type='I3DHead',
        in_channels=2048,
        num_classes=200,
        spatial_type='avg',
        dropout_ratio=0.5))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'VideoDataset'
# The flag indicates using joint training
omnisource = True
data_root = 'data/OmniSource/kinetics_200_train'
data_root_val = 'data/OmniSource/kinetics_200_val'
gg_root = 'data/OmniSource/googleimage_200'
ann_file_train = 'data/OmniSource/annotations/kinetics_200/k200_train.txt'
ann_file_gg = ('data/OmniSource/annotations/googleimage_200/'
               'tsn_8seg_googleimage_200_wodup.txt')
ann_file_val = 'data/OmniSource/annotations/kinetics_200/k200_val.txt'
ann_file_test = 'data/OmniSource/annotations/kinetics_200/k200_val.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
train_gg_pipeline = [
    dict(type='ImageDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='BuildPseudoClip', clip_len=8),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=8,
        frame_interval=8,
        num_clips=1,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=8,
        frame_interval=8,
        num_clips=10,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=12,
    workers_per_gpu=2,
    train=[
        dict(
            type=dataset_type,
            ann_file=ann_file_train,
            data_prefix=data_root,
            pipeline=train_pipeline),
        dict(
            type='ImageDataset',
            ann_file=ann_file_gg,
            data_prefix=gg_root,
            pipeline=train_gg_pipeline)
    ],
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.15, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0)
total_epochs = 256
checkpoint_config = dict(interval=8)
workflow = [('train', 1)]
evaluation = dict(
    interval=8, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = ('./work_dirs/omnisource/'
            'slowonly_r50_8x8x1_256e_minikinetics_googleimage_rgb')
load_from = None
resume_from = None
find_unused_parameters = False
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_googleimage_rgb.py
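The image branches above rely on `BuildPseudoClip` to make still images consumable by a 3D recognizer. A minimal sketch of the underlying idea, assuming a pseudo clip is simply the image repeated `clip_len` times (an illustration, not the mmaction2 implementation):

# Hedged sketch of the BuildPseudoClip idea used in the image pipelines above.
import numpy as np

def build_pseudo_clip(frame, clip_len=8):
    """Repeat one (H, W, C) frame into a (clip_len, H, W, C) pseudo clip."""
    return np.repeat(frame[None, ...], clip_len, axis=0)

frame = np.zeros((224, 224, 3), dtype=np.uint8)  # placeholder decoded image
clip = build_pseudo_clip(frame)                  # shape: (8, 224, 224, 3)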
# model settings
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowOnly',
        depth=50,
        pretrained=None,
        lateral=False,
        conv1_kernel=(1, 7, 7),
        conv1_stride_t=1,
        pool1_stride_t=1,
        inflate=(0, 0, 1, 1),
        norm_eval=False),
    cls_head=dict(
        type='I3DHead',
        in_channels=2048,
        num_classes=200,
        spatial_type='avg',
        dropout_ratio=0.5))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'VideoDataset'
# The flag indicates using joint training
omnisource = True
data_root = 'data/OmniSource/kinetics_200_train'
data_root_val = 'data/OmniSource/kinetics_200_val'
web_root = 'data/OmniSource/'
ann_file_train = 'data/OmniSource/annotations/kinetics_200/k200_train.txt'
ann_file_web = ('data/OmniSource/annotations/webimage_200/'
                'tsn_8seg_webimage_200_wodup.txt')
ann_file_val = 'data/OmniSource/annotations/kinetics_200/k200_val.txt'
ann_file_test = 'data/OmniSource/annotations/kinetics_200/k200_val.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
train_web_pipeline = [
    dict(type='ImageDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='BuildPseudoClip', clip_len=8),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=8,
        frame_interval=8,
        num_clips=1,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=8,
        frame_interval=8,
        num_clips=10,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=12,
    workers_per_gpu=2,
    train=[
        dict(
            type=dataset_type,
            ann_file=ann_file_train,
            data_prefix=data_root,
            pipeline=train_pipeline),
        dict(
            type='ImageDataset',
            ann_file=ann_file_web,
            data_prefix=web_root,
            pipeline=train_web_pipeline,
            num_classes=200,
            sample_by_class=True,
            power=0.5)
    ],
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.15, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0)
total_epochs = 256
checkpoint_config = dict(interval=8)
workflow = [('train', 1)]
evaluation = dict(
    interval=8, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20, hooks=[
        dict(type='TextLoggerHook'),
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = ('./work_dirs/omnisource/'
            'slowonly_r50_8x8x1_256e_minikinetics_webimage_rgb')
load_from = None
resume_from = None
find_unused_parameters = False
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_webimage_rgb.py
# model settings
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowOnly',
        depth=50,
        pretrained=None,
        lateral=False,
        conv1_kernel=(1, 7, 7),
        conv1_stride_t=1,
        pool1_stride_t=1,
        inflate=(0, 0, 1, 1),
        norm_eval=False),
    cls_head=dict(
        type='I3DHead',
        in_channels=2048,
        num_classes=200,
        spatial_type='avg',
        dropout_ratio=0.5))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'VideoDataset'
# The flag indicates using joint training
omnisource = True
data_root = 'data/OmniSource/kinetics_200_train'
data_root_val = 'data/OmniSource/kinetics_200_val'
web_root = 'data/OmniSource/'
iv_root = 'data/OmniSource/insvideo_200'
kraw_root = 'data/OmniSource/kinetics_raw_200_train'
ann_file_train = 'data/OmniSource/annotations/kinetics_200/k200_train.txt'
ann_file_web = ('data/OmniSource/annotations/webimage_200/'
                'tsn_8seg_webimage_200_wodup.txt')
ann_file_iv = ('data/OmniSource/annotations/insvideo_200/'
               'slowonly_8x8_insvideo_200_wodup.txt')
ann_file_kraw = ('data/OmniSource/annotations/kinetics_raw_200/'
                 'slowonly_8x8_kinetics_raw_200.json')
ann_file_val = 'data/OmniSource/annotations/kinetics_200/k200_val.txt'
ann_file_test = 'data/OmniSource/annotations/kinetics_200/k200_val.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
train_web_pipeline = [
    dict(type='ImageDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='BuildPseudoClip', clip_len=8),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
train_iv_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
train_kraw_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=8,
        frame_interval=8,
        num_clips=1,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=8,
        frame_interval=8,
        num_clips=10,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=12,
    workers_per_gpu=1,
    train_ratio=[2, 1, 1, 1],
    train=[
        dict(
            type=dataset_type,
            ann_file=ann_file_train,
            data_prefix=data_root,
            pipeline=train_pipeline),
        dict(
            type='ImageDataset',
            ann_file=ann_file_web,
            data_prefix=web_root,
            pipeline=train_web_pipeline,
            num_classes=200,
            sample_by_class=True,
            power=0.5),
        dict(
            type=dataset_type,
            ann_file=ann_file_iv,
            data_prefix=iv_root,
            pipeline=train_iv_pipeline,
            num_classes=200,
            sample_by_class=True,
            power=0.5),
        dict(
            type='RawVideoDataset',
            ann_file=ann_file_kraw,
            data_prefix=kraw_root,
            pipeline=train_kraw_pipeline,
            clipname_tmpl='part_{}.mp4',
            sampling_strategy='positive')
    ],
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.15, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0)
total_epochs = 256
checkpoint_config = dict(interval=8)
workflow = [('train', 1)]
evaluation = dict(
    interval=8, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20, hooks=[
        dict(type='TextLoggerHook'),
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = ('./work_dirs/omnisource/'
            'slowonly_r50_8x8x1_256e_minikinetics_omnisource_rgb')
load_from = None
resume_from = None
find_unused_parameters = False
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/slowonly_r50_8x8x1_256e_minikinetics_omnisource_rgb.py
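Any of the configs in this family can be instantiated through the mmaction2 0.x builder API; a minimal sketch, assuming the working directory is the repo root shown in the `file_path` fields and that pretrained weights should be skipped for offline use:

# Hedged sketch: load a config from this dataset and build the recognizer.
import mmcv
from mmaction.models import build_recognizer

cfg = mmcv.Config.fromfile(
    'configs/recognition/omnisource/slowonly_r50_8x8x1_256e_minikinetics/'
    'slowonly_r50_8x8x1_256e_minikinetics_omnisource_rgb.py')
cfg.model['backbone']['pretrained'] = None  # avoid downloading weights
recognizer = build_recognizer(
    cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)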
# model settings
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet2Plus1d',
        depth=34,
        pretrained=None,
        pretrained2d=False,
        norm_eval=False,
        conv_cfg=dict(type='Conv2plus1d'),
        norm_cfg=dict(type='SyncBN', requires_grad=True, eps=1e-3),
        conv1_kernel=(3, 7, 7),
        conv1_stride_t=1,
        pool1_stride_t=1,
        inflate=(1, 1, 1, 1),
        spatial_strides=(1, 2, 2, 2),
        temporal_strides=(1, 2, 2, 2),
        zero_init_residual=False),
    cls_head=dict(
        type='I3DHead',
        num_classes=400,
        in_channels=512,
        spatial_type='avg',
        dropout_ratio=0.5,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=1,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=10,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=6,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline,
        test_mode=True),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=test_pipeline,
        test_mode=True))
# optimizer
optimizer = dict(
    type='SGD', lr=0.075, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0)
total_epochs = 180
checkpoint_config = dict(interval=5)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/r2plus1d_r34_3d_32x2x1_180e_kinetics400_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
find_unused_parameters = False
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/r2plus1d/r2plus1d_r34_32x2x1_180e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet2Plus1d',
        depth=34,
        pretrained=None,
        pretrained2d=False,
        norm_eval=False,
        conv_cfg=dict(type='Conv2plus1d'),
        norm_cfg=dict(type='SyncBN', requires_grad=True, eps=1e-3),
        conv1_kernel=(3, 7, 7),
        conv1_stride_t=1,
        pool1_stride_t=1,
        inflate=(1, 1, 1, 1),
        spatial_strides=(1, 2, 2, 2),
        temporal_strides=(1, 2, 2, 2),
        zero_init_residual=False),
    cls_head=dict(
        type='I3DHead',
        num_classes=400,
        in_channels=512,
        spatial_type='avg',
        dropout_ratio=0.5,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=8,
        frame_interval=8,
        num_clips=1,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=8,
        frame_interval=8,
        num_clips=10,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline,
        test_mode=True),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=test_pipeline,
        test_mode=True))
# optimizer
optimizer = dict(
    type='SGD', lr=0.1, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0)
total_epochs = 180
checkpoint_config = dict(interval=5)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/r2plus1d_r34_8x8x1_180e_kinetics400_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
find_unused_parameters = False
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/r2plus1d/r2plus1d_r34_8x8x1_180e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet2Plus1d',
        depth=34,
        pretrained=None,
        pretrained2d=False,
        norm_eval=False,
        conv_cfg=dict(type='Conv2plus1d'),
        norm_cfg=dict(type='SyncBN', requires_grad=True, eps=1e-3),
        act_cfg=dict(type='ReLU'),
        conv1_kernel=(3, 7, 7),
        conv1_stride_t=1,
        pool1_stride_t=1,
        inflate=(1, 1, 1, 1),
        spatial_strides=(1, 2, 2, 2),
        temporal_strides=(1, 2, 2, 2),
        zero_init_residual=False),
    cls_head=dict(
        type='I3DHead',
        num_classes=400,
        in_channels=512,
        spatial_type='avg',
        dropout_ratio=0.5,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'VideoDataset'
data_root = 'data/kinetics400/videos_train'
data_root_val = 'data/kinetics400/videos_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=8,
        frame_interval=8,
        num_clips=1,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=8,
        frame_interval=8,
        num_clips=10,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=16,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline,
        test_mode=True),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=test_pipeline,
        test_mode=True))
# optimizer
optimizer = dict(
    type='SGD', lr=0.2, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0)
total_epochs = 180
checkpoint_config = dict(interval=5)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/r2plus1d_r34_video_3d_8x8x1_180e_kinetics400_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
find_unused_parameters = False
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/r2plus1d/r2plus1d_r34_video_8x8x1_180e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet2Plus1d',
        depth=34,
        pretrained=None,
        pretrained2d=False,
        norm_eval=False,
        conv_cfg=dict(type='Conv2plus1d'),
        norm_cfg=dict(type='SyncBN', requires_grad=True, eps=1e-3),
        act_cfg=dict(type='ReLU'),
        conv1_kernel=(3, 7, 7),
        conv1_stride_t=1,
        pool1_stride_t=1,
        inflate=(1, 1, 1, 1),
        spatial_strides=(1, 2, 2, 2),
        temporal_strides=(1, 2, 2, 2),
        zero_init_residual=False),
    cls_head=dict(
        type='I3DHead',
        num_classes=400,
        in_channels=512,
        spatial_type='avg',
        dropout_ratio=0.5,
        init_std=0.01))
# model training and testing settings
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'VideoDataset'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
test_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=8,
        frame_interval=8,
        num_clips=10,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=1,
    workers_per_gpu=2,
    test=dict(
        type=dataset_type,
        ann_file=None,
        data_prefix=None,
        pipeline=test_pipeline))
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/r2plus1d/r2plus1d_r34_video_inference_8x8x1_180e_kinetics400_rgb.py
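Inference configs like the one above deliberately leave `ann_file` and `data_prefix` as `None`. A hedged sketch of filling them in at test time via mmcv's attribute-style config access; the annotation file and video directory below are placeholders, not paths from this repo:

# Hedged sketch: point an inference config at your own data.
import mmcv

cfg = mmcv.Config.fromfile(
    'configs/recognition/r2plus1d/'
    'r2plus1d_r34_video_inference_8x8x1_180e_kinetics400_rgb.py')
cfg.data.test.ann_file = 'my_val_list_videos.txt'  # placeholder list file
cfg.data.test.data_prefix = 'my_videos/'           # placeholder video root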
# model settings
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dCSN',
        pretrained2d=False,
        pretrained=  # noqa: E251
        'https://download.openmmlab.com/mmaction/recognition/csn/ircsn_from_scratch_r152_ig65m_20200807-771c4135.pth',  # noqa: E501
        depth=152,
        with_pool2=False,
        bottleneck_mode='ir',
        norm_eval=False,
        zero_init_residual=False),
    cls_head=dict(
        type='I3DHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        dropout_ratio=0.5,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
    dict(type='FrameSelector'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=1,
        test_mode=True),
    dict(type='FrameSelector'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=10,
        test_mode=True),
    dict(type='FrameSelector'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=3,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.000125, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(
    policy='step',
    step=[32, 48],
    warmup='linear',
    warmup_ratio=0.1,
    warmup_by_epoch=True,
    warmup_iters=16)
total_epochs = 58
checkpoint_config = dict(interval=2)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[dict(type='TextLoggerHook'),
           dict(type='TensorboardLoggerHook')])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/csn/ircsn_ig65m_pretrained_r152_32x2x1_58e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dCSN',
        pretrained2d=False,
        pretrained=  # noqa: E251
        'https://download.openmmlab.com/mmaction/recognition/csn/ircsn_from_scratch_r152_ig65m_20200807-771c4135.pth',  # noqa: E501
        depth=152,
        with_pool2=False,
        bottleneck_mode='ir',
        norm_eval=True,
        bn_frozen=True,
        zero_init_residual=False),
    cls_head=dict(
        type='I3DHead',
        num_classes=101,
        in_channels=2048,
        spatial_type='avg',
        dropout_ratio=0.5,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'VideoDataset'
data_root = 'data/ucf101/videos'
data_root_val = 'data/ucf101/videos'
ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt'
ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt'
ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=1,
        test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=10,
        test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.000125, momentum=0.9, weight_decay=0.0001,
    nesterov=True)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(
    policy='step',
    step=[20, 40],
    warmup='linear',
    warmup_ratio=0.1,
    warmup_by_epoch=True,
    warmup_iters=5)
total_epochs = 50
checkpoint_config = dict(interval=10)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[dict(type='TextLoggerHook'),
           dict(type='TensorboardLoggerHook')])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/csn/finetune_ucf101_csn_dnn'
load_from = 'https://download.openmmlab.com/mmaction/recognition/csn/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb_20200812-9037a758.pth'  # noqa: E501
resume_from = None
workflow = [('train', 1)]
find_unused_parameters = True
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/csn/finetune_ucf101_csn_dnn.py
# model settings
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dCSN',
        pretrained2d=False,
        pretrained=  # noqa: E251
        'https://download.openmmlab.com/mmaction/recognition/csn/ircsn_from_scratch_r152_ig65m_20200807-771c4135.pth',  # noqa: E501
        depth=152,
        with_pool2=False,
        bottleneck_mode='ir',
        norm_eval=True,
        bn_frozen=True,
        zero_init_residual=False),
    cls_head=dict(
        type='I3DHead',
        num_classes=101,
        in_channels=2048,
        spatial_type='avg',
        dropout_ratio=0.5,
        init_std=0.01))
# model training and testing settings
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'VideoDataset'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=10,
        test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=1,
    workers_per_gpu=2,
    test=dict(
        type=dataset_type,
        ann_file=None,
        data_prefix=None,
        pipeline=test_pipeline))
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/csn/inference_csn_dnn.py
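The test pipelines above use `ThreeCrop`, which evaluates three square crops of `crop_size` taken along the longer spatial side. A simplified illustration of the crop geometry, assuming crops are taken at the two ends and the centre (not the mmaction2 implementation itself):

# Hedged sketch of ThreeCrop's crop boxes as (y, x, h, w) tuples.
def three_crop_boxes(height, width, crop_size):
    if width >= height:  # landscape: left / centre / right crops
        xs = [0, (width - crop_size) // 2, width - crop_size]
        return [(0, x, crop_size, crop_size) for x in xs]
    ys = [0, (height - crop_size) // 2, height - crop_size]  # portrait
    return [(y, 0, crop_size, crop_size) for y in ys]

three_crop_boxes(256, 340, 256)  # -> boxes at x = 0, 42, 84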
# model settings
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dCSN',
        pretrained2d=False,
        pretrained=  # noqa: E251
        'https://download.openmmlab.com/mmaction/recognition/csn/ircsn_from_scratch_r152_ig65m_20200807-771c4135.pth',  # noqa: E501
        depth=152,
        with_pool2=False,
        bottleneck_mode='ir',
        norm_eval=True,
        bn_frozen=True,
        zero_init_residual=False),
    cls_head=dict(
        type='I3DHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        dropout_ratio=0.5,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
    dict(type='FrameSelector'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=1,
        test_mode=True),
    dict(type='FrameSelector'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=10,
        test_mode=True),
    dict(type='FrameSelector'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=3,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.000125, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(
    policy='step',
    step=[32, 48],
    warmup='linear',
    warmup_ratio=0.1,
    warmup_by_epoch=True,
    warmup_iters=16)
total_epochs = 58
checkpoint_config = dict(interval=2)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[dict(type='TextLoggerHook'),
           dict(type='TensorboardLoggerHook')])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb'  # noqa: E501
load_from = None
resume_from = None
workflow = [('train', 1)]
find_unused_parameters = True
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/csn/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dCSN',
        pretrained2d=False,
        pretrained=  # noqa: E251
        'https://download.openmmlab.com/mmaction/recognition/csn/ircsn_from_scratch_r152_ig65m_20200807-771c4135.pth',  # noqa: E501
        depth=152,
        with_pool2=False,
        bottleneck_mode='ir',
        norm_eval=True,
        bn_frozen=True,
        zero_init_residual=False),
    cls_head=dict(
        type='I3DHead',
        loss_cls=dict(
            type='EvidenceLoss',
            num_classes=101,
            evidence='exp',
            loss_type='log',
            annealing_method='exp'),
        num_classes=101,
        in_channels=2048,
        spatial_type='avg',
        dropout_ratio=0.5,
        init_std=0.01))
# model training and testing settings
evidence = 'exp'  # only used for EDL
test_cfg = dict(average_clips='score')
# dataset settings
dataset_type = 'VideoDataset'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=10,
        test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=1,
    workers_per_gpu=2,
    test=dict(
        type=dataset_type,
        ann_file=None,
        data_prefix=None,
        pipeline=test_pipeline))
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/csn/inference_csn_enn.py
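For the evidential (ENN/EDL) heads above, `evidence='exp'` means the head's logits are exponentiated into non-negative per-class evidence; the standard EDL post-processing then maps evidence to a Dirichlet with alpha = evidence + 1, giving expected class probabilities and a scalar uncertainty u = K / sum(alpha). A minimal numpy sketch of that standard recipe (not code from this repo):

# Hedged sketch of standard EDL post-processing for K classes.
import numpy as np

def edl_probs_and_uncertainty(evidence):
    alpha = evidence + 1.0                        # Dirichlet parameters
    strength = alpha.sum(-1, keepdims=True)       # Dirichlet strength
    probs = alpha / strength                      # expected probabilities
    uncertainty = evidence.shape[-1] / strength   # u = K / sum(alpha)
    return probs, uncertainty

logits = np.random.randn(1, 101)                  # placeholder head output
probs, u = edl_probs_and_uncertainty(np.exp(logits))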
# model settings
evidence_loss = dict(
    type='EvidenceLoss',
    num_classes=101,
    evidence='exp',
    loss_type='log',
    with_kldiv=False,
    with_avuloss=True,
    annealing_method='exp')
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dCSN',
        pretrained2d=False,
        pretrained=  # noqa: E251
        'https://download.openmmlab.com/mmaction/recognition/csn/ircsn_from_scratch_r152_ig65m_20200807-771c4135.pth',  # noqa: E501
        depth=152,
        with_pool2=False,
        bottleneck_mode='ir',
        norm_eval=True,
        bn_frozen=True,
        zero_init_residual=False),
    cls_head=dict(
        type='I3DHead',
        loss_cls=evidence_loss,
        num_classes=101,
        in_channels=2048,
        spatial_type='avg',
        dropout_ratio=0.5,
        init_std=0.01),
    debias_head=dict(
        type='DebiasHead',
        loss_cls=evidence_loss,  # actually not used!
        loss_factor=0.1,
        num_classes=101,
        in_channels=2048,
        dropout_ratio=0.5,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='evidence', evidence_type='exp')
# dataset settings
dataset_type = 'VideoDataset'
data_root = 'data/ucf101/videos'
data_root_val = 'data/ucf101/videos'
ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt'
ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt'
ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=1,
        test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=10,
        test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.000125, momentum=0.9, weight_decay=0.0001,
    nesterov=True)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(
    policy='step',
    step=[20, 40],
    warmup='linear',
    warmup_ratio=0.1,
    warmup_by_epoch=True,
    warmup_iters=5)
total_epochs = 50
checkpoint_config = dict(interval=10)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[dict(type='TextLoggerHook'),
           dict(type='TensorboardLoggerHook')])
annealing_runner = True
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/csn/finetune_ucf101_csn_edlnokl_avuc_debias'
load_from = 'https://download.openmmlab.com/mmaction/recognition/csn/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb/ircsn_ig65m_pretrained_bnfrozen_r152_32x2x1_58e_kinetics400_rgb_20200812-9037a758.pth'  # noqa: E501
resume_from = None
workflow = [('train', 1)]
find_unused_parameters = True
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/csn/finetune_ucf101_csn_edlnokl_avuc_debias.py
model = dict(
    type='Recognizer3DBNN',
    backbone=dict(
        type='ResNet3dSlowFast',
        pretrained=None,
        resample_rate=4,  # tau
        speed_ratio=4,  # alpha
        channel_ratio=8,  # beta_inv
        slow_pathway=dict(
            type='resnet3d',
            depth=50,
            pretrained=None,
            lateral=True,
            fusion_kernel=7,
            conv1_kernel=(1, 7, 7),
            dilations=(1, 1, 1, 1),
            conv1_stride_t=1,
            pool1_stride_t=1,
            inflate=(0, 0, 1, 1),
            norm_eval=False),
        fast_pathway=dict(
            type='resnet3d',
            depth=50,
            pretrained=None,
            lateral=False,
            base_channels=8,
            conv1_kernel=(5, 7, 7),
            conv1_stride_t=1,
            pool1_stride_t=1,
            norm_eval=False)),
    cls_head=dict(
        type='SlowFastBNNHead',
        in_channels=2304,  # 2048+256
        num_classes=101,
        spatial_type='avg',
        dropout_ratio=0))
test_cfg = dict(average_clips='prob', npass=10)
dataset_type = 'VideoDataset'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=10,
        test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=2,
    workers_per_gpu=2,
    test=dict(
        type=dataset_type,
        ann_file=None,
        data_prefix=None,
        start_index=0,
        pipeline=test_pipeline))
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/inference_slowfast_bnn.py
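`Recognizer3DBNN` with `npass=10` above amounts to Monte-Carlo sampling of the Bayesian head: each forward pass draws different stochastic weights, and the passes are aggregated. A hedged sketch of that aggregation, where `model` and `batch` are placeholders and `model(batch)` is assumed to return class probabilities:

# Hedged sketch: MC averaging over multiple stochastic forward passes.
import torch

@torch.no_grad()
def mc_predict(model, batch, npass=10):
    probs = torch.stack([model(batch) for _ in range(npass)], dim=0)
    # predictive mean, plus per-class variance as an uncertainty signal
    return probs.mean(dim=0), probs.var(dim=0)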
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowFast',
        pretrained=None,
        resample_rate=4,  # tau
        speed_ratio=4,  # alpha
        channel_ratio=8,  # beta_inv
        slow_pathway=dict(
            type='resnet3d',
            depth=50,
            pretrained=None,
            lateral=True,
            fusion_kernel=7,
            conv1_kernel=(1, 7, 7),
            dilations=(1, 1, 1, 1),
            conv1_stride_t=1,
            pool1_stride_t=1,
            inflate=(0, 0, 1, 1),
            norm_eval=False),
        fast_pathway=dict(
            type='resnet3d',
            depth=50,
            pretrained=None,
            lateral=False,
            base_channels=8,
            conv1_kernel=(5, 7, 7),
            conv1_stride_t=1,
            pool1_stride_t=1,
            norm_eval=False)),
    cls_head=dict(
        type='SlowFastHead',
        in_channels=2304,  # 2048+256
        num_classes=400,
        spatial_type='avg',
        dropout_ratio=0.5))
train_cfg = None
test_cfg = dict(average_clips='prob')
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=1,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=10,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.1, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(
    policy='CosineAnnealing',
    min_lr=0,
    warmup='linear',
    warmup_by_epoch=True,
    warmup_iters=34)
total_epochs = 256
checkpoint_config = dict(interval=4)
workflow = [('train', 1)]
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/slowfast_r50_3d_8x8x1_256e_kinetics400_rgb'
load_from = None
resume_from = None
find_unused_parameters = False
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/slowfast_r50_8x8x1_256e_kinetics400_rgb.py
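Most training configs in this dump carry the comment "this lr is used for 8 gpus". The usual convention behind that note is the linear scaling rule: rescale the learning rate proportionally to the total batch size. A small helper illustrating that convention (an assumption about intent, not code from this repo):

# Hedged sketch of the linear lr scaling rule the configs' comments imply.
def scale_lr(base_lr, num_gpus, videos_per_gpu,
             base_gpus=8, base_videos_per_gpu=8):
    """Rescale lr with total batch size relative to the reference setup."""
    return base_lr * (num_gpus * videos_per_gpu) / (
        base_gpus * base_videos_per_gpu)

scale_lr(0.1, num_gpus=4, videos_per_gpu=8)  # -> 0.05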
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowFast',
        pretrained=None,
        resample_rate=8,  # tau
        speed_ratio=8,  # alpha
        channel_ratio=8,  # beta_inv
        slow_pathway=dict(
            type='resnet3d',
            depth=50,
            pretrained=None,
            lateral=True,
            conv1_kernel=(1, 7, 7),
            dilations=(1, 1, 1, 1),
            conv1_stride_t=1,
            pool1_stride_t=1,
            inflate=(0, 0, 1, 1),
            norm_eval=False),
        fast_pathway=dict(
            type='resnet3d',
            depth=50,
            pretrained=None,
            lateral=False,
            base_channels=8,
            conv1_kernel=(5, 7, 7),
            conv1_stride_t=1,
            pool1_stride_t=1,
            norm_eval=False)),
    cls_head=dict(
        type='SlowFastHead',
        in_channels=2304,  # 2048+256
        num_classes=400,
        spatial_type='avg',
        dropout_ratio=0.5))
# model training and testing settings
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'VideoDataset'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
test_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=10,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=1,
    workers_per_gpu=2,
    test=dict(
        type=dataset_type,
        ann_file=None,
        data_prefix=None,
        pipeline=test_pipeline))
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/slowfast_r50_video_inference_4x16x1_256e_kinetics400_rgb.py
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowFast',
        pretrained=None,
        resample_rate=8,  # tau
        speed_ratio=8,  # alpha
        channel_ratio=8,  # beta_inv
        slow_pathway=dict(
            type='resnet3d',
            depth=50,
            pretrained=None,
            lateral=True,
            conv1_kernel=(1, 7, 7),
            dilations=(1, 1, 1, 1),
            conv1_stride_t=1,
            pool1_stride_t=1,
            inflate=(0, 0, 1, 1),
            norm_eval=False),
        fast_pathway=dict(
            type='resnet3d',
            depth=50,
            pretrained=None,
            lateral=False,
            base_channels=8,
            conv1_kernel=(5, 7, 7),
            conv1_stride_t=1,
            pool1_stride_t=1,
            norm_eval=False)),
    cls_head=dict(
        type='SlowFastHead',
        in_channels=2304,  # 2048+256
        num_classes=400,
        spatial_type='avg',
        dropout_ratio=0.5))
train_cfg = None
test_cfg = dict(average_clips='prob')
dataset_type = 'VideoDataset'
data_root = 'data/kinetics400/videos_train'
data_root_val = 'data/kinetics400/videos_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=1,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=10,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.1, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(
    policy='CosineAnnealing',
    min_lr=0,
    warmup='linear',
    warmup_by_epoch=True,
    warmup_iters=34)
total_epochs = 256
checkpoint_config = dict(interval=4)
workflow = [('train', 1)]
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/slowfast_r50_video_3d_4x16x1_256e_kinetics400_rgb'
load_from = None
resume_from = None
find_unused_parameters = False
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/slowfast_r50_video_4x16x1_256e_kinetics400_rgb.py
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowFast',
        pretrained=None,
        resample_rate=4,  # tau
        speed_ratio=4,  # alpha
        channel_ratio=8,  # beta_inv
        slow_pathway=dict(
            type='resnet3d',
            depth=50,
            pretrained=None,
            lateral=True,
            fusion_kernel=7,
            conv1_kernel=(1, 7, 7),
            dilations=(1, 1, 1, 1),
            conv1_stride_t=1,
            pool1_stride_t=1,
            inflate=(0, 0, 1, 1),
            norm_eval=False),
        fast_pathway=dict(
            type='resnet3d',
            depth=50,
            pretrained=None,
            lateral=False,
            base_channels=8,
            conv1_kernel=(5, 7, 7),
            conv1_stride_t=1,
            pool1_stride_t=1,
            norm_eval=False)),
    cls_head=dict(
        type='SlowFastHead',
        in_channels=2304,  # 2048+256
        num_classes=101,
        spatial_type='avg',
        dropout_ratio=0.5))
test_cfg = dict(average_clips='prob')
dataset_type = 'VideoDataset'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=10,
        test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=2,
    workers_per_gpu=2,
    test=dict(
        type=dataset_type,
        ann_file=None,
        data_prefix=None,
        start_index=0,
        pipeline=test_pipeline))
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/inference_slowfast_dnn.py
# model settings
evidence_loss = dict(
    type='EvidenceLoss',
    num_classes=101,
    evidence='exp',
    loss_type='log',
    with_kldiv=False,
    with_avuloss=True,
    annealing_method='exp')
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowFast',
        pretrained=None,
        resample_rate=4,  # tau
        speed_ratio=4,  # alpha
        channel_ratio=8,  # beta_inv
        slow_pathway=dict(
            type='resnet3d',
            depth=50,
            pretrained=None,
            lateral=True,
            fusion_kernel=7,
            conv1_kernel=(1, 7, 7),
            dilations=(1, 1, 1, 1),
            conv1_stride_t=1,
            pool1_stride_t=1,
            inflate=(0, 0, 1, 1),
            norm_eval=False),
        fast_pathway=dict(
            type='resnet3d',
            depth=50,
            pretrained=None,
            lateral=False,
            base_channels=8,
            conv1_kernel=(5, 7, 7),
            conv1_stride_t=1,
            pool1_stride_t=1,
            norm_eval=False)),
    cls_head=dict(
        type='SlowFastHead',
        loss_cls=evidence_loss,
        in_channels=2304,  # 2048+256
        num_classes=101,
        spatial_type='avg',
        dropout_ratio=0.5),
    debias_head=dict(
        type='DebiasHead',
        loss_cls=evidence_loss,  # actually not used!
        loss_factor=0.1,
        num_classes=101,
        in_channels=2048,  # only slow features are debiased
        dropout_ratio=0.5,
        init_std=0.01))
train_cfg = None
test_cfg = dict(average_clips='evidence', evidence_type='exp')
dataset_type = 'VideoDataset'
data_root = 'data/ucf101/videos'
data_root_val = 'data/ucf101/videos'
ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt'
ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt'
ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=1,
        test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=10,
        test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        start_index=0,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        start_index=0,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        start_index=0,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001,
    nesterov=True)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(
    policy='CosineAnnealing',
    min_lr=0,
    warmup='linear',
    warmup_by_epoch=True,
    warmup_iters=5)
total_epochs = 50
checkpoint_config = dict(interval=10)
workflow = [('train', 1)]
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook'),
    ])
annealing_runner = True
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/finetune_ucf101_slowfast_dnn'
load_from = 'https://download.openmmlab.com/mmaction/recognition/slowfast/slowfast_r50_8x8x1_256e_kinetics400_rgb/slowfast_r50_8x8x1_256e_kinetics400_rgb_20200716-73547d2b.pth'  # noqa: E501
resume_from = None
find_unused_parameters = False
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/finetune_ucf101_slowfast_edlnokl_avuc_debias.py
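The config above scores clips with average_clips='evidence' and evidence_type='exp'. As a hedged illustration (standard Evidential Deep Learning math, not the repo's exact EvidenceLoss/head code), exp-evidence logits map to Dirichlet parameters and a vacuity-style uncertainty like this:

import torch

def edl_scores(logits: torch.Tensor, num_classes: int = 101):
    # evidence='exp': clamp for numerical stability, then exponentiate (assumed form)
    evidence = torch.exp(torch.clamp(logits, -10, 10))
    alpha = evidence + 1  # Dirichlet concentration parameters
    prob = alpha / alpha.sum(-1, keepdim=True)  # expected class probabilities
    uncertainty = num_classes / alpha.sum(-1)  # vacuity, in (0, 1]
    return prob, uncertainty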
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowFast',
        pretrained=None,
        resample_rate=8,  # tau
        speed_ratio=8,  # alpha
        channel_ratio=8,  # beta_inv
        slow_pathway=dict(
            type='resnet3d',
            depth=50,
            pretrained=None,
            lateral=True,
            conv1_kernel=(1, 7, 7),
            dilations=(1, 1, 1, 1),
            conv1_stride_t=1,
            pool1_stride_t=1,
            inflate=(0, 0, 1, 1),
            norm_eval=False),
        fast_pathway=dict(
            type='resnet3d',
            depth=50,
            pretrained=None,
            lateral=False,
            base_channels=8,
            conv1_kernel=(5, 7, 7),
            conv1_stride_t=1,
            pool1_stride_t=1,
            norm_eval=False)),
    cls_head=dict(
        type='SlowFastHead',
        in_channels=2304,  # 2048+256
        num_classes=400,
        spatial_type='avg',
        dropout_ratio=0.5))
train_cfg = None
test_cfg = dict(average_clips='prob')
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=1,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=10,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.1, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(
    policy='CosineAnnealing',
    min_lr=0,
    warmup='linear',
    warmup_by_epoch=True,
    warmup_iters=34)
total_epochs = 256
checkpoint_config = dict(interval=4)
workflow = [('train', 1)]
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/slowfast_r50_3d_4x16x1_256e_kinetics400_rgb'
load_from = None
resume_from = None
find_unused_parameters = False
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/slowfast_r50_4x16x1_256e_kinetics400_rgb.py
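The 4x16x1 naming follows from the sampling arithmetic in this config. A minimal sanity check, assuming ResNet3dSlowFast subsamples the 32-frame clip by resample_rate for the slow path and by resample_rate // speed_ratio for the fast path:

clip_len, frame_interval = 32, 2
resample_rate, speed_ratio = 8, 8  # tau, alpha
slow_frames = clip_len // resample_rate  # 4 frames at effective stride 2 * 8 = 16
fast_frames = clip_len // (resample_rate // speed_ratio)  # 32 frames
print(slow_frames, fast_frames)  # 4 32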
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowFast',
        pretrained=None,
        resample_rate=4,  # tau
        speed_ratio=4,  # alpha
        channel_ratio=8,  # beta_inv
        slow_pathway=dict(
            type='resnet3d',
            depth=50,
            pretrained=None,
            lateral=True,
            fusion_kernel=7,
            conv1_kernel=(1, 7, 7),
            dilations=(1, 1, 1, 1),
            conv1_stride_t=1,
            pool1_stride_t=1,
            inflate=(0, 0, 1, 1),
            norm_eval=False),
        fast_pathway=dict(
            type='resnet3d',
            depth=50,
            pretrained=None,
            lateral=False,
            base_channels=8,
            conv1_kernel=(5, 7, 7),
            conv1_stride_t=1,
            pool1_stride_t=1,
            norm_eval=False)),
    cls_head=dict(
        type='SlowFastHead',
        loss_cls=dict(
            type='EvidenceLoss',
            num_classes=101,
            evidence='exp',
            loss_type='log',
            annealing_method='exp'),
        in_channels=2304,  # 2048+256
        num_classes=101,
        spatial_type='avg',
        dropout_ratio=0.5))
evidence = 'exp'  # only used for EDL
test_cfg = dict(average_clips='score')
dataset_type = 'VideoDataset'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
test_pipeline = [
    # dict(type='OpenCVInit', num_threads=1),
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=10,
        test_mode=True),
    # dict(type='OpenCVDecode'),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=2,
    test=dict(
        type=dataset_type,
        ann_file=None,
        data_prefix=None,
        start_index=0,
        pipeline=test_pipeline))
dist_params = dict(backend='nccl')
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/inference_slowfast_enn.py
model = dict(
    type='Recognizer3DRPL',
    backbone=dict(
        type='ResNet3dSlowFast',
        pretrained=None,
        resample_rate=4,  # tau
        speed_ratio=4,  # alpha
        channel_ratio=8,  # beta_inv
        slow_pathway=dict(
            type='resnet3d',
            depth=50,
            pretrained=None,
            lateral=True,
            fusion_kernel=7,
            conv1_kernel=(1, 7, 7),
            dilations=(1, 1, 1, 1),
            conv1_stride_t=1,
            pool1_stride_t=1,
            inflate=(0, 0, 1, 1),
            norm_eval=False),
        fast_pathway=dict(
            type='resnet3d',
            depth=50,
            pretrained=None,
            lateral=False,
            base_channels=8,
            conv1_kernel=(5, 7, 7),
            conv1_stride_t=1,
            pool1_stride_t=1,
            norm_eval=False)),
    cls_head=dict(
        type='SlowFastRPLHead',
        loss_cls=dict(type='RPLoss', temperature=1, weight_pl=0.1),
        in_channels=2304,  # 2048+256
        num_classes=101,
        spatial_type='avg',
        dropout_ratio=0.5))
train_cfg = None
test_cfg = dict(average_clips='prob')
dataset_type = 'VideoDataset'
data_root = 'data/ucf101/videos'
data_root_val = 'data/ucf101/videos'
ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt'
ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt'
ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=1,
        test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=10,
        test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        start_index=0,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        start_index=0,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        start_index=0,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001,
    nesterov=True)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(
    policy='CosineAnnealing',
    min_lr=0,
    warmup='linear',
    warmup_by_epoch=True,
    warmup_iters=5)
total_epochs = 50
checkpoint_config = dict(interval=10)
workflow = [('train', 1)]
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook'),
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/finetune_ucf101_slowfast_rpl'
load_from = 'https://download.openmmlab.com/mmaction/recognition/slowfast/slowfast_r50_8x8x1_256e_kinetics400_rgb/slowfast_r50_8x8x1_256e_kinetics400_rgb_20200716-73547d2b.pth'
resume_from = None
find_unused_parameters = False
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/finetune_ucf101_slowfast_rpl.py
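SlowFastRPLHead pairs with RPLoss (temperature, weight_pl) for reciprocal point learning. A heavily hedged sketch of the core idea only (reciprocal points repel known-class features, so larger distance means higher class score); the names and shapes below are illustrative, not the repo's implementation:

import torch

def rpl_logits(feat: torch.Tensor, reciprocal_points: torch.Tensor,
               temperature: float = 1.0) -> torch.Tensor:
    # feat: (N, D); reciprocal_points: (num_classes, D)
    dist = torch.cdist(feat, reciprocal_points) ** 2  # (N, num_classes)
    return temperature * dist  # far from a class's reciprocal point -> that class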
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowFast',
        pretrained=None,
        resample_rate=4,  # tau
        speed_ratio=4,  # alpha
        channel_ratio=8,  # beta_inv
        slow_pathway=dict(
            type='resnet3d',
            depth=50,
            pretrained=None,
            lateral=True,
            fusion_kernel=7,
            conv1_kernel=(1, 7, 7),
            dilations=(1, 1, 1, 1),
            conv1_stride_t=1,
            pool1_stride_t=1,
            inflate=(0, 0, 1, 1),
            norm_eval=False),
        fast_pathway=dict(
            type='resnet3d',
            depth=50,
            pretrained=None,
            lateral=False,
            base_channels=8,
            conv1_kernel=(5, 7, 7),
            conv1_stride_t=1,
            pool1_stride_t=1,
            norm_eval=False)),
    cls_head=dict(
        type='SlowFastHead',
        in_channels=2304,  # 2048+256
        num_classes=101,
        spatial_type='avg',
        dropout_ratio=0.5))
train_cfg = None
test_cfg = dict(average_clips='prob')
dataset_type = 'VideoDataset'
data_root = 'data/ucf101/videos'
data_root_val = 'data/ucf101/videos'
ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt'
ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt'
ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=1,
        test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=10,
        test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        start_index=0,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        start_index=0,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        start_index=0,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001,
    nesterov=True)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(
    policy='CosineAnnealing',
    min_lr=0,
    warmup='linear',
    warmup_by_epoch=True,
    warmup_iters=5)
total_epochs = 50
checkpoint_config = dict(interval=10)
workflow = [('train', 1)]
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook'),
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/finetune_ucf101_slowfast_dnn'
load_from = 'https://download.openmmlab.com/mmaction/recognition/slowfast/slowfast_r50_8x8x1_256e_kinetics400_rgb/slowfast_r50_8x8x1_256e_kinetics400_rgb_20200716-73547d2b.pth'
resume_from = None
find_unused_parameters = False
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/finetune_ucf101_slowfast_dnn.py
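A minimal usage sketch for a fine-tuned config like the one above, via the mmaction2 high-level API; the checkpoint path is a hypothetical placeholder:

from mmaction.apis import init_recognizer

model = init_recognizer(
    'configs/recognition/slowfast/finetune_ucf101_slowfast_dnn.py',
    'work_dirs/finetune_ucf101_slowfast_dnn/latest.pth',  # hypothetical path
    device='cuda:0')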
model = dict(
    type='Recognizer3DRPL',
    backbone=dict(
        type='ResNet3dSlowFast',
        pretrained=None,
        resample_rate=4,  # tau
        speed_ratio=4,  # alpha
        channel_ratio=8,  # beta_inv
        slow_pathway=dict(
            type='resnet3d',
            depth=50,
            pretrained=None,
            lateral=True,
            fusion_kernel=7,
            conv1_kernel=(1, 7, 7),
            dilations=(1, 1, 1, 1),
            conv1_stride_t=1,
            pool1_stride_t=1,
            inflate=(0, 0, 1, 1),
            norm_eval=False),
        fast_pathway=dict(
            type='resnet3d',
            depth=50,
            pretrained=None,
            lateral=False,
            base_channels=8,
            conv1_kernel=(5, 7, 7),
            conv1_stride_t=1,
            pool1_stride_t=1,
            norm_eval=False)),
    cls_head=dict(
        type='SlowFastRPLHead',
        loss_cls=dict(type='RPLoss', temperature=1, weight_pl=0.1),
        in_channels=2304,  # 2048+256
        num_classes=101,
        spatial_type='avg',
        dropout_ratio=0.5))
test_cfg = dict(average_clips='prob')
dataset_type = 'VideoDataset'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=10,
        test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=2,
    workers_per_gpu=2,
    test=dict(
        type=dataset_type,
        ann_file=None,
        data_prefix=None,
        start_index=0,
        pipeline=test_pipeline))
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/inference_slowfast_rpl.py
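Inference configs like this one deliberately leave ann_file and data_prefix as None; a minimal sketch of filling them at run time with mmcv (the annotation paths shown are example values):

from mmcv import Config

cfg = Config.fromfile(
    'configs/recognition/slowfast/inference_slowfast_rpl.py')
cfg.data.test.ann_file = 'data/ucf101/ucf101_val_split_1_videos.txt'  # example
cfg.data.test.data_prefix = 'data/ucf101/videos'  # example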
model = dict(
    type='Recognizer3DBNN',
    backbone=dict(
        type='ResNet3dSlowFast',
        pretrained=None,
        resample_rate=4,  # tau
        speed_ratio=4,  # alpha
        channel_ratio=8,  # beta_inv
        slow_pathway=dict(
            type='resnet3d',
            depth=50,
            pretrained=None,
            lateral=True,
            fusion_kernel=7,
            conv1_kernel=(1, 7, 7),
            dilations=(1, 1, 1, 1),
            conv1_stride_t=1,
            pool1_stride_t=1,
            inflate=(0, 0, 1, 1),
            norm_eval=False),
        fast_pathway=dict(
            type='resnet3d',
            depth=50,
            pretrained=None,
            lateral=False,
            base_channels=8,
            conv1_kernel=(5, 7, 7),
            conv1_stride_t=1,
            pool1_stride_t=1,
            norm_eval=False)),
    cls_head=dict(
        type='SlowFastBNNHead',
        in_channels=2304,  # 2048+256
        num_classes=101,
        spatial_type='avg',
        dropout_ratio=0))
train_cfg = dict(loss_weight=1e-6, npass=2)
test_cfg = dict(average_clips='prob', npass=10)
dataset_type = 'VideoDataset'
data_root = 'data/ucf101/videos'
data_root_val = 'data/ucf101/videos'
ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt'
ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt'
ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=1,
        test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=10,
        test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        start_index=0,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        start_index=0,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        start_index=0,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001,
    nesterov=True)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(
    policy='CosineAnnealing',
    min_lr=0,
    warmup='linear',
    warmup_by_epoch=True,
    warmup_iters=5)
total_epochs = 50
checkpoint_config = dict(interval=10)
workflow = [('train', 1)]
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook'),
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/finetune_ucf101_slowfast_bnn'
load_from = 'https://download.openmmlab.com/mmaction/recognition/slowfast/slowfast_r50_8x8x1_256e_kinetics400_rgb/slowfast_r50_8x8x1_256e_kinetics400_rgb_20200716-73547d2b.pth'
resume_from = None
find_unused_parameters = False
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowfast/finetune_ucf101_slowfast_bnn.py
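In the BNN config above, train_cfg npass=2 and test_cfg npass=10 control how many stochastic forward passes are drawn. A hedged sketch of the test-time idea only (the real Recognizer3DBNN forward signature differs):

import torch

def mc_predict(stochastic_model, imgs: torch.Tensor, npass: int = 10):
    # each call samples new weights in a Bayesian head, so predictions vary
    probs = torch.stack([stochastic_model(imgs) for _ in range(npass)])
    return probs.mean(0), probs.var(0)  # predictive mean and variance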
# model settings
model = dict(
    type='Recognizer2DBNN',
    backbone=dict(
        type='ResNetTSM',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False,
        shift_div=8),
    cls_head=dict(
        type='TSMBNNHead',
        num_classes=101,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0,
        init_std=0.001,
        is_shift=True))
# model training and testing settings
train_cfg = dict(loss_weight=1e-6, npass=2)
test_cfg = dict(average_clips='prob', npass=10)
# dataset settings
dataset_type = 'VideoDataset'
data_root = 'data/ucf101/videos'
data_root_val = 'data/ucf101/videos'
ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt'
ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt'
ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='DenseSampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(
        type='MultiScaleCrop',
        input_size=224,
        scales=(1, 0.875, 0.75, 0.66),
        random_crop=False,
        max_wh_scale_gap=1,
        num_fixed_crops=13),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(
        type='DenseSampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(
        type='DenseSampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    val_dataloader=dict(videos_per_gpu=4),
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        start_index=0,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        start_index=0,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        start_index=0,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD',
    lr=0.001,  # this lr is used for 8 gpus
    momentum=0.9,
    weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=10)
evaluation = dict(
    interval=2, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsm/finetune_ucf101_tsm_bnn'
load_from = 'https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_dense_256p_1x1x8_100e_kinetics400_rgb/tsm_r50_dense_256p_1x1x8_100e_kinetics400_rgb_20200727-e1e0c785.pth'
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/finetune_ucf101_tsm_bnn.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNetTSM',
        pretrained='torchvision://resnet101',
        depth=101,
        norm_eval=False,
        shift_div=8),
    cls_head=dict(
        type='TSMHead',
        num_classes=174,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5,
        init_std=0.001,
        is_shift=True))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/sthv1/rawframes'
data_root_val = 'data/sthv1/rawframes'
ann_file_train = 'data/sthv1/sthv1_train_list_rawframes.txt'
ann_file_val = 'data/sthv1/sthv1_val_list_rawframes.txt'
ann_file_test = 'data/sthv1/sthv1_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(
        type='MultiScaleCrop',
        input_size=224,
        scales=(1, 0.875, 0.75, 0.66),
        random_crop=False,
        max_wh_scale_gap=1,
        num_fixed_crops=13),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        filename_tmpl='{:05}.jpg',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        filename_tmpl='{:05}.jpg',
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        filename_tmpl='{:05}.jpg',
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD',
    constructor='TSMOptimizerConstructor',
    paramwise_cfg=dict(fc_lr5=True),
    lr=0.01,  # this lr is used for 8 gpus
    momentum=0.9,
    weight_decay=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=1)
evaluation = dict(
    interval=2, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsm_r101_1x1x8_50e_sthv1_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r101_1x1x8_50e_sthv1_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNetTSM',
        pretrained='torchvision://resnet50',
        depth=50,
        num_segments=16,
        norm_eval=False,
        shift_div=8),
    cls_head=dict(
        type='TSMHead',
        num_classes=174,
        num_segments=16,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5,
        init_std=0.001,
        is_shift=True))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/sthv2/rawframes'
data_root_val = 'data/sthv2/rawframes'
ann_file_train = 'data/sthv2/sthv2_train_list_rawframes.txt'
ann_file_val = 'data/sthv2/sthv2_val_list_rawframes.txt'
ann_file_test = 'data/sthv2/sthv2_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=16),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(
        type='MultiScaleCrop',
        input_size=224,
        scales=(1, 0.875, 0.75, 0.66),
        random_crop=False,
        max_wh_scale_gap=1,
        num_fixed_crops=13),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=16,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=16,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=6,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD',
    constructor='TSMOptimizerConstructor',
    paramwise_cfg=dict(fc_lr5=True),
    lr=0.0075,  # this lr is used for 8 gpus
    momentum=0.9,
    weight_decay=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=1)
evaluation = dict(
    interval=2, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsm_r50_1x1x16_50e_sthv2_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r50_1x1x16_50e_sthv2_rgb.py
# model settings
evidence_loss = dict(type='EvidenceLoss',
                     num_classes=101,
                     evidence='exp',
                     loss_type='log',
                     with_kldiv=False,
                     with_avuloss=True,
                     annealing_method='exp')
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNetTSM',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False,
        shift_div=8),
    cls_head=dict(
        type='TSMHead',
        loss_cls=evidence_loss,
        num_classes=101,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5,
        init_std=0.001,
        is_shift=True),
    debias_head=dict(
        type='DebiasHead',
        loss_cls=evidence_loss,  # actually not used!
        loss_factor=0.1,
        num_classes=101,
        in_channels=2048,
        dropout_ratio=0.5,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='evidence', evidence_type='exp')
# dataset settings
dataset_type = 'VideoDataset'
data_root = 'data/kinetics10/videos_train'
data_root_val = 'data/kinetics10/videos_val'
ann_file_train = 'data/kinetics10/kinetics10_train_list_videos.txt'
ann_file_val = 'data/kinetics10/kinetics10_val_list_videos.txt'
ann_file_test = 'data/kinetics10/kinetics10_val_list_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='DenseSampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(
        type='MultiScaleCrop',
        input_size=224,
        scales=(1, 0.875, 0.75, 0.66),
        random_crop=False,
        max_wh_scale_gap=1,
        num_fixed_crops=13),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(
        type='DenseSampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(
        type='DenseSampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=4,
    workers_per_gpu=4,
    val_dataloader=dict(videos_per_gpu=4),
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        start_index=0,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        start_index=0,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        start_index=0,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD',
    constructor='TSMOptimizerConstructor',
    paramwise_cfg=dict(fc_lr5=True),
    lr=0.001,  # this lr is used for 8 gpus
    momentum=0.9,
    weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=10)
evaluation = dict(
    interval=2, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook'),
    ])
annealing_runner = True
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsm/train_kinetics10_tsm_DEAR'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/train_kinetics10_tsm_DEAR.py
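annealing_runner = True pairs with annealing_method='exp' in EvidenceLoss: a regularization weight is ramped over the training run. The exact schedule lives in the repo's runner; the exponential ramp below is only an assumed form for illustration:

import math

def exp_annealing(epoch: int, total_epochs: int, lambda0: float = 0.01) -> float:
    # grows from lambda0 at epoch 0 toward 1.0 at the final epoch (assumed form)
    return lambda0 * math.exp(math.log(1.0 / lambda0) * epoch / total_epochs)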
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNetTSM',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False,
        shift_div=8),
    cls_head=dict(
        type='TSMHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5,
        init_std=0.001,
        is_shift=True))
# model training and testing settings
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'VideoDataset'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
test_pipeline = [
    dict(type='DecordInit', num_threads=1),
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=1,
    workers_per_gpu=2,
    test=dict(
        type=dataset_type,
        ann_file=None,
        data_prefix=None,
        pipeline=test_pipeline))
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r50_video_inference_1x1x8_100e_kinetics400_rgb.py
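Shape arithmetic for the 1x1x8 inference pipeline above: num_clips=8 and clip_len=1 yield 8 frames per video, and FormatShape(input_format='NCHW') folds them into the batch dimension for the 2D backbone:

num_clips, clip_len = 8, 1
frames_per_video = num_clips * clip_len  # 8
# tensor fed to ResNetTSM: (batch_size * 8, 3, 224, 224)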
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNetTSM',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False,
        shift_div=8),
    cls_head=dict(
        type='TSMHead',
        num_classes=101,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5,
        init_std=0.001,
        is_shift=True))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'VideoDataset'
data_root = 'data/ucf101/videos'
data_root_val = 'data/ucf101/videos'
ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt'
ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt'
ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='DenseSampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(
        type='MultiScaleCrop',
        input_size=224,
        scales=(1, 0.875, 0.75, 0.66),
        random_crop=False,
        max_wh_scale_gap=1,
        num_fixed_crops=13),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(
        type='DenseSampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(
        type='DenseSampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    val_dataloader=dict(videos_per_gpu=4),
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        start_index=0,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        start_index=0,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        start_index=0,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD',
    constructor='TSMOptimizerConstructor',
    paramwise_cfg=dict(fc_lr5=True),
    lr=0.001,  # this lr is used for 8 gpus
    momentum=0.9,
    weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=10)
evaluation = dict(
    interval=2, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsm/finetune_ucf101_tsm_dnn'
load_from = 'https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_dense_256p_1x1x8_100e_kinetics400_rgb/tsm_r50_dense_256p_1x1x8_100e_kinetics400_rgb_20200727-e1e0c785.pth'
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/finetune_ucf101_tsm_dnn.py
# model settings
evidence_loss = dict(type='EvidenceLoss',
                     num_classes=101,
                     evidence='exp',
                     loss_type='log',
                     with_kldiv=False,
                     with_avuloss=True,
                     annealing_method='exp')
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNetTSM',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False,
        shift_div=8),
    cls_head=dict(
        type='TSMHead',
        loss_cls=evidence_loss,
        num_classes=101,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5,
        init_std=0.001,
        is_shift=True),
    debias_head=dict(
        type='DebiasHead',
        loss_cls=evidence_loss,  # actually not used!
        loss_factor=0.1,
        num_classes=101,
        in_channels=2048,
        dropout_ratio=0.5,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='evidence', evidence_type='exp')
# dataset settings
dataset_type = 'VideoDataset'
data_root = 'data/ucf101/videos'
data_root_val = 'data/ucf101/videos'
ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt'
ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt'
ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='DenseSampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(
        type='MultiScaleCrop',
        input_size=224,
        scales=(1, 0.875, 0.75, 0.66),
        random_crop=False,
        max_wh_scale_gap=1,
        num_fixed_crops=13),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(
        type='DenseSampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(
        type='DenseSampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    val_dataloader=dict(videos_per_gpu=4),
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        start_index=0,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        start_index=0,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        start_index=0,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD',
    constructor='TSMOptimizerConstructor',
    paramwise_cfg=dict(fc_lr5=True),
    lr=0.001,  # this lr is used for 8 gpus
    momentum=0.9,
    weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=10)
evaluation = dict(
    interval=2, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook'),
    ])
annealing_runner = True
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsm/finetune_ucf101_tsm_edlnokl_avuc_debias'
load_from = 'https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_dense_256p_1x1x8_100e_kinetics400_rgb/tsm_r50_dense_256p_1x1x8_100e_kinetics400_rgb_20200727-e1e0c785.pth'
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/finetune_ucf101_tsm_edlnokl_avuc_debias.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNetTSM',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False,
        shift_div=8),
    cls_head=dict(
        type='TSMHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5,
        init_std=0.001,
        is_shift=True))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='DenseSampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(
        type='MultiScaleCrop',
        input_size=224,
        scales=(1, 0.875, 0.75, 0.66),
        random_crop=False,
        max_wh_scale_gap=1,
        num_fixed_crops=13),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(
        type='DenseSampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(
        type='DenseSampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    val_dataloader=dict(videos_per_gpu=4),
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD',
    constructor='TSMOptimizerConstructor',
    paramwise_cfg=dict(fc_lr5=True),
    lr=0.02,  # this lr is used for 8 gpus
    momentum=0.9,
    weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=1)
evaluation = dict(
    interval=2, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsm_r50_dense_1x1x8_100e_kinetics400_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r50_dense_1x1x8_100e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNetTSM',
        pretrained='torchvision://resnet50',
        depth=50,
        num_segments=16,
        norm_eval=False,
        shift_div=8),
    cls_head=dict(
        type='TSMHead',
        num_classes=400,
        num_segments=16,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5,
        init_std=0.001,
        is_shift=True))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=16),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(
        type='MultiScaleCrop',
        input_size=224,
        scales=(1, 0.875, 0.75, 0.66),
        random_crop=False,
        max_wh_scale_gap=1,
        num_fixed_crops=13),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=16,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=16,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=6,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD',
    constructor='TSMOptimizerConstructor',
    paramwise_cfg=dict(fc_lr5=True),
    lr=0.0075,  # this lr is used for 8 gpus
    momentum=0.9,
    weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=5)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsm_r50_1x1x16_50e_kinetics400_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r50_1x1x16_50e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer2DRPL',
    backbone=dict(
        type='ResNetTSM',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False,
        shift_div=8),
    cls_head=dict(
        type='TSMRPLHead',
        loss_cls=dict(type='RPLoss', temperature=1, weight_pl=0.1),
        num_classes=101,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5,
        init_std=0.001,
        is_shift=True))
# model training and testing settings
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'VideoDataset'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(
        type='DenseSampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    test=dict(
        type=dataset_type,
        ann_file=None,
        data_prefix=None,
        start_index=0,
        pipeline=test_pipeline))
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/inference_tsm_rpl.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNetTSM',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False,
        shift_div=8),
    cls_head=dict(
        type='TSMHead',
        num_classes=174,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5,
        init_std=0.001,
        is_shift=True))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/sthv1/rawframes'
data_root_val = 'data/sthv1/rawframes'
ann_file_train = 'data/sthv1/sthv1_train_list_rawframes.txt'
ann_file_val = 'data/sthv1/sthv1_val_list_rawframes.txt'
ann_file_test = 'data/sthv1/sthv1_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(
        type='MultiScaleCrop',
        input_size=224,
        scales=(1, 0.875, 0.75, 0.66),
        random_crop=False,
        max_wh_scale_gap=1,
        num_fixed_crops=13),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        filename_tmpl='{:05}.jpg',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        filename_tmpl='{:05}.jpg',
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        filename_tmpl='{:05}.jpg',
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD',
    constructor='TSMOptimizerConstructor',
    paramwise_cfg=dict(fc_lr5=True),
    lr=0.01,  # this lr is used for 8 gpus
    momentum=0.9,
    weight_decay=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=1)
evaluation = dict(
    interval=2, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsm_r50_1x1x8_50e_sthv1_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r50_1x1x8_50e_sthv1_rgb.py
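shift_div=8 in these TSM configs means an eighth of the channels shift one segment forward in time and another eighth shift backward. A self-contained sketch of that residual shift (after the TSM paper; the in-repo module is more involved):

import torch

def temporal_shift(x: torch.Tensor, num_segments: int = 8, shift_div: int = 8):
    # x: (N * num_segments, C, H, W), frames of one video contiguous in batch
    n, c, h, w = x.size()
    x = x.view(n // num_segments, num_segments, c, h, w)
    fold = c // shift_div
    out = torch.zeros_like(x)
    out[:, :-1, :fold] = x[:, 1:, :fold]  # first fold: shift toward the past
    out[:, 1:, fold:2 * fold] = x[:, :-1, fold:2 * fold]  # second fold: future
    out[:, :, 2 * fold:] = x[:, :, 2 * fold:]  # remaining channels unchanged
    return out.view(n, c, h, w)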
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNetTSM',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False,
        non_local=((0, 0, 0), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 0, 0)),
        non_local_cfg=dict(
            sub_sample=True,
            use_scale=False,
            norm_cfg=dict(type='BN3d', requires_grad=True),
            mode='embedded_gaussian'),
        shift_div=8),
    cls_head=dict(
        type='TSMHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5,
        init_std=0.001,
        is_shift=True))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(
        type='MultiScaleCrop',
        input_size=224,
        scales=(1, 0.875, 0.75, 0.66),
        random_crop=False,
        max_wh_scale_gap=1,
        num_fixed_crops=13),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD',
    constructor='TSMOptimizerConstructor',
    paramwise_cfg=dict(fc_lr5=True),
    lr=0.01,  # this lr is used for 8 gpus
    momentum=0.9,
    weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=1)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsm_nl_embedded_gaussian_r50_1x1x8_50e_kinetics400_rgb/'  # noqa: E501
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_nl_embedded_gaussian_r50_1x1x8_50e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNetTSM',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False,
        shift_div=8),
    cls_head=dict(
        type='TSMHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5,
        init_std=0.001,
        is_shift=True))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='MultiScaleCrop', input_size=224, scales=(1, 0.875, 0.75, 0.66),
         random_crop=False, max_wh_scale_gap=1, num_fixed_crops=13),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD',
    constructor='TSMOptimizerConstructor',
    paramwise_cfg=dict(fc_lr5=True),
    lr=0.01,  # this lr is used for 8 gpus
    momentum=0.9,
    weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=5)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsm_r50_1x1x8_100e_kinetics400_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r50_1x1x8_50e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNetTSM',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False,
        non_local=((0, 0, 0), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 0, 0)),
        non_local_cfg=dict(
            sub_sample=True,
            use_scale=False,
            norm_cfg=dict(type='BN3d', requires_grad=True),
            mode='gaussian'),
        shift_div=8),
    cls_head=dict(
        type='TSMHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5,
        init_std=0.001,
        is_shift=True))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='MultiScaleCrop', input_size=224, scales=(1, 0.875, 0.75, 0.66),
         random_crop=False, max_wh_scale_gap=1, num_fixed_crops=13),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD',
    constructor='TSMOptimizerConstructor',
    paramwise_cfg=dict(fc_lr5=True),
    lr=0.01,  # this lr is used for 8 gpus
    momentum=0.9,
    weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=1)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsm_nl_gaussian_r50_1x1x8_50e_kinetics400_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_nl_gaussian_r50_1x1x8_50e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNetTSM',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False,
        shift_div=8),
    cls_head=dict(
        type='TSMHead',
        loss_cls=dict(
            type='EvidenceLoss',
            num_classes=101,
            evidence='exp',
            loss_type='log',
            annealing_method='exp'),
        num_classes=101,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5,
        init_std=0.001,
        is_shift=True))
# model training and testing settings
evidence = 'exp'  # only used for EDL
test_cfg = dict(average_clips='score')
# dataset settings
dataset_type = 'VideoDataset'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='DenseSampleFrames', clip_len=1, frame_interval=1, num_clips=8,
         test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    test=dict(type=dataset_type, ann_file=None, data_prefix=None,
              start_index=0, pipeline=test_pipeline))
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/inference_tsm_enn.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNetTSM',
        pretrained='torchvision://resnet50',
        depth=50,
        num_segments=16,
        norm_eval=False,
        shift_div=8),
    cls_head=dict(
        type='TSMHead',
        num_classes=174,
        num_segments=16,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5,
        init_std=0.001,
        is_shift=True))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/sthv1/rawframes'
data_root_val = 'data/sthv1/rawframes'
ann_file_train = 'data/sthv1/sthv1_train_list_rawframes.txt'
ann_file_val = 'data/sthv1/sthv1_val_list_rawframes.txt'
ann_file_test = 'data/sthv1/sthv1_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=16),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='MultiScaleCrop', input_size=224, scales=(1, 0.875, 0.75, 0.66),
         random_crop=False, max_wh_scale_gap=1, num_fixed_crops=13),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=16,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=16,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=6,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, filename_tmpl='{:05}.jpg',
               pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, filename_tmpl='{:05}.jpg',
             pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, filename_tmpl='{:05}.jpg',
              pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD',
    constructor='TSMOptimizerConstructor',
    paramwise_cfg=dict(fc_lr5=True),
    lr=0.0075,  # this lr is used for 8 gpus
    momentum=0.9,
    weight_decay=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=1)
evaluation = dict(
    interval=2, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsm_r50_1x1x16_50e_sthv1_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r50_1x1x16_50e_sthv1_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNetTSM',
        pretrained='torchvision://resnet101',
        depth=101,
        norm_eval=False,
        shift_div=8),
    cls_head=dict(
        type='TSMHead',
        num_classes=174,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5,
        init_std=0.001,
        is_shift=True))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/sthv2/rawframes'
data_root_val = 'data/sthv2/rawframes'
ann_file_train = 'data/sthv2/sthv2_train_list_rawframes.txt'
ann_file_val = 'data/sthv2/sthv2_val_list_rawframes.txt'
ann_file_test = 'data/sthv2/sthv2_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='MultiScaleCrop', input_size=224, scales=(1, 0.875, 0.75, 0.66),
         random_crop=False, max_wh_scale_gap=1, num_fixed_crops=13),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD',
    constructor='TSMOptimizerConstructor',
    paramwise_cfg=dict(fc_lr5=True),
    lr=0.01,  # this lr is used for 8 gpus
    momentum=0.9,
    weight_decay=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=1)
evaluation = dict(
    interval=2, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsm_r101_1x1x8_50e_sthv2_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r101_1x1x8_50e_sthv2_rgb.py
# model settings
model = dict(
    type='Recognizer2DRPL',
    backbone=dict(
        type='ResNetTSM',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False,
        shift_div=8),
    cls_head=dict(
        type='TSMRPLHead',
        loss_cls=dict(type='RPLoss', temperature=1, weight_pl=0.1),
        num_classes=101,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5,
        init_std=0.001,
        is_shift=True))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'VideoDataset'
data_root = 'data/ucf101/videos'
data_root_val = 'data/ucf101/videos'
ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt'
ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt'
ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='DenseSampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='MultiScaleCrop', input_size=224, scales=(1, 0.875, 0.75, 0.66),
         random_crop=False, max_wh_scale_gap=1, num_fixed_crops=13),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='DenseSampleFrames', clip_len=1, frame_interval=1, num_clips=8,
         test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='DenseSampleFrames', clip_len=1, frame_interval=1, num_clips=8,
         test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    val_dataloader=dict(videos_per_gpu=4),
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, start_index=0, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, start_index=0, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, start_index=0,
              pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD',
    lr=0.001,  # this lr is used for 8 gpus
    momentum=0.9,
    weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=10)
evaluation = dict(
    interval=2, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsm/finetune_ucf101_tsm_rpl'
load_from = 'https://download.openmmlab.com/mmaction/recognition/tsm/tsm_r50_dense_256p_1x1x8_100e_kinetics400_rgb/tsm_r50_dense_256p_1x1x8_100e_kinetics400_rgb_20200727-e1e0c785.pth'
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/finetune_ucf101_tsm_rpl.py
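A note on the RPL configs above: `TSMRPLHead` with `RPLoss(temperature=1, weight_pl=0.1)` classifies by distance to learned reciprocal points rather than through a plain linear layer. The sketch below is a rough, self-contained illustration of that idea under simplifying assumptions (one reciprocal point and one learned radius per class); it is not the repo's RPLoss implementation, and every name in it is hypothetical.

import torch
import torch.nn as nn
import torch.nn.functional as F

class ReciprocalPointsSketch(nn.Module):
    """Illustrative reciprocal-points classifier (simplified sketch)."""

    def __init__(self, feat_dim, num_classes, temperature=1.0, weight_pl=0.1):
        super().__init__()
        # one reciprocal point per class, learned jointly with the backbone
        self.points = nn.Parameter(torch.randn(num_classes, feat_dim))
        self.radius = nn.Parameter(torch.tensor(1.0))  # open-space bound
        self.temperature = temperature
        self.weight_pl = weight_pl

    def forward(self, feats, labels):
        # a feature far from class k's reciprocal point scores high for class k
        dists = torch.cdist(feats, self.points) ** 2  # (N, num_classes)
        logits = dists / self.temperature
        cls_loss = F.cross_entropy(logits, labels)
        # bound the distance to the ground-truth class's reciprocal point,
        # limiting the open space left over for unknown samples
        own = dists.gather(1, labels[:, None]).squeeze(1)
        open_loss = (own - self.radius ** 2).pow(2).mean()
        return logits, cls_loss + self.weight_pl * open_loss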
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNetTSM',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False,
        temporal_pool=True,
        shift_div=8),
    cls_head=dict(
        type='TSMHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5,
        init_std=0.001,
        temporal_pool=True,
        is_shift=True))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='MultiScaleCrop', input_size=224, scales=(1, 0.875, 0.75, 0.66),
         random_crop=False, max_wh_scale_gap=1, num_fixed_crops=13),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD',
    constructor='TSMOptimizerConstructor',
    paramwise_cfg=dict(fc_lr5=True),
    lr=0.01,  # this lr is used for 8 gpus
    momentum=0.9,
    weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=5)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsm_temporal_pool_r50_1x1x8_100e_kinetics400_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_temporal_pool_r50_1x1x8_50e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNetTSM',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False,
        shift_div=8),
    cls_head=dict(
        type='TSMHead',
        num_classes=101,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5,
        init_std=0.001,
        is_shift=True))
# model training and testing settings
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'VideoDataset'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='DenseSampleFrames', clip_len=1, frame_interval=1, num_clips=8,
         test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    test=dict(type=dataset_type, ann_file=None, data_prefix=None,
              start_index=0, pipeline=test_pipeline))
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/inference_tsm_dnn.py
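The inference_* configs in this directory leave `ann_file` and `data_prefix` as None, to be filled in by the caller. A minimal sketch of doing that with mmcv's Config; the UCF-101 paths below are placeholders, substitute your own annotation list and video root:

from mmcv import Config

cfg = Config.fromfile('configs/recognition/tsm/inference_tsm_dnn.py')
cfg.data.test.ann_file = 'data/ucf101/ucf101_val_split_1_videos.txt'  # placeholder
cfg.data.test.data_prefix = 'data/ucf101/videos'  # placeholder
print(cfg.data.test.ann_file)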
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNetTSM',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False,
        shift_div=8),
    cls_head=dict(
        type='TSMHead',
        num_classes=174,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5,
        init_std=0.001,
        is_shift=True))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/sthv2/rawframes'
data_root_val = 'data/sthv2/rawframes'
ann_file_train = 'data/sthv2/sthv2_train_list_rawframes.txt'
ann_file_val = 'data/sthv2/sthv2_val_list_rawframes.txt'
ann_file_test = 'data/sthv2/sthv2_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='MultiScaleCrop', input_size=224, scales=(1, 0.875, 0.75, 0.66),
         random_crop=False, max_wh_scale_gap=1, num_fixed_crops=13),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=16,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=16,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=6,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD',
    constructor='TSMOptimizerConstructor',
    paramwise_cfg=dict(fc_lr5=True),
    lr=0.0075,  # this lr is used for 8 gpus
    momentum=0.9,
    weight_decay=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=1)
evaluation = dict(
    interval=2, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsm_r50_1x1x8_50e_sthv2_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r50_1x1x8_50e_sthv2_rgb.py
# model settings
evidence_loss = dict(
    type='EvidenceLoss',
    num_classes=101,
    evidence='exp',
    loss_type='log',
    with_kldiv=False,
    with_avuloss=True,
    annealing_method='exp')
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNetTSM',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False,
        shift_div=8),
    cls_head=dict(
        type='TSMHead',
        loss_cls=evidence_loss,
        num_classes=101,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5,
        init_std=0.001,
        is_shift=True))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='evidence', evidence_type='exp')
# dataset settings
dataset_type = 'VideoDataset'
data_root = 'data/kinetics10/videos_train'
data_root_val = 'data/kinetics10/videos_val'
ann_file_train = 'data/kinetics10/kinetics10_train_list_videos.txt'
ann_file_val = 'data/kinetics10/kinetics10_val_list_videos.txt'
ann_file_test = 'data/kinetics10/kinetics10_val_list_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='DenseSampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='MultiScaleCrop', input_size=224, scales=(1, 0.875, 0.75, 0.66),
         random_crop=False, max_wh_scale_gap=1, num_fixed_crops=13),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='DenseSampleFrames', clip_len=1, frame_interval=1, num_clips=8,
         test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='DenseSampleFrames', clip_len=1, frame_interval=1, num_clips=8,
         test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=4,
    workers_per_gpu=4,
    val_dataloader=dict(videos_per_gpu=4),
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, start_index=0, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, start_index=0, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, start_index=0,
              pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD',
    constructor='TSMOptimizerConstructor',
    paramwise_cfg=dict(fc_lr5=True),
    lr=0.001,  # this lr is used for 8 gpus
    momentum=0.9,
    weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=10)
evaluation = dict(
    interval=2, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook'),
    ])
annealing_runner = True
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsm/train_kinetics10_tsm_DEAR_noDebias'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/train_kinetics10_tsm_DEAR_noDebias.py
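For orientation on the evidential settings used throughout the DEAR configs (`evidence='exp'`, `average_clips='evidence'`): in evidential deep learning (Sensoy et al., 2018), exponentiated logits are treated as evidence for a Dirichlet distribution over class probabilities, whose mean gives the prediction and whose total mass gives a closed-form uncertainty. A minimal sketch of that test-time arithmetic; the function name is illustrative, not the repo's API:

import torch

def edl_predict(logits, num_classes=101):
    """Dirichlet-based prediction from raw logits (illustrative sketch)."""
    evidence = torch.exp(torch.clamp(logits, max=10))  # evidence='exp'
    alpha = evidence + 1                      # Dirichlet concentration
    strength = alpha.sum(-1, keepdim=True)    # total evidence + num_classes
    probs = alpha / strength                  # expected class probabilities
    uncertainty = num_classes / strength      # vacuity, in (0, 1]
    return probs, uncertainty

probs, u = edl_predict(torch.randn(2, 101))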
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNetTSM',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False,
        shift_div=8),
    cls_head=dict(
        type='TSMHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5,
        init_std=0.001,
        is_shift=True))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'VideoDataset'
data_root = 'data/kinetics400/videos_train'
data_root_val = 'data/kinetics400/videos_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='MultiScaleCrop', input_size=224, scales=(1, 0.875, 0.75, 0.66),
         random_crop=False, max_wh_scale_gap=1, num_fixed_crops=13),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8,
         test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8,
         test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD',
    constructor='TSMOptimizerConstructor',
    paramwise_cfg=dict(fc_lr5=True),
    lr=0.02,  # this lr is used for 8 gpus
    momentum=0.9,
    weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=5)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsm_r50_video_2d_1x1x8_50e_kinetics400_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_r50_video_1x1x8_50e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer2DBNN',
    backbone=dict(
        type='ResNetTSM',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False,
        shift_div=8),
    cls_head=dict(
        type='TSMBNNHead',
        num_classes=101,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0,
        init_std=0.001,
        is_shift=True))
# model training and testing settings
test_cfg = dict(average_clips='prob', npass=10)
# dataset settings
dataset_type = 'VideoDataset'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='DenseSampleFrames', clip_len=1, frame_interval=1, num_clips=8,
         test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    test=dict(type=dataset_type, ann_file=None, data_prefix=None,
              start_index=0, pipeline=test_pipeline))
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/inference_tsm_bnn.py
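`Recognizer2DBNN` with `test_cfg = dict(average_clips='prob', npass=10)` implies averaging several stochastic forward passes at test time. A generic Monte Carlo sketch of that idea; the helper below is hypothetical and does not reflect the Recognizer2DBNN internals:

import torch

@torch.no_grad()
def mc_average(model, imgs, npass=10):
    """Average softmax outputs over npass stochastic passes (illustrative)."""
    model.train()  # keep stochastic layers (weight sampling / dropout) active
    probs = torch.stack(
        [torch.softmax(model(imgs), dim=-1) for _ in range(npass)])
    return probs.mean(0), probs.var(0)  # predictive mean and spread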
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNetTSM',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False,
        non_local=((0, 0, 0), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 0, 0)),
        non_local_cfg=dict(
            sub_sample=True,
            use_scale=False,
            norm_cfg=dict(type='BN3d', requires_grad=True),
            mode='dot_product'),
        shift_div=8),
    cls_head=dict(
        type='TSMHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5,
        init_std=0.001,
        is_shift=True))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='MultiScaleCrop', input_size=224, scales=(1, 0.875, 0.75, 0.66),
         random_crop=False, max_wh_scale_gap=1, num_fixed_crops=13),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD',
    constructor='TSMOptimizerConstructor',
    paramwise_cfg=dict(fc_lr5=True),
    lr=0.01,  # this lr is used for 8 gpus
    momentum=0.9,
    weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=1)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsm_nl_dot_product_r50_1x1x8_50e_kinetics400_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsm/tsm_nl_dot_product_r50_1x1x8_50e_kinetics400_rgb.py
# model settings
evidence_loss = dict(
    type='EvidenceLoss',
    num_classes=101,
    evidence='exp',
    loss_type='log',
    with_kldiv=False,
    with_avuloss=False,
    annealing_method='exp')
# mae huge
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='VisionTransformer3D',
        patch_size=16,
        embed_dim=1280,
        depth=32,
        num_heads=16,
        mlp_ratio=4,
        qkv_bias=True,
        num_classes=0,
        # pretrained='work_dirs/mae/finetune_ucf101_mae_dnn/huangbingkun/model/vit_h_hybridv2_pt_1200e_k700_ft_rep_2.pth'
    ),
    cls_head=dict(
        type='BaseClsHead',
        loss_cls=evidence_loss,
        in_channels=1280,
        num_classes=101,
        dropout_ratio=0.5,
    ))
# model training and testing settings
evidence = 'exp'  # only used for EDL
test_cfg = dict(average_clips='score')
# dataset settings
dataset_type = 'VideoDataset'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
test_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1,
         test_mode=True),
    # dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=32,
    #      test_mode=True),
    # dict(type='OpenCVDecode'),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 224)),
    dict(type='ThreeCrop', crop_size=224),
    # dict(type='CenterCrop', crop_size=224),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=4,
    workers_per_gpu=4,
    test=dict(type=dataset_type, ann_file=None, data_prefix=None,
              pipeline=test_pipeline))
dist_params = dict(backend='nccl')
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/mae/inference_mae_enn.py
# model settings
evidence_loss = dict(
    type='EvidenceLoss',
    num_classes=101,
    evidence='exp',
    loss_type='log',
    with_kldiv=False,
    with_avuloss=False,
    annealing_method='exp')
# mae huge ------------
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='VisionTransformer3D',
        patch_size=16,
        embed_dim=1280,
        depth=32,
        num_heads=16,
        mlp_ratio=4,
        qkv_bias=True,
        num_classes=0,
        pretrained='work_dirs/mae/finetune_ucf101_mae_dnn/huangbingkun/model/vit_h_hybridv2_pt_1200e_k700_ft_rep_2.pth'
    ),
    cls_head=dict(
        type='BaseClsHead',
        loss_cls=evidence_loss,
        in_channels=1280,
        num_classes=101,
        dropout_ratio=0.5,
    ))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='evidence', evidence_type='exp')
# dataset settings
dataset_type = 'VideoDataset'
data_root = 'data/ucf101/videos'
data_root_val = 'data/ucf101/videos'
ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt'
ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt'
ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    # dict(type='OpenCVInit', num_threads=1),
    dict(type='DecordInit'),
    dict(type='DenseSampleFrames', clip_len=32, frame_interval=2, num_clips=1),
    # dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=32),
    # dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
    # dict(type='OpenCVDecode'),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='MultiScaleCrop', input_size=224, scales=(1, 0.8),
         random_crop=False, max_wh_scale_gap=0),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    # dict(type='OpenCVInit', num_threads=1),
    dict(type='DecordInit'),
    dict(type='DenseSampleFrames', clip_len=32, frame_interval=2, num_clips=1,
         test_mode=True),
    # dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=32,
    #      test_mode=True),
    # dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1,
    #      test_mode=True),
    # dict(type='OpenCVDecode'),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    # dict(type='OpenCVInit', num_threads=1),
    dict(type='DecordInit'),
    dict(type='DenseSampleFrames', clip_len=32, frame_interval=2, num_clips=1,
         test_mode=True),
    # dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=32,
    #      test_mode=True),
    # dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1,
    #      test_mode=True),
    # dict(type='OpenCVDecode'),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 224)),
    dict(type='ThreeCrop', crop_size=224),
    # dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=1,  # set to 2 for evaluation on GPU with 24GB
    workers_per_gpu=4,  # set to 2 for evaluation on GPU with 24GB
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, start_index=0, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, start_index=0, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_val,
              data_prefix=data_root_val, start_index=0,
              pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD',
    lr=0.001,  # change from 0.01 to 0.001
    momentum=0.9,
    weight_decay=0.0001,
    nesterov=True)
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])  # change from [40, 80] to [20, 40]
total_epochs = 50  # change from 100 to 50
checkpoint_config = dict(interval=10)
evaluation = dict(
    interval=60, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook'),
    ])
annealing_runner = True
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/finetune_ucf101_mae_edlnokl/'
# load_from = 'https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb_20200725-24eb54cc.pth'  # model path can be found in model zoo
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/mae/finetune_ucf101_mae_edlnokl.py
# model settings
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='C3D',
        pretrained=  # noqa: E251
        'https://download.openmmlab.com/mmaction/recognition/c3d/c3d_sports1m_pretrain_20201016-dcc47ddc.pth',  # noqa: E501
        style='pytorch',
        conv_cfg=dict(type='Conv3d'),
        norm_cfg=None,
        act_cfg=dict(type='ReLU'),
        dropout_ratio=0.5,
        init_std=0.005),
    cls_head=dict(
        type='I3DHead',
        num_classes=101,
        in_channels=4096,
        spatial_type=None,
        dropout_ratio=0.5,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='score')
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/ucf101/rawframes'
data_root_val = 'data/ucf101/rawframes'
split = 1  # official train/test splits. valid numbers: 1, 2, 3
ann_file_train = f'data/ucf101/ucf101_train_split_{split}_rawframes.txt'
ann_file_val = f'data/ucf101/ucf101_val_split_{split}_rawframes.txt'
ann_file_test = f'data/ucf101/ucf101_val_split_{split}_rawframes.txt'
img_norm_cfg = dict(mean=[104, 117, 128], std=[1, 1, 1], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=16, frame_interval=1, num_clips=1),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(128, 171)),
    dict(type='RandomCrop', size=112),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=16, frame_interval=1, num_clips=1,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(128, 171)),
    dict(type='CenterCrop', crop_size=112),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=16, frame_interval=1, num_clips=10,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(128, 171)),
    dict(type='CenterCrop', crop_size=112),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
data = dict(
    videos_per_gpu=30,
    workers_per_gpu=2,
    test_dataloader=dict(videos_per_gpu=1),
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.001, momentum=0.9,
    weight_decay=0.0005)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 45
checkpoint_config = dict(interval=5)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = f'./work_dirs/c3d_sports1m_16x1x1_45e_ucf101_split_{split}_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb.py
# model settings
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3d',
        pretrained2d=True,
        pretrained='torchvision://resnet50',
        # pretrained='work_dirs/i3d/finetune_ucf101_i3d_edlnokl/latest.pth',
        # pretrained=False,
        depth=50,
        conv_cfg=dict(type='Conv3d'),
        norm_eval=False,
        inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)),
        zero_init_residual=False),
    cls_head=dict(
        type='I3DHead',
        loss_cls=dict(
            type='EvidenceLoss',
            num_classes=101,
            evidence='exp',
            loss_type='log',
            annealing_method='exp'),
        num_classes=101,
        in_channels=2048,
        spatial_type='avg',
        dropout_ratio=0.5,
        init_std=0.01))
# model training and testing settings
evidence = 'exp'  # only used for EDL
test_cfg = dict(average_clips='score')
# dataset settings
dataset_type = 'VideoDataset'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1,
         test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    test=dict(type=dataset_type, ann_file=None, data_prefix=None,
              pipeline=test_pipeline))
dist_params = dict(backend='nccl')
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/inference_i3d_enn.py
# model settings
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3d',
        pretrained2d=True,
        pretrained='torchvision://resnet50',
        depth=50,
        conv_cfg=dict(type='Conv3d'),
        norm_eval=False,
        inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)),
        zero_init_residual=False),
    cls_head=dict(
        type='I3DHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        dropout_ratio=0.5,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='MultiScaleCrop', input_size=224, scales=(1, 0.8),
         random_crop=False, max_wh_scale_gap=0),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=10,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_val,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.01, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=5)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/i3d_r50_32x2x1_100e_kinetics400_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer3DRPL',
    backbone=dict(
        type='ResNet3d',
        pretrained2d=True,
        pretrained='torchvision://resnet50',
        depth=50,
        conv_cfg=dict(type='Conv3d'),
        norm_eval=False,
        inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)),
        zero_init_residual=False),
    cls_head=dict(
        type='I3DRPLHead',
        loss_cls=dict(type='RPLoss', temperature=1, weight_pl=0.1),
        num_classes=101,
        in_channels=2048,
        spatial_type='avg',
        dropout_ratio=0.5,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'VideoDataset'
data_root = 'data/ucf101/videos'
data_root_val = 'data/ucf101/videos'
ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt'
ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt'
ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='DenseSampleFrames', clip_len=32, frame_interval=2, num_clips=1),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='MultiScaleCrop', input_size=224, scales=(1, 0.8),
         random_crop=False, max_wh_scale_gap=0),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='DenseSampleFrames', clip_len=32, frame_interval=2, num_clips=1,
         test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='DenseSampleFrames', clip_len=32, frame_interval=2, num_clips=1,
         test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,  # set to 8 for training
    workers_per_gpu=4,  # set to 4 for training
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, start_index=0, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, start_index=0, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_val,
              data_prefix=data_root_val, start_index=0,
              pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD',
    lr=0.001,  # change from 0.01 to 0.001
    momentum=0.9,
    weight_decay=0.0001,
    nesterov=True)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50  # change from 100 to 50
checkpoint_config = dict(interval=10)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/finetune_ucf101_i3d_rpl/'
load_from = 'https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb_20200725-24eb54cc.pth'  # model path can be found in model zoo
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/finetune_ucf101_i3d_rpl.py
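The `RPLoss` head above is referenced only by name. For orientation, below is a minimal, illustrative sketch of a reciprocal-points-style loss, assuming distance-based logits and a learned open-space radius; the class name, the initialization, and the exact open-space term are assumptions for exposition, not the repository's implementation.

import torch
import torch.nn as nn
import torch.nn.functional as F

class ReciprocalPointsLoss(nn.Module):
    """Illustrative sketch of an RPL-style loss (assumed, not the repo's code).

    Each class k owns a reciprocal point p_k; logits are squared distances to
    the reciprocal points, so cross-entropy pushes a known sample AWAY from its
    own class's reciprocal point. `weight_pl` weighs an open-space term that
    ties the own-class distance to a learned radius.
    """

    def __init__(self, feat_dim, num_classes, temperature=1.0, weight_pl=0.1):
        super().__init__()
        self.points = nn.Parameter(0.1 * torch.randn(num_classes, feat_dim))
        self.radius = nn.Parameter(torch.tensor(0.0))
        self.temperature = temperature
        self.weight_pl = weight_pl

    def forward(self, feat, label):
        dist = torch.cdist(feat, self.points) ** 2   # (B, K) squared distances
        logits = dist / self.temperature             # larger distance => higher score
        loss_ce = F.cross_entropy(logits, label)
        # open-space risk: keep the own-class distance close to the radius
        d_own = dist.gather(1, label.unsqueeze(1)).squeeze(1)
        loss_open = F.mse_loss(d_own, self.radius.expand_as(d_own))
        return loss_ce + self.weight_pl * loss_open

At test time the same distance-based scores can be thresholded to reject samples that sit in the open space far from all reciprocal points.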
# model settings
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3d',
        pretrained2d=True,
        pretrained='torchvision://resnet50',
        depth=50,
        conv_cfg=dict(type='Conv3d'),
        norm_eval=False,
        inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)),
        zero_init_residual=False),
    cls_head=dict(
        type='I3DHead',
        num_classes=101,
        in_channels=2048,
        spatial_type='avg',
        dropout_ratio=0.5,
        init_std=0.01))
# model training and testing settings
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'VideoDataset'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=32, frame_interval=2,
         num_clips=1, test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    test=dict(type=dataset_type, ann_file=None, data_prefix=None,
              pipeline=test_pipeline))
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/inference_i3d_dnn.py
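Note that `ann_file=None` and `data_prefix=None` in the inference configs are deliberate placeholders: the test split has to be injected before the config is usable. A minimal sketch, with hypothetical UCF-101 paths standing in for whatever split is being evaluated:

from mmcv import Config

cfg = Config.fromfile('configs/recognition/i3d/inference_i3d_dnn.py')
# the test split must be supplied at run time; these paths are placeholders
cfg.data.test.ann_file = 'data/ucf101/ucf101_val_split_1_videos.txt'
cfg.data.test.data_prefix = 'data/ucf101/videos'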
# model settings
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3d',
        pretrained2d=True,
        pretrained='torchvision://resnet50',
        depth=50,
        conv_cfg=dict(type='Conv3d'),
        norm_eval=False,
        inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)),
        non_local=((0, 0, 0), (0, 1, 0, 1), (0, 1, 0, 1, 0, 1), (0, 0, 0)),
        non_local_cfg=dict(
            sub_sample=True,
            use_scale=False,
            norm_cfg=dict(type='BN3d', requires_grad=True),
            mode='dot_product'),
        zero_init_residual=False),
    cls_head=dict(
        type='I3DHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        dropout_ratio=0.5,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='MultiScaleCrop', input_size=224, scales=(1, 0.8),
         random_crop=False, max_wh_scale_gap=0),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=32, frame_interval=2,
         num_clips=1, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=32, frame_interval=2,
         num_clips=10, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_val,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.01, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=5)
evaluation = dict(interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/i3d_nl_dot_product_r50_32x2x1_100e_kinetics400_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_nl_dot_product_r50_32x2x1_100e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3d',
        pretrained2d=True,
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False,
        inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)),
        zero_init_residual=False),
    cls_head=dict(
        type='I3DHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        dropout_ratio=0.5,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
    dict(type='RawFrameDecode', decoding_backend='turbojpeg'),
    dict(type='Resize', scale=(-1, 256), lazy=True),
    dict(type='MultiScaleCrop', input_size=224, scales=(1, 0.8),
         random_crop=False, max_wh_scale_gap=0, lazy=True),
    dict(type='Resize', scale=(224, 224), keep_ratio=False, lazy=True),
    dict(type='Flip', flip_ratio=0.5, lazy=True),
    dict(type='Fuse'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=32, frame_interval=2,
         num_clips=1, test_mode=True),
    dict(type='RawFrameDecode', decoding_backend='turbojpeg'),
    dict(type='Resize', scale=(-1, 256), lazy=True),
    dict(type='CenterCrop', crop_size=224, lazy=True),
    dict(type='Flip', flip_ratio=0, lazy=True),
    dict(type='Fuse'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=32, frame_interval=2,
         num_clips=10, test_mode=True),
    dict(type='RawFrameDecode', decoding_backend='turbojpeg'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_val,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.01, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=5)
evaluation = dict(interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/i3d_r50_lazy_32x2x1_100e_kinetics400_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_r50_lazy_32x2x1_100e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3d',
        pretrained2d=True,
        pretrained='torchvision://resnet50',
        depth=50,
        conv_cfg=dict(type='Conv3d'),
        norm_eval=False,
        inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)),
        zero_init_residual=False),
    cls_head=dict(
        type='I3DHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        dropout_ratio=0.5,
        init_std=0.01))
# model training and testing settings
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'VideoDataset'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
test_pipeline = [
    dict(type='DecordInit', num_threads=1),
    dict(type='SampleFrames', clip_len=32, frame_interval=2,
         num_clips=1, test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=1,
    workers_per_gpu=2,
    test=dict(type=dataset_type, ann_file=None, data_prefix=None,
              pipeline=test_pipeline))
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_r50_video_inference_32x2x1_100e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3d',
        pretrained2d=True,
        pretrained='torchvision://resnet50',
        depth=50,
        conv_cfg=dict(type='Conv3d'),
        norm_eval=False,
        inflate=(1, 1, 1, 1),
        conv1_stride_t=1,
        pool1_stride_t=1,
        with_pool2=True,
        zero_init_residual=False),
    cls_head=dict(
        type='I3DHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        dropout_ratio=0.5,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'VideoDataset'
data_root = 'data/kinetics400/videos_train'
data_root_val = 'data/kinetics400/videos_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='MultiScaleCrop', input_size=224, scales=(1, 0.8),
         random_crop=False, max_wh_scale_gap=0),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=8, frame_interval=8,
         num_clips=1, test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=8, frame_interval=8,
         num_clips=10, test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_val,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.01, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=5)
evaluation = dict(interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/i3d_r50_video_heavy_8x8x1_100e_kinetics400_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_r50_video_heavy_8x8x1_100e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3d',
        pretrained2d=True,
        pretrained='torchvision://resnet50',
        depth=50,
        conv_cfg=dict(type='Conv3d'),
        norm_eval=False,
        inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)),
        non_local=((0, 0, 0), (0, 1, 0, 1), (0, 1, 0, 1, 0, 1), (0, 0, 0)),
        non_local_cfg=dict(
            sub_sample=True,
            use_scale=False,
            norm_cfg=dict(type='BN3d', requires_grad=True),
            mode='embedded_gaussian'),
        zero_init_residual=False),
    cls_head=dict(
        type='I3DHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        dropout_ratio=0.5,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='MultiScaleCrop', input_size=224, scales=(1, 0.8),
         random_crop=False, max_wh_scale_gap=0),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=32, frame_interval=2,
         num_clips=1, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=32, frame_interval=2,
         num_clips=10, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_val,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.01, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=5)
evaluation = dict(interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/i3d_nl_embedded_gaussian_r50_32x2x1_100e_kinetics400_rgb/'  # noqa: E501
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_nl_embedded_gaussian_r50_32x2x1_100e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer3DBNN',
    backbone=dict(
        type='ResNet3d',
        pretrained2d=True,
        pretrained='torchvision://resnet50',
        depth=50,
        conv_cfg=dict(type='Conv3d'),
        norm_eval=False,
        inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)),
        zero_init_residual=False),
    cls_head=dict(
        type='I3DBNNHead',
        num_classes=101,
        in_channels=2048,
        spatial_type='avg',
        dropout_ratio=0,
        init_std=0.01))
# model training and testing settings
test_cfg = dict(average_clips='prob', npass=10)
# dataset settings
dataset_type = 'VideoDataset'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=32, frame_interval=2,
         num_clips=1, test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    test=dict(type=dataset_type, ann_file=None, data_prefix=None,
              pipeline=test_pipeline))
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/inference_i3d_bnn.py
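`test_cfg = dict(average_clips='prob', npass=10)` indicates that the Bayesian recognizer is sampled `npass` times at test time and the resulting softmax outputs are averaged. A minimal sketch of that Monte Carlo averaging, assuming `model` is a callable that returns raw class scores and keeps its stochastic layers active in eval mode (the helper name is illustrative):

import torch

@torch.no_grad()
def mc_predict(model, imgs, npass=10):
    """Average `npass` stochastic forward passes and report an uncertainty."""
    probs = torch.stack([model(imgs).softmax(dim=-1) for _ in range(npass)])
    mean_prob = probs.mean(dim=0)  # predictive distribution over classes
    # predictive entropy as a simple per-sample uncertainty estimate
    uncertainty = -(mean_prob * mean_prob.clamp_min(1e-12).log()).sum(dim=-1)
    return mean_prob, uncertainty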
# model settings
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3d',
        pretrained2d=True,
        pretrained='torchvision://resnet50',
        depth=50,
        conv_cfg=dict(type='Conv3d'),
        norm_eval=False,
        inflate=(1, 1, 1, 1),
        conv1_stride_t=1,
        pool1_stride_t=1,
        with_pool2=True,
        zero_init_residual=False),
    cls_head=dict(
        type='I3DHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        dropout_ratio=0.5,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='MultiScaleCrop', input_size=224, scales=(1, 0.8),
         random_crop=False, max_wh_scale_gap=0),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=8, frame_interval=8,
         num_clips=1, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=8, frame_interval=8,
         num_clips=10, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_val,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.01, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=5)
evaluation = dict(interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/i3d_r50_heavy_8x8x1_100e_kinetics400_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_r50_heavy_8x8x1_100e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer3DBNN',
    backbone=dict(
        type='ResNet3d',
        pretrained2d=True,
        pretrained='torchvision://resnet50',
        depth=50,
        conv_cfg=dict(type='Conv3d'),
        norm_eval=False,
        inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)),
        zero_init_residual=False),
    cls_head=dict(
        type='I3DBNNHead',
        num_classes=101,
        in_channels=2048,
        spatial_type='avg',
        dropout_ratio=0,
        init_std=0.01))
# model training and testing settings
train_cfg = dict(loss_weight=1e-6, npass=2)
test_cfg = dict(average_clips='prob', npass=10)
# dataset settings
dataset_type = 'VideoDataset'
data_root = 'data/ucf101/videos'
data_root_val = 'data/ucf101/videos'
ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt'
ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt'
ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='DenseSampleFrames', clip_len=32, frame_interval=2, num_clips=1),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='MultiScaleCrop', input_size=224, scales=(1, 0.8),
         random_crop=False, max_wh_scale_gap=0),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='DenseSampleFrames', clip_len=32, frame_interval=2,
         num_clips=1, test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='DenseSampleFrames', clip_len=32, frame_interval=2,
         num_clips=1, test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,   # set to 8 for training
    workers_per_gpu=4,  # set to 4 for training
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, start_index=0, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, start_index=0, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_val,
              data_prefix=data_root_val, start_index=0, pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD',
    lr=0.001,  # changed from 0.01 to 0.001
    momentum=0.9,
    weight_decay=0.0001,
    nesterov=True)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50  # changed from 100 to 50
checkpoint_config = dict(interval=10)
evaluation = dict(interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/finetune_ucf101_i3d_bnn/'
load_from = 'https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb_20200725-24eb54cc.pth'  # model path can be found in model zoo
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/finetune_ucf101_i3d_bnn.py
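`train_cfg = dict(loss_weight=1e-6, npass=2)` is consistent with an ELBO-style objective for the Bayesian head: the likelihood term is estimated from `npass` sampled weight sets and the KL term is scaled down by `loss_weight`. Schematically (notation assumed for exposition, not taken from the loss code):

    \mathcal{L} \;=\; -\frac{1}{N}\sum_{i=1}^{N} \log p\bigl(y \mid x, w^{(i)}\bigr)
    \;+\; \lambda \, \mathrm{KL}\bigl(q(w)\,\|\,p(w)\bigr),
    \qquad w^{(i)} \sim q(w),

with N = `npass` and \lambda = `loss_weight`. The tiny \lambda keeps the KL regularizer from dominating the classification term during fine-tuning.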
# model settings
evidence_loss = dict(
    type='EvidenceLoss',
    num_classes=101,
    evidence='exp',
    loss_type='log',
    with_kldiv=False,
    with_avuloss=False,
    annealing_method='exp')
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3d',
        pretrained2d=True,
        pretrained='torchvision://resnet50',
        depth=50,
        conv_cfg=dict(type='Conv3d'),
        norm_eval=False,
        inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)),
        zero_init_residual=False),
    cls_head=dict(
        type='I3DHead',
        loss_cls=evidence_loss,
        num_classes=101,
        in_channels=2048,
        spatial_type='avg',
        dropout_ratio=0.5,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='evidence', evidence_type='exp')
# dataset settings
dataset_type = 'VideoDataset'
data_root = 'data/ucf101/videos'
data_root_val = 'data/ucf101/videos'
ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt'
ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt'
ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='DenseSampleFrames', clip_len=32, frame_interval=2, num_clips=1),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='MultiScaleCrop', input_size=224, scales=(1, 0.8),
         random_crop=False, max_wh_scale_gap=0),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='DenseSampleFrames', clip_len=32, frame_interval=2,
         num_clips=1, test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='DenseSampleFrames', clip_len=32, frame_interval=2,
         num_clips=1, test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,   # set to 2 for evaluation on GPU with 24GB
    workers_per_gpu=4,  # set to 2 for evaluation on GPU with 24GB
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, start_index=0, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, start_index=0, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_val,
              data_prefix=data_root_val, start_index=0, pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD',
    lr=0.001,  # changed from 0.01 to 0.001
    momentum=0.9,
    weight_decay=0.0001,
    nesterov=True)
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])  # changed from [40, 80] to [20, 40]
total_epochs = 50  # changed from 100 to 50
checkpoint_config = dict(interval=10)
evaluation = dict(interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook'),
    ])
annealing_runner = True
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/finetune_ucf101_i3d_edlnokl/'
load_from = 'https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb_20200725-24eb54cc.pth'  # model path can be found in model zoo
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/finetune_ucf101_i3d_edlnokl.py
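With `average_clips='evidence'` and `evidence_type='exp'`, clip scores are read as Dirichlet evidence rather than softmax probabilities. A minimal sketch of the standard evidential conversion (the function name and the clamp bounds are assumptions added for numerical safety, not an excerpt from the repository):

import torch

def evidential_scores(logits, num_classes=101):
    """Convert logits to Dirichlet parameters, class probs and vacuity."""
    evidence = torch.exp(torch.clamp(logits, -10, 10))  # evidence='exp'
    alpha = evidence + 1                                # Dirichlet parameters
    strength = alpha.sum(dim=-1, keepdim=True)          # S = sum_k alpha_k
    probs = alpha / strength                            # expected class probs
    uncertainty = num_classes / strength.squeeze(-1)    # vacuity: u = K / S
    return probs, uncertainty

The vacuity `u` is what makes the model usable for open-set recognition: samples from unknown classes should accumulate little evidence and hence a high `u`.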
# model settings
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3d',
        pretrained2d=True,
        pretrained='torchvision://resnet50',
        depth=50,
        conv_cfg=dict(type='Conv3d'),
        norm_eval=False,
        inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)),
        non_local=((0, 0, 0), (0, 1, 0, 1), (0, 1, 0, 1, 0, 1), (0, 0, 0)),
        non_local_cfg=dict(
            sub_sample=True,
            use_scale=False,
            norm_cfg=dict(type='BN3d', requires_grad=True),
            mode='gaussian'),
        zero_init_residual=False),
    cls_head=dict(
        type='I3DHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        dropout_ratio=0.5,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='MultiScaleCrop', input_size=224, scales=(1, 0.8),
         random_crop=False, max_wh_scale_gap=0),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=32, frame_interval=2,
         num_clips=1, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=32, frame_interval=2,
         num_clips=10, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_val,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.01, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=5)
evaluation = dict(interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/i3d_nl_gaussian_r50_32x2x1_100e_kinetics400_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_nl_gaussian_r50_32x2x1_100e_kinetics400_rgb.py
# model settings
evidence_loss = dict(
    type='EvidenceLoss',
    num_classes=10,
    evidence='exp',
    loss_type='log',
    with_kldiv=False,
    with_avuloss=True,
    annealing_method='exp')
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3d',
        pretrained2d=True,
        pretrained='torchvision://resnet50',
        depth=50,
        conv_cfg=dict(type='Conv3d'),
        norm_eval=False,
        inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)),
        zero_init_residual=False),
    cls_head=dict(
        type='I3DHead',
        loss_cls=evidence_loss,
        num_classes=10,
        in_channels=2048,
        spatial_type='avg',
        dropout_ratio=0.5,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='evidence', evidence_type='exp')
# dataset settings
dataset_type = 'VideoDataset'
data_root = 'data/kinetics10/videos_train'
data_root_val = 'data/kinetics10/videos_val'
ann_file_train = 'data/kinetics10/kinetics10_train_list_videos.txt'
ann_file_val = 'data/kinetics10/kinetics10_val_list_videos.txt'
ann_file_test = 'data/kinetics10/kinetics10_val_list_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='DenseSampleFrames', clip_len=32, frame_interval=2, num_clips=1),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='MultiScaleCrop', input_size=224, scales=(1, 0.8),
         random_crop=False, max_wh_scale_gap=0),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='DenseSampleFrames', clip_len=32, frame_interval=2,
         num_clips=1, test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='DenseSampleFrames', clip_len=32, frame_interval=2,
         num_clips=1, test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,   # set to 2 for evaluation on GPU with 24GB
    workers_per_gpu=4,  # set to 2 for evaluation on GPU with 24GB
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, start_index=0, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, start_index=0, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_val,
              data_prefix=data_root_val, start_index=0, pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD',
    lr=0.001,  # changed from 0.01 to 0.001
    momentum=0.9,
    weight_decay=0.0001,
    nesterov=True)
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])  # changed from [40, 80] to [20, 40]
total_epochs = 50  # changed from 100 to 50
checkpoint_config = dict(interval=10)
evaluation = dict(interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook'),
    ])
annealing_runner = True
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/train_kinetics10_i3d_DEAR_noDebias/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/train_kinetics10_i3d_DEAR_noDebias.py
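`loss_type='log'` in `EvidenceLoss` corresponds to the standard log-form evidential classification loss; with `with_kldiv=False` the per-sample classification term reduces to

    \mathcal{L}_{\mathrm{cls}} = \sum_{k=1}^{K} y_k \left( \log S - \log \alpha_k \right),
    \qquad S = \sum_{k=1}^{K} \alpha_k,

where \alpha_k = e_k + 1 is the Dirichlet parameter built from the predicted evidence e_k. `annealing_method='exp'` together with `annealing_runner = True` schedules the weight of the auxiliary terms (here the AvU term) over training epochs; the exact exponential schedule is defined in the repository's loss implementation rather than in this config.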
# model settings
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3d',
        pretrained2d=True,
        pretrained='torchvision://resnet50',
        depth=50,
        conv_cfg=dict(type='Conv3d'),
        norm_eval=False,
        inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)),
        zero_init_residual=False),
    cls_head=dict(
        type='I3DHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        dropout_ratio=0.5,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'VideoDataset'
data_root = 'data/kinetics400/videos_train'
data_root_val = 'data/kinetics400/videos_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='MultiScaleCrop', input_size=224, scales=(1, 0.8),
         random_crop=False, max_wh_scale_gap=0),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=32, frame_interval=2,
         num_clips=1, test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=32, frame_interval=2,
         num_clips=10, test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_val,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.01, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=5)
evaluation = dict(interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/i3d_r50_video_3d_32x2x1_100e_kinetics400_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_r50_video_32x2x1_100e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3d',
        pretrained2d=True,
        pretrained='torchvision://resnet50',
        depth=50,
        conv_cfg=dict(type='Conv3d'),
        norm_eval=False,
        inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)),
        zero_init_residual=False),
    cls_head=dict(
        type='I3DHead',
        num_classes=101,
        in_channels=2048,
        spatial_type='avg',
        dropout_ratio=0.5,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'VideoDataset'
data_root = 'data/ucf101/videos'
data_root_val = 'data/ucf101/videos'
ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt'
ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt'
ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='DenseSampleFrames', clip_len=32, frame_interval=2, num_clips=1),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='MultiScaleCrop', input_size=224, scales=(1, 0.8),
         random_crop=False, max_wh_scale_gap=0),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='DenseSampleFrames', clip_len=32, frame_interval=2,
         num_clips=1, test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='DenseSampleFrames', clip_len=32, frame_interval=2,
         num_clips=1, test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,   # set to 8 for training
    workers_per_gpu=4,  # set to 4 for training
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, start_index=0, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, start_index=0, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_val,
              data_prefix=data_root_val, start_index=0, pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD',
    lr=0.001,  # changed from 0.01 to 0.001
    momentum=0.9,
    weight_decay=0.0001,
    nesterov=True)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50  # changed from 100 to 50
checkpoint_config = dict(interval=10)
evaluation = dict(interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/finetune_ucf101_i3d_dnn/'
load_from = 'https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb_20200725-24eb54cc.pth'  # model path can be found in model zoo
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/finetune_ucf101_i3d_dnn.py
# model settings
model = dict(
    type='Recognizer3DRPL',
    backbone=dict(
        type='ResNet3d',
        pretrained2d=True,
        pretrained='torchvision://resnet50',
        depth=50,
        conv_cfg=dict(type='Conv3d'),
        norm_eval=False,
        inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)),
        zero_init_residual=False),
    cls_head=dict(
        type='I3DRPLHead',
        loss_cls=dict(type='RPLoss', temperature=1, weight_pl=0.1),
        num_classes=101,
        in_channels=2048,
        spatial_type='avg',
        dropout_ratio=0.5,
        init_std=0.01))
# model training and testing settings
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'VideoDataset'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=32, frame_interval=2,
         num_clips=1, test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    test=dict(type=dataset_type, ann_file=None, data_prefix=None,
              pipeline=test_pipeline))
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/inference_i3d_rpl.py
# model settings
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3d',
        pretrained2d=True,
        pretrained='torchvision://resnet50',
        depth=50,
        conv_cfg=dict(type='Conv3d'),
        norm_eval=False,
        inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)),
        zero_init_residual=False),
    cls_head=dict(
        type='I3DHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        dropout_ratio=0.5,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='DenseSampleFrames', clip_len=32, frame_interval=2, num_clips=1),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='MultiScaleCrop', input_size=224, scales=(1, 0.8),
         random_crop=False, max_wh_scale_gap=0),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='DenseSampleFrames', clip_len=32, frame_interval=2,
         num_clips=1, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='DenseSampleFrames', clip_len=32, frame_interval=2,
         num_clips=1, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_val,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.01, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=5)
evaluation = dict(interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/i3d_r50_dense_32x2x1_100e_kinetics400_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/i3d_r50_dense_32x2x1_100e_kinetics400_rgb.py
# model settings
evidence_loss = dict(
    type='EvidenceLoss',
    num_classes=101,
    evidence='exp',
    loss_type='log',
    with_kldiv=False,
    with_avuloss=True,
    annealing_method='exp')
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3d',
        pretrained2d=True,
        pretrained='torchvision://resnet50',
        depth=50,
        conv_cfg=dict(type='Conv3d'),
        norm_eval=False,
        inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)),
        zero_init_residual=False),
    cls_head=dict(
        type='I3DHead',
        loss_cls=evidence_loss,
        num_classes=101,
        in_channels=2048,
        spatial_type='avg',
        dropout_ratio=0.5,
        init_std=0.01),
    debias_head=dict(
        type='DebiasHead',
        loss_cls=evidence_loss,  # actually not used!
        loss_factor=0.1,
        num_classes=101,
        in_channels=2048,
        dropout_ratio=0.5,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='evidence', evidence_type='exp')
# dataset settings
dataset_type = 'VideoDataset'
data_root = 'data/ucf101/videos'
data_root_val = 'data/ucf101/videos'
ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt'
ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt'
ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='DenseSampleFrames', clip_len=32, frame_interval=2, num_clips=1),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='MultiScaleCrop', input_size=224, scales=(1, 0.8),
         random_crop=False, max_wh_scale_gap=0),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='DenseSampleFrames', clip_len=32, frame_interval=2,
         num_clips=1, test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='DenseSampleFrames', clip_len=32, frame_interval=2,
         num_clips=1, test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,   # set to 2 for evaluation on GPU with 24GB
    workers_per_gpu=4,  # set to 2 for evaluation on GPU with 24GB
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, start_index=0, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, start_index=0, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_val,
              data_prefix=data_root_val, start_index=0, pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD',
    lr=0.001,  # changed from 0.01 to 0.001
    momentum=0.9,
    weight_decay=0.0001,
    nesterov=True)
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])  # changed from [40, 80] to [20, 40]
total_epochs = 50  # changed from 100 to 50
checkpoint_config = dict(interval=10)
evaluation = dict(interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook'),
    ])
annealing_runner = True
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/finetune_ucf101_i3d_edlnokl_avuc_debias/'
load_from = 'https://download.openmmlab.com/mmaction/recognition/i3d/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb/i3d_r50_dense_256p_32x2x1_100e_kinetics400_rgb_20200725-24eb54cc.pth'  # model path can be found in model zoo
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/finetune_ucf101_i3d_edlnokl_avuc_debias.py
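`with_avuloss=True` adds an accuracy-versus-uncertainty (AvU) calibration term. In the usual formulation, predictions are binned by correctness and by an uncertainty threshold into accurate-certain (AC), accurate-uncertain (AU), inaccurate-certain (IC) and inaccurate-uncertain (IU), and the utility

    \mathrm{AvU} = \frac{n_{AC} + n_{IU}}{n_{AC} + n_{AU} + n_{IC} + n_{IU}}

is pushed toward 1, e.g. by minimizing -\log \mathrm{AvU}. The differentiable relaxation actually used here lives in the repository's `EvidenceLoss`; the counts above are the standard textbook definition, not an excerpt from that code.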
# model settings
evidence_loss = dict(
    type='EvidenceLoss', num_classes=10, evidence='exp', loss_type='log',
    with_kldiv=False, with_avuloss=True, annealing_method='exp')
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3d', pretrained2d=True,
        pretrained='torchvision://resnet50', depth=50,
        conv_cfg=dict(type='Conv3d'), norm_eval=False,
        inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)),
        zero_init_residual=False),
    cls_head=dict(
        type='I3DHead', loss_cls=evidence_loss, num_classes=10,
        in_channels=2048, spatial_type='avg', dropout_ratio=0.5,
        init_std=0.01),
    debias_head=dict(
        type='DebiasHead',
        loss_cls=evidence_loss,  # actually not used!
        loss_factor=0.1, num_classes=10, in_channels=2048,
        dropout_ratio=0.5, init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='evidence', evidence_type='exp')
# dataset settings
dataset_type = 'VideoDataset'
data_root = 'data/kinetics10/videos_train'
data_root_val = 'data/kinetics10/videos_val'
ann_file_train = 'data/kinetics10/kinetics10_train_list_videos.txt'
ann_file_val = 'data/kinetics10/kinetics10_val_list_videos.txt'
ann_file_test = 'data/kinetics10/kinetics10_val_list_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='DenseSampleFrames', clip_len=32, frame_interval=2,
         num_clips=1),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='MultiScaleCrop', input_size=224, scales=(1, 0.8),
         random_crop=False, max_wh_scale_gap=0),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='DenseSampleFrames', clip_len=32, frame_interval=2,
         num_clips=1, test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='DenseSampleFrames', clip_len=32, frame_interval=2,
         num_clips=1, test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,  # set to 2 for evaluation on GPU with 24GB
    workers_per_gpu=4,  # set to 2 for evaluation on GPU with 24GB
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, start_index=0,
               pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, start_index=0,
             pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_val, start_index=0,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD',
    lr=0.001, momentum=0.9,  # change from 0.01 to 0.001
    weight_decay=0.0001, nesterov=True)
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])  # change from [40,80] to [20,40]
total_epochs = 50  # change from 100 to 50
checkpoint_config = dict(interval=10)
evaluation = dict(interval=5,
                  metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook'),
    ])
annealing_runner = True
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/train_kinetics10_i3d_DEAR/'
load_from = None
resume_from = None
workflow = [('train', 1)]
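# Sketch (assumption, not part of the original config): with
# `average_clips='evidence'`, test scores are evidence values rather than
# softmax probabilities. Under evidential deep learning, class probabilities
# and a vacuity-style uncertainty are typically recovered as follows, where
# `logits` is the raw head output over `num_classes` classes:
#
#     import torch
#     evidence = torch.exp(torch.clamp(logits, -10, 10))  # 'exp' evidence
#     alpha = evidence + 1                                # Dirichlet params
#     strength = alpha.sum(dim=-1, keepdim=True)
#     probs = alpha / strength                            # expected probs
#     uncertainty = num_classes / strength.squeeze(-1)    # in (0, 1]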
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/i3d/train_kinetics10_i3d_DEAR.py
# model settings
model = dict(
    type='Recognizer3DRPL',
    backbone=dict(
        type='ResNet3dSlowOnly', depth=50,
        pretrained='torchvision://resnet50', lateral=False,
        out_indices=(2, 3), conv1_kernel=(1, 7, 7), conv1_stride_t=1,
        pool1_stride_t=1, inflate=(0, 0, 1, 1), norm_eval=False),
    neck=dict(
        type='TPN',
        in_channels=(1024, 2048),
        out_channels=1024,
        spatial_modulation_cfg=dict(in_channels=(1024, 2048),
                                    out_channels=2048),
        temporal_modulation_cfg=dict(downsample_scales=(8, 8)),
        upsample_cfg=dict(scale_factor=(1, 1, 1)),
        downsample_cfg=dict(downsample_scale=(1, 1, 1)),
        level_fusion_cfg=dict(
            in_channels=(1024, 1024), mid_channels=(1024, 1024),
            out_channels=2048,
            downsample_scales=((1, 1, 1), (1, 1, 1)))),
    cls_head=dict(
        type='TPNRPLHead',
        loss_cls=dict(type='RPLoss', temperature=1, weight_pl=0.1),
        num_classes=101, in_channels=2048, spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5, init_std=0.01))
evidence = 'exp'  # only used for EDL
test_cfg = dict(average_clips='prob')
dataset_type = 'VideoDataset'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=10,
         test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=2,
    workers_per_gpu=2,
    test=dict(type=dataset_type, ann_file=None, data_prefix=None,
              pipeline=test_pipeline))
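# Usage sketch (assumption about intent, not part of the original file):
# inference configs in this repo leave `ann_file` and `data_prefix` as None
# so a test script can fill them in before building the dataset, e.g.:
#
#     import mmcv
#     cfg = mmcv.Config.fromfile(
#         'configs/recognition/tpn/inference_tpn_slowonly_rpl.py')
#     cfg.data.test.ann_file = 'data/ucf101/ucf101_val_split_1_videos.txt'
#     cfg.data.test.data_prefix = 'data/ucf101/videos'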
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/inference_tpn_slowonly_rpl.py
# model settings
evidence_loss = dict(
    type='EvidenceLoss', num_classes=101, evidence='exp', loss_type='log',
    with_kldiv=False, with_avuloss=True, annealing_method='exp')
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowOnly', depth=50,
        pretrained='torchvision://resnet50', lateral=False,
        out_indices=(2, 3), conv1_kernel=(1, 7, 7), conv1_stride_t=1,
        pool1_stride_t=1, inflate=(0, 0, 1, 1), norm_eval=False),
    neck=dict(
        type='TPN',
        in_channels=(1024, 2048),
        out_channels=1024,
        spatial_modulation_cfg=dict(in_channels=(1024, 2048),
                                    out_channels=2048),
        temporal_modulation_cfg=dict(downsample_scales=(8, 8)),
        upsample_cfg=dict(scale_factor=(1, 1, 1)),
        downsample_cfg=dict(downsample_scale=(1, 1, 1)),
        level_fusion_cfg=dict(
            in_channels=(1024, 1024), mid_channels=(1024, 1024),
            out_channels=2048,
            downsample_scales=((1, 1, 1), (1, 1, 1))),
        aux_head_cfg=dict(out_channels=101, loss_weight=0.5)),
    cls_head=dict(
        type='TPNHead', loss_cls=evidence_loss, num_classes=101,
        in_channels=2048, spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5, init_std=0.01),
    debias_head=dict(
        type='DebiasHead',
        loss_cls=evidence_loss,  # actually not used!
        loss_factor=0.1, num_classes=101, in_channels=1024,
        dropout_ratio=0.5, init_std=0.01))
train_cfg = None
test_cfg = dict(average_clips='evidence', evidence_type='exp')
dataset_type = 'VideoDataset'
data_root = 'data/ucf101/videos'
data_root_val = 'data/ucf101/videos'
ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt'
ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt'
ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
    dict(type='OpenCVDecode'),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='ColorJitter', color_space_aug=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1,
         test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='ColorJitter', color_space_aug=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=10,
         test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, start_index=0,
               pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, start_index=0,
             pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, start_index=0,
              pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001,
    nesterov=True)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=10)
evaluation = dict(interval=5,
                  metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook'),
    ])
annealing_runner = True
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_edlloss_avuc'  # noqa: E501
load_from = 'https://download.openmmlab.com/mmaction/recognition/tpn/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb_20200923-52629684.pth'
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_edlloss_nokl_avuc_debias_r50_8x8x1_150e_kinetics_rgb.py
# model settings
evidence_loss = dict(
    type='EvidenceLoss', num_classes=101, evidence='exp', loss_type='log',
    annealing_method='exp')
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowOnly', depth=50,
        pretrained='torchvision://resnet50', lateral=False,
        out_indices=(2, 3), conv1_kernel=(1, 7, 7), conv1_stride_t=1,
        pool1_stride_t=1, inflate=(0, 0, 1, 1), norm_eval=False),
    neck=dict(
        type='TPN',
        in_channels=(1024, 2048),
        out_channels=1024,
        spatial_modulation_cfg=dict(in_channels=(1024, 2048),
                                    out_channels=2048),
        temporal_modulation_cfg=dict(downsample_scales=(8, 8)),
        upsample_cfg=dict(scale_factor=(1, 1, 1)),
        downsample_cfg=dict(downsample_scale=(1, 1, 1)),
        level_fusion_cfg=dict(
            in_channels=(1024, 1024), mid_channels=(1024, 1024),
            out_channels=2048,
            downsample_scales=((1, 1, 1), (1, 1, 1))),
        aux_head_cfg=dict(out_channels=101, loss_weight=0.5)),
    cls_head=dict(
        type='TPNHead', loss_cls=evidence_loss, num_classes=101,
        in_channels=2048, spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5, init_std=0.01))
train_cfg = None
test_cfg = dict(average_clips='prob')
dataset_type = 'VideoDataset'
data_root = 'data/ucf101/videos'
data_root_val = 'data/ucf101/videos'
ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt'
ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt'
ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
    dict(type='OpenCVDecode'),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='ColorJitter', color_space_aug=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1,
         test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='ColorJitter', color_space_aug=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=10,
         test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, start_index=0,
               pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, start_index=0,
             pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, start_index=0,
              pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.002, momentum=0.9, weight_decay=0.0001,
    nesterov=True)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=10)
evaluation = dict(interval=5,
                  metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook'),
    ])
annealing_runner = True
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_edlloss'  # noqa: E501
load_from = 'https://download.openmmlab.com/mmaction/recognition/tpn/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb_20200923-52629684.pth'
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_edlloss_r50_8x8x1_150e_kinetics_rgb.py
# model settings
evidence_loss = dict(
    type='EvidenceLoss', num_classes=101, evidence='exp', loss_type='log',
    with_kldiv=False, with_avuloss=True, disentangle=True,
    annealing_method='exp')
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowOnly', depth=50,
        pretrained='torchvision://resnet50', lateral=False,
        out_indices=(2, 3), conv1_kernel=(1, 7, 7), conv1_stride_t=1,
        pool1_stride_t=1, inflate=(0, 0, 1, 1), norm_eval=False),
    neck=dict(
        type='TPN',
        in_channels=(1024, 2048),
        out_channels=1024,
        spatial_modulation_cfg=dict(in_channels=(1024, 2048),
                                    out_channels=2048),
        temporal_modulation_cfg=dict(downsample_scales=(8, 8)),
        upsample_cfg=dict(scale_factor=(1, 1, 1)),
        downsample_cfg=dict(downsample_scale=(1, 1, 1)),
        level_fusion_cfg=dict(
            in_channels=(1024, 1024), mid_channels=(1024, 1024),
            out_channels=2048,
            downsample_scales=((1, 1, 1), (1, 1, 1))),
        aux_head_cfg=dict(out_channels=101, loss_weight=0.5)),
    cls_head=dict(
        type='TPNHead', loss_cls=evidence_loss, num_classes=101,
        in_channels=2048, spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5, init_std=0.01),
    debias_head=dict(
        type='DebiasHead',
        loss_cls=evidence_loss,  # actually not used!
        loss_factor=0.1, num_classes=101, in_channels=1024,
        dropout_ratio=0.5, init_std=0.01))
train_cfg = None
test_cfg = dict(average_clips='evidence', evidence_type='exp')
dataset_type = 'VideoDataset'
data_root = 'data/ucf101/videos'
data_root_val = 'data/ucf101/videos'
ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt'
ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt'
ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
    dict(type='OpenCVDecode'),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='ColorJitter', color_space_aug=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1,
         test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='ColorJitter', color_space_aug=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=10,
         test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, start_index=0,
               pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, start_index=0,
             pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, start_index=0,
              pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001,
    nesterov=True)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=10)
evaluation = dict(interval=5,
                  metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook'),
    ])
annealing_runner = True
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_edlloss_avuc'  # noqa: E501
load_from = 'https://download.openmmlab.com/mmaction/recognition/tpn/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb_20200923-52629684.pth'
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_edlloss_nokl_davuc_debias_r50_8x8x1_150e_kinetics_rgb.py
# model settings
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowOnly', depth=50,
        pretrained='torchvision://resnet50', lateral=False,
        out_indices=(2, 3), conv1_kernel=(1, 7, 7), conv1_stride_t=1,
        pool1_stride_t=1, inflate=(0, 0, 1, 1), norm_eval=False),
    neck=dict(
        type='TPN',
        in_channels=(1024, 2048),
        out_channels=1024,
        spatial_modulation_cfg=dict(in_channels=(1024, 2048),
                                    out_channels=2048),
        temporal_modulation_cfg=dict(downsample_scales=(8, 8)),
        upsample_cfg=dict(scale_factor=(1, 1, 1)),
        downsample_cfg=dict(downsample_scale=(1, 1, 1)),
        level_fusion_cfg=dict(
            in_channels=(1024, 1024), mid_channels=(1024, 1024),
            out_channels=2048,
            downsample_scales=((1, 1, 1), (1, 1, 1))),
        aux_head_cfg=dict(out_channels=400, loss_weight=0.5)),
    cls_head=dict(
        type='TPNHead', num_classes=400, in_channels=2048,
        spatial_type='avg', consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5, init_std=0.01))
train_cfg = None
test_cfg = dict(average_clips='prob')
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
    dict(type='FrameSelector'),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='ColorJitter', color_space_aug=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1,
         test_mode=True),
    dict(type='FrameSelector'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='ColorJitter', color_space_aug=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=10,
         test_mode=True),
    dict(type='FrameSelector'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=8,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001,
    nesterov=True)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[75, 125])
total_epochs = 150
checkpoint_config = dict(interval=1)
evaluation = dict(interval=5,
                  metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics400_rgb'  # noqa: E501
load_from = None
resume_from = None
workflow = [('train', 1)]
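# Note (assumption, not part of the upstream file): `aux_head_cfg` attaches
# TPN's auxiliary classification head to an intermediate backbone stage; its
# classification loss is scaled by `loss_weight=0.5` and added to the main
# head's loss during training, and the head is discarded at test time.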
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb.py
# model settings
model = dict(
    type='Recognizer3DBNN',
    backbone=dict(
        type='ResNet3dSlowOnly', depth=50,
        pretrained='torchvision://resnet50', lateral=False,
        out_indices=(2, 3), conv1_kernel=(1, 7, 7), conv1_stride_t=1,
        pool1_stride_t=1, inflate=(0, 0, 1, 1), norm_eval=False),
    neck=dict(
        type='TPN',
        in_channels=(1024, 2048),
        out_channels=1024,
        spatial_modulation_cfg=dict(in_channels=(1024, 2048),
                                    out_channels=2048),
        temporal_modulation_cfg=dict(downsample_scales=(8, 8)),
        upsample_cfg=dict(scale_factor=(1, 1, 1)),
        downsample_cfg=dict(downsample_scale=(1, 1, 1)),
        level_fusion_cfg=dict(
            in_channels=(1024, 1024), mid_channels=(1024, 1024),
            out_channels=2048,
            downsample_scales=((1, 1, 1), (1, 1, 1)))),
    cls_head=dict(
        type='TPNBNNHead', num_classes=101, in_channels=2048,
        spatial_type='avg', consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0,  # for bnn, dropout is not necessary
        init_std=0.01))
test_cfg = dict(average_clips='prob', npass=10)
dataset_type = 'VideoDataset'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=10,
         test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=2,
    workers_per_gpu=2,
    test=dict(type=dataset_type, ann_file=None, data_prefix=None,
              pipeline=test_pipeline))
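# Sketch (assumption, not part of the original file): with a Bayesian head,
# `npass=10` is expected to trigger 10 stochastic forward passes per input;
# averaging them gives the prediction, and their spread gives an uncertainty
# estimate, roughly:
#
#     import torch
#     # probs: (npass, num_classes) softmax outputs stacked over 10 passes
#     mean_prob = probs.mean(dim=0)          # final prediction
#     dispersion = probs.var(dim=0).sum(-1)  # simple uncertainty proxy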
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/inference_tpn_slowonly_bnn.py
# model settings
evidence_loss = dict(
    type='EvidenceLoss', num_classes=101, evidence='exp', loss_type='log',
    with_kldiv=False, with_avuloss=True, annealing_method='exp')
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowOnly', depth=50,
        pretrained='torchvision://resnet50', lateral=False,
        out_indices=(2, 3), conv1_kernel=(1, 7, 7), conv1_stride_t=1,
        pool1_stride_t=1, inflate=(0, 0, 1, 1), norm_eval=False),
    neck=dict(
        type='TPN',
        in_channels=(1024, 2048),
        out_channels=1024,
        spatial_modulation_cfg=dict(in_channels=(1024, 2048),
                                    out_channels=2048),
        temporal_modulation_cfg=dict(downsample_scales=(8, 8)),
        upsample_cfg=dict(scale_factor=(1, 1, 1)),
        downsample_cfg=dict(downsample_scale=(1, 1, 1)),
        level_fusion_cfg=dict(
            in_channels=(1024, 1024), mid_channels=(1024, 1024),
            out_channels=2048,
            downsample_scales=((1, 1, 1), (1, 1, 1))),
        rebias_head_cfg=dict(
            out_channels=101, loss_weight=0.5,
            loss_rebias=dict(type='RebiasLoss', lambda_g=1.0,
                             criteria='hsic'))),
    cls_head=dict(
        type='TPNHead', loss_cls=evidence_loss, num_classes=101,
        in_channels=2048, spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5, init_std=0.01))
train_cfg = None
test_cfg = dict(average_clips='prob')
dataset_type = 'VideoDataset'
data_root = 'data/ucf101/videos'
data_root_val = 'data/ucf101/videos'
ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt'
ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt'
ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
    dict(type='OpenCVDecode'),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='ColorJitter', color_space_aug=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1,
         test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='ColorJitter', color_space_aug=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=10,
         test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=2,
    workers_per_gpu=2,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, start_index=0,
               pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, start_index=0,
             pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, start_index=0,
              pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001,
    nesterov=True)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=10)
evaluation = dict(interval=5,
                  metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook'),
    ])
annealing_runner = True
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_edlloss_nokl_avuc_rebias'  # noqa: E501
load_from = 'https://download.openmmlab.com/mmaction/recognition/tpn/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb_20200923-52629684.pth'
resume_from = None
workflow = [('train', 1)]
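# Sketch (assumption, not part of the original file): 'hsic' in RebiasLoss
# presumably refers to the Hilbert-Schmidt Independence Criterion between
# biased and debiased feature batches. A minimal linear-kernel estimate:
#
#     import torch
#     def hsic(x, y):
#         # biased HSIC estimate for (n, d) feature batches
#         n = x.size(0)
#         h = torch.eye(n) - torch.ones(n, n) / n  # centering matrix
#         kx, ky = x @ x.t(), y @ y.t()            # linear Gram matrices
#         return torch.trace(kx @ h @ ky @ h) / (n - 1) ** 2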
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_edlloss_nokl_avuc_rebias_r50_8x8x1_150e_kinetics_rgb.py
# model settings
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowOnly', depth=50,
        pretrained='torchvision://resnet50', lateral=False,
        out_indices=(2, 3), conv1_kernel=(1, 7, 7), conv1_stride_t=1,
        pool1_stride_t=1, inflate=(0, 0, 1, 1), norm_eval=False),
    neck=dict(
        type='TPN',
        in_channels=(1024, 2048),
        out_channels=1024,
        spatial_modulation_cfg=dict(in_channels=(1024, 2048),
                                    out_channels=2048),
        temporal_modulation_cfg=dict(downsample_scales=(8, 8)),
        upsample_cfg=dict(scale_factor=(1, 1, 1)),
        downsample_cfg=dict(downsample_scale=(1, 1, 1)),
        level_fusion_cfg=dict(
            in_channels=(1024, 1024), mid_channels=(1024, 1024),
            out_channels=2048,
            downsample_scales=((1, 1, 1), (1, 1, 1))),
        aux_head_cfg=dict(out_channels=101, loss_weight=0.5)),
    cls_head=dict(
        type='TPNHead', num_classes=101, in_channels=2048,
        spatial_type='avg', consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5, init_std=0.01))
train_cfg = None
test_cfg = dict(average_clips='prob')
dataset_type = 'VideoDataset'
data_root = 'data/ucf101/videos'
data_root_val = 'data/ucf101/videos'
ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt'
ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt'
ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
    dict(type='OpenCVDecode'),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='ColorJitter', color_space_aug=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1,
         test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='ColorJitter', color_space_aug=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=10,
         test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, start_index=0,
               pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, start_index=0,
             pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, start_index=0,
              pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.002, momentum=0.9, weight_decay=0.0001,
    nesterov=True)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=10)
evaluation = dict(interval=5,
                  metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_celoss'  # noqa: E501
load_from = 'https://download.openmmlab.com/mmaction/recognition/tpn/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb_20200923-52629684.pth'
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_celoss_r50_8x8x1_150e_kinetics_rgb.py
# model settings
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowOnly', depth=50, pretrained=None, lateral=False,
        out_indices=(2, 3), conv1_kernel=(1, 7, 7), conv1_stride_t=1,
        pool1_stride_t=1, inflate=(0, 0, 1, 1), norm_eval=False),
    neck=dict(
        type='TPN',
        in_channels=(1024, 2048),
        out_channels=1024,
        spatial_modulation_cfg=dict(in_channels=(1024, 2048),
                                    out_channels=2048),
        temporal_modulation_cfg=dict(downsample_scales=(8, 8)),
        upsample_cfg=dict(scale_factor=(1, 1, 1)),
        downsample_cfg=dict(downsample_scale=(1, 1, 1)),
        level_fusion_cfg=dict(
            in_channels=(1024, 1024), mid_channels=(1024, 1024),
            out_channels=2048,
            downsample_scales=((1, 1, 1), (1, 1, 1))),
        aux_head_cfg=dict(out_channels=400, loss_weight=0.5)),
    cls_head=dict(
        type='TPNHead', num_classes=400, in_channels=2048,
        spatial_type='avg', consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5, init_std=0.01))
train_cfg = None
test_cfg = dict(average_clips='prob')
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
    dict(type='FrameSelector'),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='ColorJitter', color_space_aug=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1,
         test_mode=True),
    dict(type='FrameSelector'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='ColorJitter', color_space_aug=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=10,
         test_mode=True),
    dict(type='FrameSelector'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=8,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001,
    nesterov=True)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[75, 125])
total_epochs = 150
checkpoint_config = dict(interval=1)
evaluation = dict(interval=5,
                  metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tpn_slowonly_r50_8x8x1_150e_kinetics400_rgb'
load_from = None
resume_from = None
workflow = [('train', 1)]
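# Note: with policy='step' and step=[75, 125], mmcv's StepLrUpdaterHook
# decays the base lr by its default gamma of 0.1 at those epochs:
# 0.01 -> 0.001 (epoch 75) -> 0.0001 (epoch 125).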
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_r50_8x8x1_150e_kinetics_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNetTSM', pretrained='torchvision://resnet50', depth=50,
        out_indices=(2, 3), norm_eval=False, shift_div=8),
    neck=dict(
        type='TPN',
        in_channels=(1024, 2048),
        out_channels=1024,
        spatial_modulation_cfg=dict(in_channels=(1024, 2048),
                                    out_channels=2048),
        temporal_modulation_cfg=dict(downsample_scales=(8, 8)),
        upsample_cfg=dict(scale_factor=(1, 1, 1)),
        downsample_cfg=dict(downsample_scale=(1, 1, 1)),
        level_fusion_cfg=dict(
            in_channels=(1024, 1024), mid_channels=(1024, 1024),
            out_channels=2048,
            downsample_scales=((1, 1, 1), (1, 1, 1))),
        aux_head_cfg=dict(out_channels=174, loss_weight=0.5)),
    cls_head=dict(
        type='TPNHead', num_classes=174, in_channels=2048,
        spatial_type='avg', consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5, init_std=0.01))
train_cfg = None
test_cfg = dict(average_clips=None)
dataset_type = 'RawframeDataset'
data_root = 'data/sthv1/rawframes'
data_root_val = 'data/sthv1/rawframes'
ann_file_train = 'data/sthv1/sthv1_train_list_rawframes.txt'
ann_file_val = 'data/sthv1/sthv1_val_list_rawframes.txt'
ann_file_test = 'data/sthv1/sthv1_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='FrameSelector'),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='ColorJitter', color_space_aug=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8,
         test_mode=True),
    dict(type='FrameSelector'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='ColorJitter', color_space_aug=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=16,
         test_mode=True),
    dict(type='FrameSelector'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=8,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005,
    nesterov=True)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[75, 125])
total_epochs = 150
checkpoint_config = dict(interval=1)
evaluation = dict(interval=5,
                  metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tpn_tsm_r50_8x8x1_150e_kinetics400_rgb'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_tsm_r50_1x1x8_150e_sthv1_rgb.py
# model settings
evidence_loss = dict(
    type='EvidenceLoss', num_classes=101, evidence='exp', loss_type='log',
    with_kldiv=False, with_avuloss=True, annealing_method='exp')
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowOnly', depth=50,
        pretrained='torchvision://resnet50', lateral=False,
        out_indices=(2, 3), conv1_kernel=(1, 7, 7), conv1_stride_t=1,
        pool1_stride_t=1, inflate=(0, 0, 1, 1), norm_eval=False),
    neck=dict(
        type='TPN',
        in_channels=(1024, 2048),
        out_channels=1024,
        spatial_modulation_cfg=dict(in_channels=(1024, 2048),
                                    out_channels=2048),
        temporal_modulation_cfg=dict(downsample_scales=(8, 8)),
        upsample_cfg=dict(scale_factor=(1, 1, 1)),
        downsample_cfg=dict(downsample_scale=(1, 1, 1)),
        level_fusion_cfg=dict(
            in_channels=(1024, 1024), mid_channels=(1024, 1024),
            out_channels=2048,
            downsample_scales=((1, 1, 1), (1, 1, 1))),
        aux_head_cfg=dict(out_channels=101, loss_weight=0.5)),
    cls_head=dict(
        type='TPNHead', loss_cls=evidence_loss, num_classes=101,
        in_channels=2048, spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5, init_std=0.01))
train_cfg = None
test_cfg = dict(average_clips='prob')
dataset_type = 'VideoDataset'
data_root = 'data/ucf101/videos'
data_root_val = 'data/ucf101/videos'
ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt'
ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt'
ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
    dict(type='OpenCVDecode'),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='ColorJitter', color_space_aug=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1,
         test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='ColorJitter', color_space_aug=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=10,
         test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, start_index=0,
               pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, start_index=0,
             pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, start_index=0,
              pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001,
    nesterov=True)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=10)
evaluation = dict(interval=5,
                  metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook'),
    ])
annealing_runner = True
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_edlloss_avuc'  # noqa: E501
load_from = 'https://download.openmmlab.com/mmaction/recognition/tpn/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb_20200923-52629684.pth'
resume_from = None
workflow = [('train', 1)]
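# Sketch (assumption, not part of the original file): `annealing_runner = True`
# presumably selects a runner that feeds the current epoch into EvidenceLoss
# so the regularizer weight can be annealed. An 'exp' schedule that ramps a
# small initial weight up to exactly 1.0 over training could look like:
#
#     import math
#     def annealing_coef(epoch, total_epochs, start=0.01):
#         # equals `start` at epoch 0 and 1.0 at epoch == total_epochs
#         return start * math.exp(-math.log(start) * epoch / total_epochs)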
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_edlloss_nokl_avuc_r50_8x8x1_150e_kinetics_rgb.py