# model settings
evidence_loss = dict(
    type='EvidenceLoss', num_classes=101, evidence='exp', loss_type='log',
    with_kldiv=False, with_avuloss=False, annealing_method='exp')
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowOnly', depth=50, pretrained='torchvision://resnet50',
        lateral=False, out_indices=(2, 3), conv1_kernel=(1, 7, 7),
        conv1_stride_t=1, pool1_stride_t=1, inflate=(0, 0, 1, 1),
        norm_eval=False),
    neck=dict(
        type='TPN', in_channels=(1024, 2048), out_channels=1024,
        spatial_modulation_cfg=dict(in_channels=(1024, 2048), out_channels=2048),
        temporal_modulation_cfg=dict(downsample_scales=(8, 8)),
        upsample_cfg=dict(scale_factor=(1, 1, 1)),
        downsample_cfg=dict(downsample_scale=(1, 1, 1)),
        level_fusion_cfg=dict(
            in_channels=(1024, 1024), mid_channels=(1024, 1024),
            out_channels=2048, downsample_scales=((1, 1, 1), (1, 1, 1))),
        aux_head_cfg=dict(out_channels=101, loss_weight=0.5)),
    cls_head=dict(
        type='TPNHead', loss_cls=evidence_loss, num_classes=101,
        in_channels=2048, spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1), dropout_ratio=0.5,
        init_std=0.01))
train_cfg = None
test_cfg = dict(average_clips='prob')
dataset_type = 'VideoDataset'
data_root = 'data/ucf101/videos'
data_root_val = 'data/ucf101/videos'
ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt'
ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt'
ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
    dict(type='OpenCVDecode'),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='ColorJitter', color_space_aug=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1, test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='ColorJitter', color_space_aug=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=10, test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train, data_prefix=data_root,
               start_index=0, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val, data_prefix=data_root_val,
             start_index=0, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test, data_prefix=data_root_val,
              start_index=0, pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001,
    nesterov=True)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=10)
evaluation = dict(interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook'),
    ])
annealing_runner = True
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_edlloss_avuc'  # noqa: E501
load_from = 'https://download.openmmlab.com/mmaction/recognition/tpn/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb_20200923-52629684.pth'
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_edlloss_nokl_r50_8x8x1_150e_kinetics_rgb.py
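These records are mmaction2-style Python configs: each file is a flat module of assignments that the training and test tools read by name. A minimal sketch of how such a file is typically consumed, assuming mmcv is installed; the config path below is the record's own file_path and is illustrative, not a claim about your local checkout layout:

from mmcv import Config

# Parse the config module into a dot-accessible dict.
cfg = Config.fromfile(
    'configs/recognition/tpn/tpn_slowonly_edlloss_nokl_r50_8x8x1_150e_kinetics_rgb.py')
print(cfg.model.cls_head.num_classes)  # 101 for the UCF-101 configs above
# Fields can be overridden in code before the model/dataloaders are built.
cfg.merge_from_dict({'data.videos_per_gpu': 4})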
# model settings
evidence_loss = dict(
    type='EvidenceLoss', num_classes=101, evidence='exp', loss_type='log',
    with_avuloss=True, annealing_method='exp')
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowOnly', depth=50, pretrained='torchvision://resnet50',
        lateral=False, out_indices=(2, 3), conv1_kernel=(1, 7, 7),
        conv1_stride_t=1, pool1_stride_t=1, inflate=(0, 0, 1, 1),
        norm_eval=False),
    neck=dict(
        type='TPN', in_channels=(1024, 2048), out_channels=1024,
        spatial_modulation_cfg=dict(in_channels=(1024, 2048), out_channels=2048),
        temporal_modulation_cfg=dict(downsample_scales=(8, 8)),
        upsample_cfg=dict(scale_factor=(1, 1, 1)),
        downsample_cfg=dict(downsample_scale=(1, 1, 1)),
        level_fusion_cfg=dict(
            in_channels=(1024, 1024), mid_channels=(1024, 1024),
            out_channels=2048, downsample_scales=((1, 1, 1), (1, 1, 1))),
        aux_head_cfg=dict(out_channels=101, loss_weight=0.5)),
    cls_head=dict(
        type='TPNHead', loss_cls=evidence_loss, num_classes=101,
        in_channels=2048, spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1), dropout_ratio=0.5,
        init_std=0.01))
train_cfg = None
test_cfg = dict(average_clips='prob')
dataset_type = 'VideoDataset'
data_root = 'data/ucf101/videos'
data_root_val = 'data/ucf101/videos'
ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt'
ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt'
ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
    dict(type='OpenCVDecode'),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='ColorJitter', color_space_aug=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1, test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='ColorJitter', color_space_aug=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=10, test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train, data_prefix=data_root,
               start_index=0, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val, data_prefix=data_root_val,
             start_index=0, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test, data_prefix=data_root_val,
              start_index=0, pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.002, momentum=0.9, weight_decay=0.0001,
    nesterov=True)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=10)
evaluation = dict(interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook'),
    ])
annealing_runner = True
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_edlloss_avuc'  # noqa: E501
load_from = 'https://download.openmmlab.com/mmaction/recognition/tpn/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb_20200923-52629684.pth'
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_edlloss_avuc_r50_8x8x1_150e_kinetics_rgb.py
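Both EDL configs above sample clips with SampleFrames(clip_len=8, frame_interval=8, num_clips=1): 8 frames spaced 8 apart, so one clip spans a 64-frame window. A rough sketch of that indexing for intuition; this is an approximation, not mmaction2's actual SampleFrames implementation:

import random

def sample_clip(total_frames, clip_len=8, frame_interval=8):
    """Pick clip_len frame indices spaced frame_interval apart."""
    span = clip_len * frame_interval  # 64 frames covered by one clip
    offset = random.randint(0, max(0, total_frames - span))
    return [min(offset + i * frame_interval, total_frames - 1)
            for i in range(clip_len)]

print(sample_clip(300))  # e.g. [37, 45, 53, 61, 69, 77, 85, 93]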
# model settings
model = dict(
    type='Recognizer3DRPL',
    backbone=dict(
        type='ResNet3dSlowOnly', depth=50, pretrained='torchvision://resnet50',
        lateral=False, out_indices=(2, 3), conv1_kernel=(1, 7, 7),
        conv1_stride_t=1, pool1_stride_t=1, inflate=(0, 0, 1, 1),
        norm_eval=False),
    neck=dict(
        type='TPN', in_channels=(1024, 2048), out_channels=1024,
        spatial_modulation_cfg=dict(in_channels=(1024, 2048), out_channels=2048),
        temporal_modulation_cfg=dict(downsample_scales=(8, 8)),
        upsample_cfg=dict(scale_factor=(1, 1, 1)),
        downsample_cfg=dict(downsample_scale=(1, 1, 1)),
        level_fusion_cfg=dict(
            in_channels=(1024, 1024), mid_channels=(1024, 1024),
            out_channels=2048, downsample_scales=((1, 1, 1), (1, 1, 1))),
        aux_head_cfg=dict(out_channels=101, loss_weight=0.5)),
    cls_head=dict(
        type='TPNRPLHead',
        loss_cls=dict(type='RPLoss', temperature=1, weight_pl=0.1),
        num_classes=101, in_channels=2048, spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1), dropout_ratio=0.5,
        init_std=0.01))
train_cfg = None
test_cfg = dict(average_clips='prob')
dataset_type = 'VideoDataset'
data_root = 'data/ucf101/videos'
data_root_val = 'data/ucf101/videos'
ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt'
ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt'
ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
    dict(type='OpenCVDecode'),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='ColorJitter', color_space_aug=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1, test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='ColorJitter', color_space_aug=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=10, test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train, data_prefix=data_root,
               start_index=0, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val, data_prefix=data_root_val,
             start_index=0, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test, data_prefix=data_root_val,
              start_index=0, pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.002, momentum=0.9, weight_decay=0.0001,
    nesterov=True)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=10)
evaluation = dict(interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_rpl'  # noqa: E501
load_from = 'https://download.openmmlab.com/mmaction/recognition/tpn/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb_20200923-52629684.pth'
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/finetune_ucf101_tpn_slowonly_rpl.py
# model settings
model = dict(
    type='Recognizer3DBNN',
    backbone=dict(
        type='ResNet3dSlowOnly', depth=50, pretrained='torchvision://resnet50',
        lateral=False, out_indices=(2, 3), conv1_kernel=(1, 7, 7),
        conv1_stride_t=1, pool1_stride_t=1, inflate=(0, 0, 1, 1),
        norm_eval=False),
    neck=dict(
        type='TPN', in_channels=(1024, 2048), out_channels=1024,
        spatial_modulation_cfg=dict(in_channels=(1024, 2048), out_channels=2048),
        temporal_modulation_cfg=dict(downsample_scales=(8, 8)),
        upsample_cfg=dict(scale_factor=(1, 1, 1)),
        downsample_cfg=dict(downsample_scale=(1, 1, 1)),
        level_fusion_cfg=dict(
            in_channels=(1024, 1024), mid_channels=(1024, 1024),
            out_channels=2048, downsample_scales=((1, 1, 1), (1, 1, 1))),
        aux_head_cfg=dict(out_channels=101, loss_weight=0.5)),
    cls_head=dict(
        type='TPNBNNHead',
        loss_cls=dict(type='BayesianNNLoss'),
        num_classes=101, in_channels=2048, spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0,  # for bnn, dropout is not necessary
        init_std=0.01))
train_cfg = dict(loss_weight=1e-6, npass=2)
test_cfg = dict(average_clips='prob', npass=10)
dataset_type = 'VideoDataset'
data_root = 'data/ucf101/videos'
data_root_val = 'data/ucf101/videos'
ann_file_train = 'data/ucf101/ucf101_train_split_1_videos.txt'
ann_file_val = 'data/ucf101/ucf101_val_split_1_videos.txt'
ann_file_test = 'data/ucf101/ucf101_val_split_1_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
    dict(type='OpenCVDecode'),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='ColorJitter', color_space_aug=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1, test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='ColorJitter', color_space_aug=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=10, test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train, data_prefix=data_root,
               start_index=0, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val, data_prefix=data_root_val,
             start_index=0, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test, data_prefix=data_root_val,
              start_index=0, pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.002, momentum=0.9, weight_decay=0.0001,
    nesterov=True)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=10)
evaluation = dict(interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tpn_slowonly/finetune_ucf101_tpn_slowonly_bnn'  # noqa: E501
load_from = 'https://download.openmmlab.com/mmaction/recognition/tpn/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb/tpn_imagenet_pretrained_slowonly_r50_8x8x1_150e_kinetics_rgb_20200923-52629684.pth'
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/tpn_slowonly_bnn_r50_8x8x1_150e_kinetics_rgb.py
# model settings
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowOnly', depth=50, pretrained='torchvision://resnet50',
        lateral=False, out_indices=(2, 3), conv1_kernel=(1, 7, 7),
        conv1_stride_t=1, pool1_stride_t=1, inflate=(0, 0, 1, 1),
        norm_eval=False),
    neck=dict(
        type='TPN', in_channels=(1024, 2048), out_channels=1024,
        spatial_modulation_cfg=dict(in_channels=(1024, 2048), out_channels=2048),
        temporal_modulation_cfg=dict(downsample_scales=(8, 8)),
        upsample_cfg=dict(scale_factor=(1, 1, 1)),
        downsample_cfg=dict(downsample_scale=(1, 1, 1)),
        level_fusion_cfg=dict(
            in_channels=(1024, 1024), mid_channels=(1024, 1024),
            out_channels=2048, downsample_scales=((1, 1, 1), (1, 1, 1)))),
    cls_head=dict(
        type='TPNHead', num_classes=101, in_channels=2048, spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1), dropout_ratio=0.5,
        init_std=0.01))
evidence = 'exp'  # only used for EDL
test_cfg = dict(average_clips='prob')
dataset_type = 'VideoDataset'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=10, test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=2,
    workers_per_gpu=2,
    test=dict(type=dataset_type, ann_file=None, data_prefix=None,
              pipeline=test_pipeline))
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/inference_tpn_slowonly_dnn.py
# model settings
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowOnly', depth=50, pretrained='torchvision://resnet50',
        lateral=False, out_indices=(2, 3), conv1_kernel=(1, 7, 7),
        conv1_stride_t=1, pool1_stride_t=1, inflate=(0, 0, 1, 1),
        norm_eval=False),
    neck=dict(
        type='TPN', in_channels=(1024, 2048), out_channels=1024,
        spatial_modulation_cfg=dict(in_channels=(1024, 2048), out_channels=2048),
        temporal_modulation_cfg=dict(downsample_scales=(8, 8)),
        upsample_cfg=dict(scale_factor=(1, 1, 1)),
        downsample_cfg=dict(downsample_scale=(1, 1, 1)),
        level_fusion_cfg=dict(
            in_channels=(1024, 1024), mid_channels=(1024, 1024),
            out_channels=2048, downsample_scales=((1, 1, 1), (1, 1, 1)))),
    cls_head=dict(
        type='TPNHead', num_classes=101, in_channels=2048, spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1), dropout_ratio=0.5,
        init_std=0.01))
evidence = 'exp'  # only used for EDL
test_cfg = dict(average_clips='score')
dataset_type = 'VideoDataset'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=10, test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=2,
    workers_per_gpu=2,
    test=dict(type=dataset_type, ann_file=None, data_prefix=None,
              pipeline=test_pipeline))
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tpn/inference_tpn_slowonly_enn.py
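The two inference configs above differ only in average_clips ('prob' vs. 'score'), and their evidence='exp' flag marks how an EDL head turns logits into Dirichlet evidence. A sketch of the standard evidential-deep-learning readout (Sensoy et al., 2018) consistent with evidence='exp'; this mirrors the usual formulation and is not claimed to be this repo's exact code:

import numpy as np

def edl_readout(logits):
    """Map logits to expected class probabilities and an uncertainty score."""
    evidence = np.exp(logits)                   # evidence='exp'
    alpha = evidence + 1.0                      # Dirichlet concentration
    strength = alpha.sum(-1, keepdims=True)     # S = sum_k alpha_k
    probs = alpha / strength                    # expected class probabilities
    uncertainty = logits.shape[-1] / strength   # u = K / S, in (0, 1]
    return probs, uncertainty

probs, u = edl_readout(np.array([2.0, 0.1, -1.0]))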
# model settings
model = dict(
    type='Recognizer3D',
    backbone=dict(type='X3D', gamma_w=1, gamma_b=2.25, gamma_d=2.2),
    cls_head=dict(
        type='X3DHead', in_channels=432, num_classes=400, spatial_type='avg',
        dropout_ratio=0.5, fc1_bias=False))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'RawframeDataset'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[114.75, 114.75, 114.75], std=[57.38, 57.38, 57.38], to_bgr=False)
test_pipeline = [
    dict(type='SampleFrames', clip_len=16, frame_interval=5, num_clips=10, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=1,
    workers_per_gpu=2,
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
dist_params = dict(backend='nccl')
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/x3d/x3d_m_16x5x1_facebook_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer3D',
    backbone=dict(type='X3D', gamma_w=1, gamma_b=2.25, gamma_d=2.2),
    cls_head=dict(
        type='X3DHead', in_channels=432, num_classes=400, spatial_type='avg',
        dropout_ratio=0.5, fc1_bias=False))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'RawframeDataset'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[114.75, 114.75, 114.75], std=[57.38, 57.38, 57.38], to_bgr=False)
test_pipeline = [
    dict(type='SampleFrames', clip_len=13, frame_interval=6, num_clips=10, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 192)),
    dict(type='CenterCrop', crop_size=192),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=1,
    workers_per_gpu=2,
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
dist_params = dict(backend='nccl')
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/x3d/x3d_s_13x6x1_facebook_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(type='ResNet', pretrained='torchvision://resnet50', depth=50,
                  norm_eval=False),
    cls_head=dict(
        type='TSNHead', num_classes=400, in_channels=2048, spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1), dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
test_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=25, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=1,
    workers_per_gpu=2,
    test=dict(type=dataset_type, ann_file=None, data_prefix=None,
              pipeline=test_pipeline))
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_inference_1x1x3_100e_kinetics400_rgb.py
model = dict(
    type='Recognizer2D',
    backbone=dict(type='ResNet', pretrained='torchvision://resnet50', depth=50,
                  norm_eval=False),
    cls_head=dict(
        type='TSNHead', num_classes=51, in_channels=2048, spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1), dropout_ratio=0.4,
        init_std=0.01))
train_cfg = None
test_cfg = dict(average_clips=None)
dataset_type = 'RawframeDataset'
data_root = 'data/hmdb51/rawframes'
data_root_val = 'data/hmdb51/rawframes'
ann_file_train = 'data/hmdb51/hmdb51_train_split_1_rawframes.txt'
ann_file_val = 'data/hmdb51/hmdb51_val_split_1_rawframes.txt'
ann_file_test = 'data/hmdb51/hmdb51_val_split_1_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=32,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train, data_prefix=data_root,
               pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val, data_prefix=data_root_val,
             pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_val, data_prefix=data_root_val,
              pipeline=test_pipeline))
optimizer = dict(type='SGD', lr=0.025, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=5)
evaluation = dict(
    interval=1, metrics=['top_k_accuracy', 'mean_class_accuracy'], topk=(1, 5))
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsn_r50_1x1x8_50e_hmdb51_kinetics400_rgb/'
load_from = 'https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_256p_1x1x8_100e_kinetics400_rgb/tsn_r50_256p_1x1x8_100e_kinetics400_rgb_20200817-883baf16.pth'  # noqa: E501
resume_from = None
workflow = [('train', 1)]
gpu_ids = range(0, 1)
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x8_50e_hmdb51_kinetics400_rgb.py
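The fine-tuning configs above all use lr_config = dict(policy='step', step=[20, 40]), which decays the learning rate at those epochs. A toy sketch of that schedule, assuming the decay factor defaults to 0.1 (gamma is not set explicitly in these configs, so this value is an assumption):

def step_lr(epoch, base_lr, steps=(20, 40), gamma=0.1):
    """LR after applying one decay per step boundary the epoch has passed."""
    return base_lr * gamma ** sum(epoch >= s for s in steps)

# For the HMDB-51 config above (base lr 0.025):
# epochs 0-19 -> 0.025, epochs 20-39 -> 0.0025, epochs 40-49 -> 0.00025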
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(type='ResNet', pretrained='torchvision://resnet50', depth=50,
                  norm_eval=False),
    cls_head=dict(
        type='TSNHead', num_classes=400, in_channels=2048, spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1), dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='MultiScaleCrop', input_size=224, scales=(1, 0.875, 0.75, 0.66),
         random_crop=False, max_wh_scale_gap=1),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=25, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='TenCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=32,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train, data_prefix=data_root,
               pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val, data_prefix=data_root_val,
             pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test, data_prefix=data_root_val,
              pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.01, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=5)
evaluation = dict(interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsn_r50_1x1x3_100e_kinetics400_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(type='ResNet', pretrained='torchvision://resnet50', depth=50,
                  norm_eval=False),
    cls_head=dict(
        type='TSNHead', num_classes=400, in_channels=2048, spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1), dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train_320p'
data_root_val = 'data/kinetics400/rawframes_val_320p'
ann_file_train = 'data/kinetics400/kinetics_flow_train_list.txt'
ann_file_val = 'data/kinetics400/kinetics_flow_val_list.txt'
ann_file_test = 'data/kinetics400/kinetics_flow_val_list.txt'
img_norm_cfg = dict(mean=[128, 128], std=[128, 128], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=5, frame_interval=1, num_clips=3),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW_Flow'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=5, frame_interval=1, num_clips=3, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW_Flow'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=5, frame_interval=1, num_clips=25, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='TenCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW_Flow'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=32,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train, data_prefix=data_root,
               filename_tmpl='{}_{:05d}.jpg', modality='Flow',
               pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val, data_prefix=data_root_val,
             filename_tmpl='{}_{:05d}.jpg', modality='Flow',
             pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test, data_prefix=data_root_val,
              filename_tmpl='{}_{:05d}.jpg', modality='Flow',
              pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.005, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[70, 100])
total_epochs = 110
checkpoint_config = dict(interval=5)
evaluation = dict(interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsn_r50_320p_1x1x3_110e_kinetics400_flow/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x3_110e_kinetics400_flow.py
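The flow config above pairs modality='Flow' with filename_tmpl='{}_{:05d}.jpg': the '{}' slot is filled with the flow direction and the number with the frame index. A quick illustration of the template itself; the 'x'/'y' direction names are the common mmaction2 convention for optical flow and are assumed here:

tmpl = '{}_{:05d}.jpg'
print(tmpl.format('x', 12))  # x_00012.jpg  (horizontal flow, frame 12)
print(tmpl.format('y', 12))  # y_00012.jpg  (vertical flow, frame 12)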
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(type='ResNet', pretrained='torchvision://resnet50', depth=50,
                  norm_eval=False),
    cls_head=dict(
        type='TSNHead', num_classes=400, in_channels=2048, spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1), dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'VideoDataset'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
test_pipeline = [
    dict(type='OpenCVInit', num_threads=1),
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=25, test_mode=True),
    dict(type='OpenCVDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='TenCrop', crop_size=224),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=1,
    workers_per_gpu=2,
    test=dict(type=dataset_type, ann_file=None, data_prefix=None,
              pipeline=test_pipeline))
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_video_inference_1x1x3_100e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(type='ResNet', pretrained='torchvision://resnet50', depth=50,
                  norm_eval=False),
    cls_head=dict(
        type='TSNHead', num_classes=400, in_channels=2048, spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1), dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'VideoDataset'
data_root = 'data/kinetics400/videos_train'
data_root_val = 'data/kinetics400/videos_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='DecordDecode'),
    dict(type='MultiScaleCrop', input_size=224, scales=(1, 0.875, 0.75, 0.66),
         random_crop=False, max_wh_scale_gap=1),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8, test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=25, test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=32,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train, data_prefix=data_root,
               pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val, data_prefix=data_root_val,
             pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test, data_prefix=data_root_val,
              pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.01, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=1)
evaluation = dict(interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsn_r50_video_1x1x8_100e_kinetics400_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_video_1x1x8_100e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(type='ResNet', pretrained='torchvision://resnet50', depth=50,
                  norm_eval=False),
    cls_head=dict(
        type='TSNHead', num_classes=400, in_channels=2048, spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1), dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3),
    dict(type='FrameSelector'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='MultiScaleCrop', input_size=224, scales=(1, 0.875, 0.75, 0.66),
         random_crop=False, max_wh_scale_gap=1),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3, test_mode=True),
    dict(type='FrameSelector'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=25, test_mode=True),
    dict(type='FrameSelector'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='TenCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=32,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train, data_prefix=data_root,
               pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val, data_prefix=data_root_val,
             pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test, data_prefix=data_root_val,
              pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.01, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=5)
evaluation = dict(interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# fp16 settings
fp16 = dict()
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsn_fp16_r50_1x1x3_100e_kinetics400_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_fp16_r50_1x1x3_100e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet',
        pretrained='modelzoo/tsn_r50_320p_1x1x8_110e_kinetics400_flow.pth',
        depth=50, in_channels=10, norm_eval=False),
    cls_head=dict(
        type='TSNHead', num_classes=200, in_channels=2048, spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1), dropout_ratio=0.8,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/ActivityNet/flow'
data_root_val = 'data/ActivityNet/flow'
ann_file_train = 'data/ActivityNet/anet_train_video.txt'
ann_file_val = 'data/ActivityNet/anet_val_video.txt'
ann_file_test = 'data/ActivityNet/anet_val_clip.txt'
img_norm_cfg = dict(mean=[128, 128], std=[128, 128], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=5, frame_interval=1, num_clips=8),
    dict(type='FrameSelector'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW_Flow'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=5, frame_interval=1, num_clips=8, test_mode=True),
    dict(type='FrameSelector'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW_Flow'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=5, frame_interval=1, num_clips=25, test_mode=True),
    dict(type='FrameSelector'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='TenCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW_Flow'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train, data_prefix=data_root,
               filename_tmpl='flow_{}_{:05d}.jpg', with_offset=True,
               modality='Flow', start_index=0, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val, data_prefix=data_root_val,
             filename_tmpl='flow_{}_{:05d}.jpg', with_offset=True,
             modality='Flow', start_index=0, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test, data_prefix=data_root_val,
              filename_tmpl='flow_{}_{:05d}.jpg', with_offset=True,
              modality='Flow', start_index=0, pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.001, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[60, 120])
total_epochs = 150
checkpoint_config = dict(interval=5)
evaluation = dict(interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsn_r50_320p_1x1x8_150e_activitynet_video_flow/'
load_from = None
resume_from = None
workflow = [('train', 5)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x8_150e_activitynet_video_flow.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet', pretrained='torchvision://resnet50', depth=50,
        norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=True),
    cls_head=dict(
        type='TSNHead', num_classes=174, in_channels=2048, spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1), dropout_ratio=0.5,
        init_std=0.001))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/sthv1/rawframes'
data_root_val = 'data/sthv1/rawframes'
ann_file_train = 'data/sthv1/sthv1_train_list_rawframes.txt'
ann_file_val = 'data/sthv1/sthv1_val_list_rawframes.txt'
ann_file_test = 'data/sthv1/sthv1_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=16),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='MultiScaleCrop', input_size=224, scales=(1, 0.875, 0.75, 0.66),
         random_crop=False, max_wh_scale_gap=1, num_fixed_crops=13),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=4,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train, data_prefix=data_root,
               filename_tmpl='{:05}.jpg', pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val, data_prefix=data_root_val,
             filename_tmpl='{:05}.jpg', pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test, data_prefix=data_root_val,
              filename_tmpl='{:05}.jpg', pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.01, momentum=0.9,
    weight_decay=0.0005)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=1)
evaluation = dict(interval=2, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsn_r50_1x1x16_50e_sthv1_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x16_50e_sthv1_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(type='ResNet', pretrained='torchvision://resnet50', depth=50,
                  norm_eval=False),
    cls_head=dict(
        type='TSNHead', num_classes=400, in_channels=2048, spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1), dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'VideoDataset'
data_root = 'data/kinetics400/videos_train'
data_root_val = 'data/kinetics400/videos_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3),
    dict(type='DecordDecode'),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3, test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=25, test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='TenCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=32,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train, data_prefix=data_root,
               pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val, data_prefix=data_root_val,
             pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test, data_prefix=data_root_val,
              pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.01, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=1)
evaluation = dict(interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsn_r50_video_1x1x3_100e_kinetics400_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_video_320p_1x1x3_100e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(type='ResNet', pretrained='torchvision://resnet50', depth=50,
                  norm_eval=False),
    cls_head=dict(
        type='TSNHead', num_classes=174, in_channels=2048, spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1), dropout_ratio=0.5,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/sthv2/rawframes'
data_root_val = 'data/sthv2/rawframes'
ann_file_train = 'data/sthv2/sthv2_train_list_rawframes.txt'
ann_file_val = 'data/sthv2/sthv2_val_list_rawframes.txt'
ann_file_test = 'data/sthv2/sthv2_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='MultiScaleCrop', input_size=224, scales=(1, 0.875, 0.75, 0.66),
         random_crop=False, max_wh_scale_gap=1),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='TenCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=16,
    workers_per_gpu=2,
    train=dict(type=dataset_type, ann_file=ann_file_train, data_prefix=data_root,
               pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val, data_prefix=data_root_val,
             pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test, data_prefix=data_root_val,
              pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.02, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=5)
evaluation = dict(interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsn_r50_1x1x8_50e_sthv2_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x8_50e_sthv2_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'VideoDataset'
data_root = 'data/kinetics400/videos_train'
data_root_val = 'data/kinetics400/videos_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='DenseSampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='DecordDecode'),
    dict(
        type='MultiScaleCrop',
        input_size=224,
        scales=(1, 0.875, 0.75, 0.66),
        random_crop=False,
        max_wh_scale_gap=1),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='DenseSampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='DenseSampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=32,
    workers_per_gpu=4,
    val_dataloader=dict(videos_per_gpu=1),
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.01, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=1)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsn_r50_video_dense_1x1x8_100e_kinetics400_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_video_dense_1x1x8_100e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='DenseSampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(
        type='MultiScaleCrop',
        input_size=224,
        scales=(1, 0.875, 0.75, 0.66),
        random_crop=False,
        max_wh_scale_gap=1,
        num_fixed_crops=13),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(
        type='DenseSampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(
        type='DenseSampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=12,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.005, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=1)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsn_r50_dense_1x1x8_100e_kinetics400_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_dense_1x1x8_100e_kinetics400_rgb.py
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=51,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.4,
        init_std=0.01))
train_cfg = None
test_cfg = dict(average_clips=None)
dataset_type = 'RawframeDataset'
data_root = 'data/hmdb51/rawframes'
data_root_val = 'data/hmdb51/rawframes'
ann_file_train = 'data/hmdb51/hmdb51_train_split_1_rawframes.txt'
ann_file_val = 'data/hmdb51/hmdb51_val_split_1_rawframes.txt'
ann_file_test = 'data/hmdb51/hmdb51_val_split_1_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
optimizer = dict(type='SGD', lr=0.025, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=5)
evaluation = dict(
    interval=1,
    metrics=['top_k_accuracy', 'mean_class_accuracy'],
    topk=(1, 5))
log_config = dict(
    interval=5,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsn_r50_1x1x8_50e_hmdb51_mit_rgb/'
load_from = 'https://download.openmmlab.com/mmaction/recognition/tsn/tsn_r50_1x1x6_100e_mit_rgb/tsn_r50_1x1x6_100e_mit_rgb_20200618-d512ab1b.pth'  # noqa: E501
resume_from = None
workflow = [('train', 1)]
gpu_ids = range(0, 1)
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x8_50e_hmdb51_mit_rgb.py
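The "# this lr is used for 8 gpus" comments that recur throughout these configs follow the common linear-scaling rule, where the learning rate grows with the total batch size. A hedged sketch of that rule; the helper name and the exact proportionality are illustrative assumptions, not repo API.

def scaled_lr(base_lr, base_gpus=8, base_videos_per_gpu=32,
              gpus=1, videos_per_gpu=32):
    """Linear scaling: lr proportional to total samples per step."""
    return base_lr * (gpus * videos_per_gpu) / (base_gpus * base_videos_per_gpu)

# e.g. the hmdb51 recipe above (lr=0.025 quoted for 8 GPUs) run on one GPU,
# as its gpu_ids = range(0, 1) suggests:
print(scaled_lr(0.025))  # 0.003125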
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet',
        pretrained='modelzoo/tsn_r50_320p_1x1x8_100e_kinetics400_rgb.pth',
        depth=50,
        norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=200,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.8,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/ActivityNet/rgb'
data_root_val = 'data/ActivityNet/rgb'
ann_file_train = 'data/ActivityNet/anet_train_clip.txt'
ann_file_val = 'data/ActivityNet/anet_val_clip.txt'
ann_file_test = 'data/ActivityNet/anet_val_clip.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='FrameSelector'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='FrameSelector'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=25,
        test_mode=True),
    dict(type='FrameSelector'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        pipeline=train_pipeline,
        with_offset=True,
        start_index=0,
        filename_tmpl='image_{:05d}.jpg'),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline,
        with_offset=True,
        start_index=0,
        filename_tmpl='image_{:05d}.jpg'),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        pipeline=test_pipeline,
        with_offset=True,
        start_index=0,
        filename_tmpl='image_{:05d}.jpg'))
# optimizer
optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001)
# this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=1)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsn_r50_320p_1x1x8_50e_activitynet_clip_rgb/'
load_from = None
resume_from = None
workflow = [('train', 5)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x8_50e_activitynet_clip_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet',
        pretrained='modelzoo/tsn_r50_320p_1x1x8_100e_kinetics400_rgb.pth',
        depth=50,
        norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=200,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.8,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/ActivityNet/rgb'
data_root_val = 'data/ActivityNet/rgb'
ann_file_train = 'data/ActivityNet/anet_train_video.txt'
ann_file_val = 'data/ActivityNet/anet_val_video.txt'
ann_file_test = 'data/ActivityNet/anet_val_clip.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='FrameSelector'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='FrameSelector'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=25,
        test_mode=True),
    dict(type='FrameSelector'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        pipeline=train_pipeline,
        with_offset=True,
        start_index=0,
        filename_tmpl='image_{:05d}.jpg'),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline,
        with_offset=True,
        start_index=0,
        filename_tmpl='image_{:05d}.jpg'),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        pipeline=test_pipeline,
        with_offset=True,
        start_index=0,
        filename_tmpl='image_{:05d}.jpg'))
# optimizer
optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001)
# this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=1)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsn_r50_320p_1x1x8_50e_activitynet_video_rgb/'
load_from = None
resume_from = None
workflow = [('train', 5)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x8_50e_activitynet_video_rgb.py
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=51,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.4,
        init_std=0.01))
train_cfg = None
test_cfg = dict(average_clips=None)
dataset_type = 'RawframeDataset'
data_root = 'data/hmdb51/rawframes'
data_root_val = 'data/hmdb51/rawframes'
ann_file_train = 'data/hmdb51/hmdb51_train_split_1_rawframes.txt'
ann_file_val = 'data/hmdb51/hmdb51_val_split_1_rawframes.txt'
ann_file_test = 'data/hmdb51/hmdb51_val_split_1_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=32,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
optimizer = dict(type='SGD', lr=0.025, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=5)
evaluation = dict(
    interval=1,
    metrics=['top_k_accuracy', 'mean_class_accuracy'],
    topk=(1, 5))
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsn_r50_1x1x8_50e_hmdb51_imagenet_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
gpu_ids = range(0, 1)
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x8_50e_hmdb51_imagenet_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=339,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/mit/rawframes/training'
# leading slash dropped so the val path is relative, like the train path
data_root_val = 'data/mit/rawframes/validation/'
ann_file_train = 'data/mit/mit_train_list_rawframes.txt'
ann_file_val = 'data/mit/mit_val_list_rawframes.txt'
ann_file_test = 'data/mit/mit_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
# FormatShape is given an explicit input_format ('NCHW'), as in the other
# TSN configs; the transform needs it to lay out the batch correctly.
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=6),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(
        type='MultiScaleCrop',
        input_size=224,
        scales=(1, 0.8),
        random_crop=False,
        max_wh_scale_gap=0),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=6,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=6,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=16,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.005, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=5)
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsn_r50_1x1x6_100e_mit_rgb'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x6_100e_mit_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5,
        init_std=0.001))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='DenseSampleFrames', clip_len=1, frame_interval=1, num_clips=5),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(
        type='MultiScaleCrop',
        input_size=224,
        scales=(1, 0.875, 0.75, 0.66),
        random_crop=False,
        max_wh_scale_gap=1,
        num_fixed_crops=13),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(
        type='DenseSampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(
        type='DenseSampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=16,
    workers_per_gpu=4,
    val_dataloader=dict(videos_per_gpu=4),
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.03, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=1)
evaluation = dict(
    interval=2, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsn_r50_dense_1x1x5_100e_kinetics400_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_dense_1x1x5_100e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet',
        pretrained='modelzoo/tsn_r50_320p_1x1x8_110e_kinetics400_flow.pth',
        depth=50,
        in_channels=10,
        norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=200,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.8,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/ActivityNet/flow'
data_root_val = 'data/ActivityNet/flow'
ann_file_train = 'data/ActivityNet/anet_train_clip.txt'
ann_file_val = 'data/ActivityNet/anet_val_clip.txt'
ann_file_test = 'data/ActivityNet/anet_val_clip.txt'
img_norm_cfg = dict(mean=[128, 128], std=[128, 128], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=5, frame_interval=1, num_clips=8),
    dict(type='FrameSelector'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW_Flow'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=5,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='FrameSelector'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW_Flow'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=5,
        frame_interval=1,
        num_clips=25,
        test_mode=True),
    dict(type='FrameSelector'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='TenCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW_Flow'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        filename_tmpl='flow_{}_{:05d}.jpg',
        with_offset=True,
        modality='Flow',
        start_index=0,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        filename_tmpl='flow_{}_{:05d}.jpg',
        with_offset=True,
        modality='Flow',
        start_index=0,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        filename_tmpl='flow_{}_{:05d}.jpg',
        with_offset=True,
        modality='Flow',
        start_index=0,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0001)
# this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[60, 120])
total_epochs = 150
checkpoint_config = dict(interval=5)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsn_r50_320p_1x1x8_150e_activitynet_clip_flow/'
load_from = None
resume_from = None
workflow = [('train', 5)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x8_150e_activitynet_clip_flow.py
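The flow configs tie three numbers together: `clip_len=5` in SampleFrames, `input_format='NCHW_Flow'`, and `in_channels=10` in the backbone. Each sampled flow frame carries an x- and a y-displacement image, and the stacking happens along the channel axis. A small illustrative check of that arithmetic:

clip_len = 5   # flow frames sampled per clip (SampleFrames above)
flow_axes = 2  # one x- and one y-displacement image per frame
assert clip_len * flow_axes == 10  # matches backbone in_channels=10 above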
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train_320p'
data_root_val = 'data/kinetics400/rawframes_val_320p'
ann_file_train = 'data/kinetics400/kinetics_flow_train_list.txt'
ann_file_val = 'data/kinetics400/kinetics_flow_val_list.txt'
ann_file_test = 'data/kinetics400/kinetics_flow_val_list.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=5, frame_interval=1, num_clips=8),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW_Flow'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=5,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW_Flow'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=5,
        frame_interval=1,
        num_clips=25,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='TenCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW_Flow'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=12,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        filename_tmpl='{}_{:05d}.jpg',
        modality='Flow',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        filename_tmpl='{}_{:05d}.jpg',
        modality='Flow',
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        filename_tmpl='{}_{:05d}.jpg',
        modality='Flow',
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.001875, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[70, 100])
total_epochs = 110
checkpoint_config = dict(interval=5)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsn_r50_320p_1x1x8_110e_kinetics400_flow/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x8_110e_kinetics400_flow.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        norm_eval=True),
    cls_head=dict(
        type='TSNHead',
        num_classes=174,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/sthv1/rawframes'
data_root_val = 'data/sthv1/rawframes'
ann_file_train = 'data/sthv1/sthv1_train_list_rawframes.txt'
ann_file_val = 'data/sthv1/sthv1_val_list_rawframes.txt'
ann_file_test = 'data/sthv1/sthv1_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(
        type='MultiScaleCrop',
        input_size=224,
        scales=(1, 0.875, 0.75, 0.66),
        random_crop=False,
        max_wh_scale_gap=1,
        num_fixed_crops=13),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='TenCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=16,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        filename_tmpl='{:05}.jpg',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        filename_tmpl='{:05}.jpg',
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        filename_tmpl='{:05}.jpg',
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.02, momentum=0.9,
    weight_decay=0.0005)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=5)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsn_r50_1x1x8_50e_sthv1_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x8_50e_sthv1_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=174,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.5,
        init_std=0.001))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/sthv2/rawframes'
data_root_val = 'data/sthv2/rawframes'
ann_file_train = 'data/sthv2/sthv2_train_list_rawframes.txt'
ann_file_val = 'data/sthv2/sthv2_val_list_rawframes.txt'
ann_file_test = 'data/sthv2/sthv2_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=16),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(
        type='MultiScaleCrop',
        input_size=224,
        scales=(1, 0.875, 0.75, 0.66),
        random_crop=False,
        max_wh_scale_gap=1),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
# test_mode=True added in val/test sampling so evaluation uses deterministic
# uniform sampling, consistent with the other configs in this directory.
val_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=16,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=16,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='TenCrop', crop_size=224),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=4,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.005, momentum=0.9,
    weight_decay=0.0005)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=1)
evaluation = dict(
    interval=2, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsn_r50_1x1x16_50e_sthv2_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x16_50e_sthv2_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=101,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.4,
        init_std=0.001))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/ucf101/rawframes/'
data_root_val = 'data/ucf101/rawframes/'
split = 1  # official train/test splits. valid numbers: 1, 2, 3
ann_file_train = f'data/ucf101/ucf101_train_split_{split}_rawframes.txt'
ann_file_val = f'data/ucf101/ucf101_val_split_{split}_rawframes.txt'
ann_file_test = f'data/ucf101/ucf101_val_split_{split}_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=3,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=25,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=32,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.00128, momentum=0.9,
    weight_decay=0.0005)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[])
total_epochs = 75
checkpoint_config = dict(interval=5)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = f'./work_dirs/tsn_r50_1x1x3_75e_ucf101_split_{split}_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_1x1x3_75e_ucf101_rgb.py
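The UCF101 config above selects one official split through the `split` variable and f-strings. Regenerating the annotation file names for all three splits is plain string formatting, as this illustrative snippet shows:

for split in (1, 2, 3):
    ann_file_train = f'data/ucf101/ucf101_train_split_{split}_rawframes.txt'
    ann_file_val = f'data/ucf101/ucf101_val_split_{split}_rawframes.txt'
    print(split, ann_file_train, ann_file_val)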
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train_320p'
data_root_val = 'data/kinetics400/rawframes_val_320p'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes_320p.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes_320p.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes_320p.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=25,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=12,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.00375, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=1)
evaluation = dict(
    interval=2, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsn_r50_320p_1x1x8_100e_kinetics400_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x8_100e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=700,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'VideoDataset'
data_root = 'data/kinetics700/videos_train'
data_root_val = 'data/kinetics700/videos_val'
ann_file_train = 'data/kinetics700/kinetics700_train_list_videos.txt'
ann_file_val = 'data/kinetics700/kinetics700_val_list_videos.txt'
ann_file_test = 'data/kinetics700/kinetics700_val_list_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='DecordDecode'),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=25,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=12,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.00375, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=5)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
# work_dir renamed from '1x1x3' to '1x1x8' to match this config's sampling
work_dir = './work_dirs/tsn_r50_video_1x1x8_100e_kinetics700_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_video_1x1x8_100e_kinetics700_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=600,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'VideoDataset'
data_root = 'data/kinetics600/videos_train'
data_root_val = 'data/kinetics600/videos_val'
ann_file_train = 'data/kinetics600/kinetics600_train_list_videos.txt'
ann_file_val = 'data/kinetics600/kinetics600_val_list_videos.txt'
ann_file_test = 'data/kinetics600/kinetics600_val_list_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='DecordDecode'),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=8,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=25,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=12,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.00375, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=5)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
# work_dir renamed from '1x1x3' to '1x1x8' to match this config's sampling
work_dir = './work_dirs/tsn_r50_video_1x1x8_100e_kinetics600_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_video_1x1x8_100e_kinetics600_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet',
        pretrained='torchvision://resnet50',
        depth=50,
        norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train_320p'
data_root_val = 'data/kinetics400/rawframes_val_320p'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes_320p.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes_320p.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes_320p.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=3,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(
        type='SampleFrames',
        clip_len=1,
        frame_interval=1,
        num_clips=25,
        test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=32,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_test,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD', lr=0.01, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=5)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsn_r50_320p_1x1x3_100e_kinetics400_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r50_320p_1x1x3_100e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet', pretrained='torchvision://resnet101', depth=101, norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=313,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        loss_cls=dict(type='BCELossWithLogits', loss_weight=160.0),
        dropout_ratio=0.5,
        init_std=0.01,
        multi_class=True,
        label_smooth_eps=0))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/mmit/rawframes/training'
data_root_val = 'data/mmit/rawframes/validation/'
ann_file_train = 'data/mmit/mmit_train_list_rawframes.txt'
ann_file_val = 'data/mmit/mmit_val_list_rawframes.txt'
ann_file_test = 'data/mmit/mmit_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=5),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(
        type='MultiScaleCrop',
        input_size=224,
        scales=(1, 0.875, 0.75, 0.66),
        random_crop=False,
        max_wh_scale_gap=1),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=5, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=5, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='MultiGroupCrop', crop_size=256, groups=1),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=16,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type, ann_file=ann_file_train, data_prefix=data_root,
        pipeline=train_pipeline, multi_class=True, num_classes=313),
    val=dict(
        type=dataset_type, ann_file=ann_file_val, data_prefix=data_root_val,
        pipeline=val_pipeline, multi_class=True, num_classes=313),
    test=dict(
        type=dataset_type, ann_file=ann_file_test, data_prefix=data_root_val,
        pipeline=test_pipeline, multi_class=True, num_classes=313))
# optimizer
optimizer = dict(
    type='SGD',
    constructor='TSMOptimizerConstructor',
    paramwise_cfg=dict(fc_lr5=True),
    lr=0.01,  # this lr is used for 8 gpus
    momentum=0.9,
    weight_decay=0.0001,
)
optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 50
checkpoint_config = dict(interval=5)
evaluation = dict(interval=5, metrics=['mmit_mean_average_precision'])
# yapf:disable
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsn_r101_1x1x5_50e_mmit_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/tsn_r101_1x1x5_50e_mmit_rgb.py
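The mmit config above is multi-label: 'BCELossWithLogits' scores all 313 tags independently with a sigmoid cross-entropy over multi-hot targets, scaled by loss_weight. A rough stand-alone sketch of that behavior in plain PyTorch (torch.nn.BCEWithLogitsLoss is the closest built-in; the exact reduction inside mmaction may differ):

import torch
import torch.nn as nn

num_classes = 313
logits = torch.randn(16, num_classes)          # one score per tag, batch of 16
labels = torch.zeros(16, num_classes)
labels[:, [3, 42, 100]] = 1.0                  # multi-hot targets
loss = nn.BCEWithLogitsLoss()(logits, labels)  # mean over batch and tags
loss = loss * 160.0                            # cf. loss_weight=160.0 in the config
print(loss.item())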
# model settings
category_nums = dict(
    action=739, attribute=117, concept=291, event=69, object=1678, scene=248)
target_cate = 'concept'
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet', pretrained='torchvision://resnet18', depth=18, norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=category_nums[target_cate],
        in_channels=512,
        spatial_type='avg',
        multi_class=True,
        consensus=dict(type='AvgConsensus', dim=1),
        loss_cls=dict(type='BCELossWithLogits', loss_weight=333.),
        dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/hvu/rawframes_train'
data_root_val = 'data/hvu/rawframes_val'
ann_file_train = f'data/hvu/hvu_{target_cate}_train.json'
ann_file_val = f'data/hvu/hvu_{target_cate}_val.json'
ann_file_test = f'data/hvu/hvu_{target_cate}_val.json'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=25, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=32,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type, ann_file=ann_file_train, data_prefix=data_root,
        pipeline=train_pipeline, multi_class=True,
        num_classes=category_nums[target_cate], filename_tmpl='img_{:05d}.jpg'),
    val=dict(
        type=dataset_type, ann_file=ann_file_val, data_prefix=data_root_val,
        pipeline=val_pipeline, multi_class=True,
        num_classes=category_nums[target_cate], filename_tmpl='img_{:05d}.jpg'),
    test=dict(
        type=dataset_type, ann_file=ann_file_test, data_prefix=data_root_val,
        pipeline=test_pipeline, multi_class=True,
        num_classes=category_nums[target_cate], filename_tmpl='img_{:05d}.jpg'))
# optimizer
# this lr is used for 8 gpus
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=1)
evaluation = dict(interval=2, metrics=['mean_average_precision'])
log_config = dict(interval=20, hooks=[dict(type='TextLoggerHook')])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = f'./work_dirs/tsn_r18_1x1x8_100e_hvu_{target_cate}_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_concept_rgb.py
# model settings
category_nums = dict(
    action=739, attribute=117, concept=291, event=69, object=1678, scene=248)
target_cate = 'action'
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet', pretrained='torchvision://resnet18', depth=18, norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=category_nums[target_cate],
        in_channels=512,
        spatial_type='avg',
        multi_class=True,
        consensus=dict(type='AvgConsensus', dim=1),
        loss_cls=dict(type='BCELossWithLogits', loss_weight=333.),
        dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/hvu/rawframes_train'
data_root_val = 'data/hvu/rawframes_val'
ann_file_train = f'data/hvu/hvu_{target_cate}_train.json'
ann_file_val = f'data/hvu/hvu_{target_cate}_val.json'
ann_file_test = f'data/hvu/hvu_{target_cate}_val.json'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=25, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=32,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type, ann_file=ann_file_train, data_prefix=data_root,
        pipeline=train_pipeline, multi_class=True,
        num_classes=category_nums[target_cate], filename_tmpl='img_{:05d}.jpg'),
    val=dict(
        type=dataset_type, ann_file=ann_file_val, data_prefix=data_root_val,
        pipeline=val_pipeline, multi_class=True,
        num_classes=category_nums[target_cate], filename_tmpl='img_{:05d}.jpg'),
    test=dict(
        type=dataset_type, ann_file=ann_file_test, data_prefix=data_root_val,
        pipeline=test_pipeline, multi_class=True,
        num_classes=category_nums[target_cate], filename_tmpl='img_{:05d}.jpg'))
# optimizer
# this lr is used for 8 gpus
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=1)
evaluation = dict(interval=2, metrics=['mean_average_precision'])
log_config = dict(interval=20, hooks=[dict(type='TextLoggerHook')])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = f'./work_dirs/tsn_r18_1x1x8_100e_hvu_{target_cate}_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_action_rgb.py
# model settings
category_nums = dict(
    action=739, attribute=117, concept=291, event=69, object=1678, scene=248)
target_cate = 'scene'
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet', pretrained='torchvision://resnet18', depth=18, norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=category_nums[target_cate],
        in_channels=512,
        spatial_type='avg',
        multi_class=True,
        consensus=dict(type='AvgConsensus', dim=1),
        loss_cls=dict(type='BCELossWithLogits', loss_weight=333.),
        dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/hvu/rawframes_train'
data_root_val = 'data/hvu/rawframes_val'
ann_file_train = f'data/hvu/hvu_{target_cate}_train.json'
ann_file_val = f'data/hvu/hvu_{target_cate}_val.json'
ann_file_test = f'data/hvu/hvu_{target_cate}_val.json'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=25, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=32,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type, ann_file=ann_file_train, data_prefix=data_root,
        pipeline=train_pipeline, multi_class=True,
        num_classes=category_nums[target_cate], filename_tmpl='img_{:05d}.jpg'),
    val=dict(
        type=dataset_type, ann_file=ann_file_val, data_prefix=data_root_val,
        pipeline=val_pipeline, multi_class=True,
        num_classes=category_nums[target_cate], filename_tmpl='img_{:05d}.jpg'),
    test=dict(
        type=dataset_type, ann_file=ann_file_test, data_prefix=data_root_val,
        pipeline=test_pipeline, multi_class=True,
        num_classes=category_nums[target_cate], filename_tmpl='img_{:05d}.jpg'))
# optimizer
# this lr is used for 8 gpus
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=1)
evaluation = dict(interval=2, metrics=['mean_average_precision'])
log_config = dict(interval=20, hooks=[dict(type='TextLoggerHook')])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = f'./work_dirs/tsn_r18_1x1x8_100e_hvu_{target_cate}_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_scene_rgb.py
# model settings
category_nums = dict(
    action=739, attribute=117, concept=291, event=69, object=1678, scene=248)
target_cate = 'attribute'
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet', pretrained='torchvision://resnet18', depth=18, norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=category_nums[target_cate],
        in_channels=512,
        spatial_type='avg',
        multi_class=True,
        consensus=dict(type='AvgConsensus', dim=1),
        loss_cls=dict(type='BCELossWithLogits', loss_weight=333.),
        dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/hvu/rawframes_train'
data_root_val = 'data/hvu/rawframes_val'
ann_file_train = f'data/hvu/hvu_{target_cate}_train.json'
ann_file_val = f'data/hvu/hvu_{target_cate}_val.json'
ann_file_test = f'data/hvu/hvu_{target_cate}_val.json'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=25, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=32,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type, ann_file=ann_file_train, data_prefix=data_root,
        pipeline=train_pipeline, multi_class=True,
        num_classes=category_nums[target_cate], filename_tmpl='img_{:05d}.jpg'),
    val=dict(
        type=dataset_type, ann_file=ann_file_val, data_prefix=data_root_val,
        pipeline=val_pipeline, multi_class=True,
        num_classes=category_nums[target_cate], filename_tmpl='img_{:05d}.jpg'),
    test=dict(
        type=dataset_type, ann_file=ann_file_test, data_prefix=data_root_val,
        pipeline=test_pipeline, multi_class=True,
        num_classes=category_nums[target_cate], filename_tmpl='img_{:05d}.jpg'))
# optimizer
# this lr is used for 8 gpus
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=1)
evaluation = dict(interval=2, metrics=['mean_average_precision'])
log_config = dict(interval=20, hooks=[dict(type='TextLoggerHook')])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = f'./work_dirs/tsn_r18_1x1x8_100e_hvu_{target_cate}_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_attribute_rgb.py
# model settings
category_nums = dict(
    action=739, attribute=117, concept=291, event=69, object=1678, scene=248)
target_cate = 'event'
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet', pretrained='torchvision://resnet18', depth=18, norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=category_nums[target_cate],
        in_channels=512,
        spatial_type='avg',
        multi_class=True,
        consensus=dict(type='AvgConsensus', dim=1),
        loss_cls=dict(type='BCELossWithLogits', loss_weight=333.),
        dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/hvu/rawframes_train'
data_root_val = 'data/hvu/rawframes_val'
ann_file_train = f'data/hvu/hvu_{target_cate}_train.json'
ann_file_val = f'data/hvu/hvu_{target_cate}_val.json'
ann_file_test = f'data/hvu/hvu_{target_cate}_val.json'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=25, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=32,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type, ann_file=ann_file_train, data_prefix=data_root,
        pipeline=train_pipeline, multi_class=True,
        num_classes=category_nums[target_cate], filename_tmpl='img_{:05d}.jpg'),
    val=dict(
        type=dataset_type, ann_file=ann_file_val, data_prefix=data_root_val,
        pipeline=val_pipeline, multi_class=True,
        num_classes=category_nums[target_cate], filename_tmpl='img_{:05d}.jpg'),
    test=dict(
        type=dataset_type, ann_file=ann_file_test, data_prefix=data_root_val,
        pipeline=test_pipeline, multi_class=True,
        num_classes=category_nums[target_cate], filename_tmpl='img_{:05d}.jpg'))
# optimizer
# this lr is used for 8 gpus
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=1)
evaluation = dict(interval=2, metrics=['mean_average_precision'])
log_config = dict(interval=20, hooks=[dict(type='TextLoggerHook')])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = f'./work_dirs/tsn_r18_1x1x8_100e_hvu_{target_cate}_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_event_rgb.py
# model settings
category_nums = dict(
    action=739, attribute=117, concept=291, event=69, object=1678, scene=248)
target_cate = 'object'
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet', pretrained='torchvision://resnet18', depth=18, norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=category_nums[target_cate],
        in_channels=512,
        spatial_type='avg',
        multi_class=True,
        consensus=dict(type='AvgConsensus', dim=1),
        loss_cls=dict(type='BCELossWithLogits', loss_weight=333.),
        dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/hvu/rawframes_train'
data_root_val = 'data/hvu/rawframes_val'
ann_file_train = f'data/hvu/hvu_{target_cate}_train.json'
ann_file_val = f'data/hvu/hvu_{target_cate}_val.json'
ann_file_test = f'data/hvu/hvu_{target_cate}_val.json'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=8, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=25, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=32,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type, ann_file=ann_file_train, data_prefix=data_root,
        pipeline=train_pipeline, multi_class=True,
        num_classes=category_nums[target_cate], filename_tmpl='img_{:05d}.jpg'),
    val=dict(
        type=dataset_type, ann_file=ann_file_val, data_prefix=data_root_val,
        pipeline=val_pipeline, multi_class=True,
        num_classes=category_nums[target_cate], filename_tmpl='img_{:05d}.jpg'),
    test=dict(
        type=dataset_type, ann_file=ann_file_test, data_prefix=data_root_val,
        pipeline=test_pipeline, multi_class=True,
        num_classes=category_nums[target_cate], filename_tmpl='img_{:05d}.jpg'))
# optimizer
# this lr is used for 8 gpus
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=1)
evaluation = dict(interval=2, metrics=['mean_average_precision'])
log_config = dict(interval=20, hooks=[dict(type='TextLoggerHook')])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = f'./work_dirs/tsn_r18_1x1x8_100e_hvu_{target_cate}_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/hvu/tsn_r18_1x1x8_100e_hvu_object_rgb.py
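The six HVU configs above are identical except for target_cate; the annotation files, head width, and work_dir are all derived from it. A small sketch of that derivation, reusing the category_nums table from the configs:

category_nums = dict(
    action=739, attribute=117, concept=291, event=69, object=1678, scene=248)

for target_cate, num_classes in category_nums.items():
    ann_file_train = f'data/hvu/hvu_{target_cate}_train.json'
    work_dir = f'./work_dirs/tsn_r18_1x1x8_100e_hvu_{target_cate}_rgb/'
    print(f'{target_cate}: {num_classes} classes -> {ann_file_train}')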
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train_320p'
data_root_val = 'data/kinetics400/rawframes_val_320p'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes_320p.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes_320p.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes_320p.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(
        type='MultiScaleCrop',
        input_size=224,
        scales=(1, 0.875, 0.75, 0.66),
        random_crop=False,
        max_wh_scale_gap=1),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=25, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='TenCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=32,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=5)
evaluation = dict(interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(interval=20, hooks=[dict(type='TextLoggerHook')])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = ('./work_dirs/tsn_r50_multiscalecrop_320p_1x1x3'
            '_100e_kinetics400_rgb/')
load_from = None
resume_from = None
workflow = [('train', 5)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_multiscalecrop_320p_1x1x3_100e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(
        type='MultiScaleCrop',
        input_size=224,
        scales=(1, 0.875, 0.75, 0.66),
        random_crop=False,
        max_wh_scale_gap=1),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=25, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='TenCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=32,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
# this lr is used for 8 gpus
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=5)
evaluation = dict(interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = ('./work_dirs/tsn_r50_multiscalecrop_340x256_1x1x3'
            '_100e_kinetics400_rgb/')
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_multiscalecrop_340x256_1x1x3_100e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train_256p'
data_root_val = 'data/kinetics400/rawframes_val_256p'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes_256p.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes_256p.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes_256p.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(
        type='MultiScaleCrop',
        input_size=224,
        scales=(1, 0.875, 0.75, 0.66),
        random_crop=False,
        max_wh_scale_gap=1),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=25, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='TenCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=32,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=5)
evaluation = dict(interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(interval=20, hooks=[dict(type='TextLoggerHook')])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = ('./work_dirs/tsn_r50_multiscalecrop_256p_1x1x3'
            '_100e_kinetics400_rgb/')
load_from = None
resume_from = None
workflow = [('train', 5)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_multiscalecrop_256p_1x1x3_100e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
test_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=25, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    workers_per_gpu=4,
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
dist_params = dict(backend='nccl')
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_test_340x256_1x1x25_3crop_100e_kinetics400_rgb.py
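The data_benchmark files above and below are test-time-only variants: they carry no optimizer or schedule and only redefine the evaluation protocol for an already-trained checkpoint. With num_clips=25 and a ThreeCrop (or TenCrop) stage, the recognizer averages scores over many spatio-temporal views per video; a quick count:

num_clips = 25           # SampleFrames at test time
crops_per_clip = 3       # ThreeCrop; TenCrop would give 10
print(num_clips * crops_per_clip)  # 75 views per video (250 with TenCrop)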
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
test_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=25, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='TenCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    workers_per_gpu=4,
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
dist_params = dict(backend='nccl')
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_test_340x256_1x1x25_10crop_100e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=25, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='TenCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=32,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=5)
evaluation = dict(interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(interval=20, hooks=[dict(type='TextLoggerHook')])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = ('./work_dirs/tsn_r50_randomresizedcrop_340x256_1x1x3'
            '_100e_kinetics400_rgb')
load_from = None
resume_from = None
workflow = [('train', 5)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_randomresizedcrop_340x256_1x1x3_100e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root_val = 'data/kinetics400/rawframes_val_320p'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes_320p.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
test_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=25, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='TenCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    workers_per_gpu=4,
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
dist_params = dict(backend='nccl')
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_test_320p_1x1x25_10crop_100e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root_val = 'data/kinetics400/rawframes_val_320p'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes_320p.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
test_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=25, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    workers_per_gpu=4,
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
dist_params = dict(backend='nccl')
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_test_320p_1x1x25_3crop_100e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root_val = 'data/kinetics400/rawframes_val_256p'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes_256p.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
test_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=25, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='TenCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    workers_per_gpu=4,
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
dist_params = dict(backend='nccl')
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_test_256p_1x1x25_10crop_100e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root_val = 'data/kinetics400/rawframes_val_256p'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes_256p.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
test_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=25, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    workers_per_gpu=4,
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
dist_params = dict(backend='nccl')
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_test_256p_1x1x25_3crop_100e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train_320p'
data_root_val = 'data/kinetics400/rawframes_val_320p'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes_320p.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes_320p.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes_320p.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=25, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=32,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
# this lr is used for 8 gpus
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=5)
evaluation = dict(interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = ('./work_dirs/tsn_r50_randomresizedcrop_320p_1x1x3'
            '_100e_kinetics400_rgb/')
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_randomresizedcrop_320p_1x1x3_100e_kinetics400_rgb.py
# model settings
model = dict(
    type='Recognizer2D',
    backbone=dict(
        type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False),
    cls_head=dict(
        type='TSNHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        consensus=dict(type='AvgConsensus', dim=1),
        dropout_ratio=0.4,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train_256p'
data_root_val = 'data/kinetics400/rawframes_val_256p'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes_256p.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes_256p.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes_256p.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=25, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=32,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
# this lr is used for 8 gpus
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=5)
evaluation = dict(interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = ('./work_dirs/tsn_r50_randomresizedcrop_256p_1x1x3'
            '_100e_kinetics400_rgb/')
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/tsn/data_benchmark/tsn_r50_randomresizedcrop_256p_1x1x3_100e_kinetics400_rgb.py
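Taken together, the data_benchmark configs sweep a small grid: training-time cropping (MultiScaleCrop vs RandomResizedCrop), source resolution (340x256, 320p, 256p), and test-time cropping (ThreeCrop vs TenCrop). A tiny sketch that enumerates the training half of the grid, using the file-name convention visible above:

from itertools import product

augs = ['multiscalecrop', 'randomresizedcrop']
resolutions = ['340x256', '320p', '256p']
for aug, res in product(augs, resolutions):
    print(f'tsn_r50_{aug}_{res}_1x1x3_100e_kinetics400_rgb')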
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowOnly',
        depth=50,
        pretrained='torchvision://resnet50',
        lateral=False,
        conv1_kernel=(1, 7, 7),
        conv1_stride_t=1,
        pool1_stride_t=1,
        inflate=(0, 0, 1, 1),
        norm_eval=False),
    cls_head=dict(
        type='I3DHead',
        in_channels=2048,
        num_classes=400,
        spatial_type='avg',
        dropout_ratio=0.5))
train_cfg = None
test_cfg = dict(average_clips='prob')
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=10, test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
# this lr is used for 8 gpus
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(
    policy='step',
    step=[90, 130],
    warmup='linear',
    warmup_by_epoch=True,
    warmup_iters=10)
total_epochs = 150
checkpoint_config = dict(interval=4)
workflow = [('train', 1)]
evaluation = dict(interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = ('./work_dirs/slowonly_imagenet_pretrained_r50_8x8x1_150e'
            '_kinetics400_rgb')
load_from = None
resume_from = None
find_unused_parameters = False
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_imagenet_pretrained_r50_8x8x1_150e_kinetics400_rgb.py
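The `# this lr is used for 8 gpus` comments reflect the linear scaling rule: the learning rate is tuned to the total batch size (GPUs x videos_per_gpu). A small illustrative helper, not part of the repo, for rescaling when training on a different setup:

def scaled_lr(base_lr, base_gpus, base_videos_per_gpu, gpus, videos_per_gpu):
    """Rescale lr proportionally to total batch size (linear scaling rule)."""
    base_batch = base_gpus * base_videos_per_gpu
    return base_lr * (gpus * videos_per_gpu) / base_batch

# e.g. the config above (lr=0.01 at 8 GPUs x 8 videos) run on 4 GPUs:
print(scaled_lr(0.01, 8, 8, 4, 8))  # -> 0.005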
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowOnly',
        depth=50,
        pretrained='torchvision://resnet50',
        lateral=False,
        in_channels=2,
        conv1_kernel=(1, 7, 7),
        conv1_stride_t=1,
        pool1_stride_t=1,
        inflate=(0, 0, 1, 1),
        with_pool2=False,
        norm_eval=False),
    cls_head=dict(
        type='I3DHead',
        in_channels=2048,
        num_classes=400,
        spatial_type='avg',
        dropout_ratio=0.5))
train_cfg = None
test_cfg = dict(average_clips='prob')
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics_flow_train_list.txt'
ann_file_val = 'data/kinetics400/kinetics_flow_val_list.txt'
ann_file_test = 'data/kinetics400/kinetics_flow_val_list.txt'
img_norm_cfg = dict(mean=[128, 128], std=[128, 128])
train_pipeline = [
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=10,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=12,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, modality='Flow',
               filename_tmpl='{}_{:05d}.jpg', pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, modality='Flow',
             filename_tmpl='{}_{:05d}.jpg', pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, modality='Flow',
              filename_tmpl='{}_{:05d}.jpg', pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.06, momentum=0.9, weight_decay=0.0001)
# this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(
    policy='CosineAnnealing', min_lr=0,
    warmup='linear', warmup_by_epoch=True, warmup_iters=34)
total_epochs = 196
checkpoint_config = dict(interval=4)
workflow = [('train', 1)]
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/slowonly_r50_8x8x1_256e_kinetics400_flow'
load_from = None
resume_from = None
find_unused_parameters = False
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_8x8x1_256e_kinetics400_flow.py
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowOnly',
        depth=101,
        pretrained=None,
        lateral=False,
        conv1_kernel=(1, 7, 7),
        conv1_stride_t=1,
        pool1_stride_t=1,
        inflate=(0, 0, 1, 1),
        norm_eval=False),
    cls_head=dict(
        type='I3DHead',
        in_channels=2048,
        num_classes=400,
        spatial_type='avg',
        dropout_ratio=0.5))
train_cfg = None
test_cfg = dict(average_clips='prob')
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=10,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001)
# this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(
    policy='CosineAnnealing', min_lr=0,
    warmup='linear', warmup_ratio=0.1, warmup_by_epoch=True, warmup_iters=34)
total_epochs = 196
checkpoint_config = dict(interval=4)
workflow = [('train', 1)]
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/slowonly_r101_8x8x1_196e_kinetics400_rgb'
load_from = None
resume_from = None
find_unused_parameters = False
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r101_8x8x1_196e_kinetics400_rgb.py
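The r101 config above combines epoch-level linear warmup (`warmup_ratio=0.1`, `warmup_iters=34`) with cosine annealing. A sketch of the schedule it encodes, for intuition only and not the scheduler's actual code:

import math

def lr_at_epoch(epoch, base_lr=0.1, warmup_epochs=34, warmup_ratio=0.1,
                total_epochs=196, min_lr=0.0):
    """Approximate epoch-level lr under linear warmup + cosine annealing."""
    if epoch < warmup_epochs:
        # linear ramp from warmup_ratio * base_lr up to base_lr
        k = epoch / warmup_epochs
        return base_lr * (warmup_ratio + (1 - warmup_ratio) * k)
    t = (epoch - warmup_epochs) / (total_epochs - warmup_epochs)
    return min_lr + (base_lr - min_lr) * 0.5 * (1 + math.cos(math.pi * t))

print(lr_at_epoch(0), lr_at_epoch(34), lr_at_epoch(196))  # 0.01, 0.1, ~0.0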
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowOnly',
        depth=50,
        pretrained='torchvision://resnet50',
        lateral=False,
        conv1_kernel=(1, 7, 7),
        conv1_stride_t=1,
        pool1_stride_t=1,
        inflate=(0, 0, 1, 1),
        norm_eval=False),
    cls_head=dict(
        type='I3DHead',
        in_channels=2048,
        num_classes=400,
        spatial_type='avg',
        dropout_ratio=0.5))
train_cfg = None
test_cfg = dict(average_clips='prob')
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=1),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=1,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=10,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
# this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(
    policy='step', step=[90, 130],
    warmup='linear', warmup_by_epoch=True, warmup_iters=10)
total_epochs = 150
checkpoint_config = dict(interval=4)
workflow = [('train', 1)]
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = ('./work_dirs/slowonly_imagenet_pretrained_r50_4x16x1_150e'
            '_kinetics400_rgb')
load_from = None
resume_from = None
find_unused_parameters = False
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_imagenet_pretrained_r50_4x16x1_150e_kinetics400_rgb.py
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowOnly',
        depth=50,
        pretrained='torchvision://resnet50',
        lateral=False,
        conv1_kernel=(1, 7, 7),
        conv1_stride_t=1,
        pool1_stride_t=1,
        inflate=(0, 0, 1, 1),
        norm_eval=False),
    cls_head=dict(
        type='I3DHead',
        in_channels=2048,
        num_classes=99,  # GYM99 has 99 subaction classes
        spatial_type='avg',
        dropout_ratio=0.5))
train_cfg = None
test_cfg = dict(average_clips='prob')
dataset_type = 'RawframeDataset'
data_root = 'data/gym/subaction_frames'
data_root_val = 'data/gym/subaction_frames'
ann_file_train = 'data/gym/annotations/gym99_train_frame.txt'
ann_file_val = 'data/gym/annotations/gym99_val_frame.txt'
ann_file_test = 'data/gym/annotations/gym99_val_frame.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=1),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=1,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=10,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=24,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.03, momentum=0.9, weight_decay=0.0001)
# this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[90, 110])
total_epochs = 120
checkpoint_config = dict(interval=1)
workflow = [('train', 1)]
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/slowonly_imagenet_pretrained_r50_4x16x1_120e_gym99_rgb'
load_from = None
resume_from = None
find_unused_parameters = False
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_imagenet_pretrained_r50_4x16x1_120e_gym99_rgb.py
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowOnly',
        depth=50,
        pretrained=None,
        lateral=False,
        conv1_kernel=(1, 7, 7),
        conv1_stride_t=1,
        pool1_stride_t=1,
        inflate=(0, 0, 1, 1),
        norm_eval=False),
    cls_head=dict(
        type='I3DHead',
        in_channels=2048,
        num_classes=400,
        spatial_type='avg',
        dropout_ratio=0.5))
train_cfg = None
test_cfg = dict(average_clips='prob')
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=10,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001)
# this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0)
total_epochs = 256
checkpoint_config = dict(interval=4)
workflow = [('train', 1)]
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/slowonly_r50_8x8x1_256e_kinetics400_rgb'
load_from = None
resume_from = None
find_unused_parameters = False
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_8x8x1_256e_kinetics400_rgb.py
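The `8x8x1` in these filenames encodes clip_len x frame_interval x num_clips. Some quick arithmetic implied by the sampling and test-time settings above, shown purely for illustration:

clip_len, frame_interval = 8, 8
span = clip_len * frame_interval    # each training clip covers 64 raw frames
test_clips, spatial_crops = 10, 3   # num_clips=10 at test time + ThreeCrop
views = test_clips * spatial_crops  # 30 forward passes averaged per video
print(span, views)                  # -> 64 30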
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowOnly',
        depth=50,
        pretrained=None,
        lateral=False,
        conv1_kernel=(1, 7, 7),
        conv1_stride_t=1,
        pool1_stride_t=1,
        inflate=(0, 0, 1, 1),
        norm_eval=False),
    cls_head=dict(
        type='I3DHead',
        in_channels=2048,
        num_classes=600,
        spatial_type='avg',
        dropout_ratio=0.5))
train_cfg = None
test_cfg = dict(average_clips='prob')
dataset_type = 'VideoDataset'
data_root = 'data/kinetics600/videos_train'
data_root_val = 'data/kinetics600/videos_val'
ann_file_train = 'data/kinetics600/kinetics600_train_list_videos.txt'
ann_file_val = 'data/kinetics600/kinetics600_val_list_videos.txt'
ann_file_test = 'data/kinetics600/kinetics600_val_list_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
    dict(type='DecordDecode'),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1,
         test_mode=True),
    dict(type='DecordDecode'),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=10,
         test_mode=True),
    dict(type='DecordDecode'),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=12,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.15, momentum=0.9, weight_decay=0.0001)
# this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0)
total_epochs = 256
checkpoint_config = dict(interval=4)
workflow = [('train', 1)]
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/slowonly_r50_video_8x8x1_256e_kinetics600_rgb'
load_from = None
resume_from = None
find_unused_parameters = False
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_video_8x8x1_256e_kinetics600_rgb.py
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowOnly',
        depth=50,
        pretrained=None,
        lateral=False,
        conv1_kernel=(1, 7, 7),
        conv1_stride_t=1,
        pool1_stride_t=1,
        inflate=(0, 0, 1, 1),
        norm_eval=False),
    cls_head=dict(
        type='I3DHead',
        in_channels=2048,
        num_classes=700,
        spatial_type='avg',
        dropout_ratio=0.5))
train_cfg = None
test_cfg = dict(average_clips='prob')
dataset_type = 'VideoDataset'
data_root = 'data/kinetics700/videos_train'
data_root_val = 'data/kinetics700/videos_val'
ann_file_train = 'data/kinetics700/kinetics700_train_list_videos.txt'
ann_file_val = 'data/kinetics700/kinetics700_val_list_videos.txt'
ann_file_test = 'data/kinetics700/kinetics700_val_list_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1),
    dict(type='DecordDecode'),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=1,
         test_mode=True),
    dict(type='DecordDecode'),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=8, frame_interval=8, num_clips=10,
         test_mode=True),
    dict(type='DecordDecode'),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=12,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.15, momentum=0.9, weight_decay=0.0001)
# this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0)
total_epochs = 256
checkpoint_config = dict(interval=4)
workflow = [('train', 1)]
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/slowonly_r50_video_8x8x1_256e_kinetics700_rgb'
load_from = None
resume_from = None
find_unused_parameters = False
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_video_8x8x1_256e_kinetics700_rgb.py
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowOnly',
        depth=50,
        pretrained=None,
        lateral=False,
        in_channels=2,
        conv1_kernel=(1, 7, 7),
        conv1_stride_t=1,
        pool1_stride_t=1,
        inflate=(0, 0, 1, 1),
        with_pool2=False,
        norm_eval=False),
    cls_head=dict(
        type='I3DHead',
        in_channels=2048,
        num_classes=99,  # GYM99 has 99 subaction classes
        spatial_type='avg',
        dropout_ratio=0.5))
train_cfg = None
test_cfg = dict(average_clips='prob')
dataset_type = 'RawframeDataset'
data_root = 'data/gym/subaction_frames'
data_root_val = 'data/gym/subaction_frames'
ann_file_train = 'data/gym/annotations/gym99_train_frame.txt'
ann_file_val = 'data/gym/annotations/gym99_val_frame.txt'
ann_file_test = 'data/gym/annotations/gym99_val_frame.txt'
img_norm_cfg = dict(mean=[128, 128], std=[128, 128])
train_pipeline = [
    dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=1),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=1,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=10,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=24,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, modality='Flow',
               filename_tmpl='{}_{:05d}.jpg', pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, modality='Flow',
             filename_tmpl='{}_{:05d}.jpg', pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, modality='Flow',
              filename_tmpl='{}_{:05d}.jpg', pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.03, momentum=0.9, weight_decay=0.0001)
# this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[90, 110])
total_epochs = 120
checkpoint_config = dict(interval=1)
workflow = [('train', 1)]
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = ('./work_dirs/'
            'slowonly_kinetics_pretrained_r50_4x16x1_120e_gym99_flow')
load_from = ('https://download.openmmlab.com/mmaction/recognition/slowonly/'
             'slowonly_r50_4x16x1_256e_kinetics400_flow/'
             'slowonly_r50_4x16x1_256e_kinetics400_flow_20200704-decb8568.pth')
resume_from = None
find_unused_parameters = False
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_kinetics_pretrained_r50_4x16x1_120e_gym99_flow.py
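The Flow branches above locate frames through `filename_tmpl='{}_{:05d}.jpg'`. In mmaction2-style loaders the first field is conventionally filled with the flow direction ('x' / 'y') and the second with the frame index; that convention is an assumption here, shown only to make the template concrete:

tmpl = '{}_{:05d}.jpg'
print(tmpl.format('x', 1))   # -> x_00001.jpg  (horizontal flow, frame 1)
print(tmpl.format('y', 12))  # -> y_00012.jpg  (vertical flow, frame 12)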
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowOnly',
        depth=50,
        pretrained=None,
        lateral=False,
        conv1_kernel=(1, 7, 7),
        conv1_stride_t=1,
        pool1_stride_t=1,
        inflate=(0, 0, 1, 1),
        norm_eval=False),
    cls_head=dict(
        type='I3DHead',
        in_channels=2048,
        num_classes=400,
        spatial_type='avg',
        dropout_ratio=0.5))
train_cfg = None
test_cfg = dict(average_clips='prob')
dataset_type = 'VideoDataset'
data_root = 'data/kinetics400/videos_train'
data_root_val = 'data/kinetics400/videos_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=1),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=1,
         test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=10,
         test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=24,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.3, momentum=0.9, weight_decay=0.0001)
# this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0)
total_epochs = 256
checkpoint_config = dict(interval=4)
workflow = [('train', 1)]
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/slowonly_r50_video_4x16x1_256e_kinetics400_rgb'
load_from = None
resume_from = None
find_unused_parameters = False
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_video_4x16x1_256e_kinetics400_rgb.py
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowOnly',
        depth=50,
        pretrained='torchvision://resnet50',
        lateral=False,
        in_channels=2,
        conv1_kernel=(1, 7, 7),
        conv1_stride_t=1,
        pool1_stride_t=1,
        inflate=(0, 0, 1, 1),
        with_pool2=False,
        norm_eval=False),
    cls_head=dict(
        type='I3DHead',
        in_channels=2048,
        num_classes=400,
        spatial_type='avg',
        dropout_ratio=0.5))
train_cfg = None
test_cfg = dict(average_clips='prob')
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics_flow_train_list.txt'
ann_file_val = 'data/kinetics400/kinetics_flow_val_list.txt'
ann_file_test = 'data/kinetics400/kinetics_flow_val_list.txt'
img_norm_cfg = dict(mean=[128, 128], std=[128, 128])
train_pipeline = [
    dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=1),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=1,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=10,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=24,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, modality='Flow',
               filename_tmpl='{}_{:05d}.jpg', pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, modality='Flow',
             filename_tmpl='{}_{:05d}.jpg', pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, modality='Flow',
              filename_tmpl='{}_{:05d}.jpg', pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.06, momentum=0.9, weight_decay=0.0001)
# this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(
    policy='CosineAnnealing', min_lr=0,
    warmup='linear', warmup_by_epoch=True, warmup_iters=34)
total_epochs = 256
checkpoint_config = dict(interval=4)
workflow = [('train', 1)]
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/slowonly_r50_4x16x1_256e_kinetics400_flow'
load_from = None
resume_from = None
find_unused_parameters = False
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_flow.py
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowOnly',
        depth=50,
        pretrained=None,
        lateral=False,
        conv1_kernel=(1, 7, 7),
        conv1_stride_t=1,
        pool1_stride_t=1,
        inflate=(0, 0, 1, 1),
        norm_eval=False),
    cls_head=dict(
        type='I3DHead',
        in_channels=2048,
        num_classes=400,
        spatial_type='avg',
        dropout_ratio=0.5))
train_cfg = None
test_cfg = dict(average_clips='prob')
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=1),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=1,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=10,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001)
# this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0)
total_epochs = 256
checkpoint_config = dict(interval=4)
workflow = [('train', 1)]
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/slowonly_r50_4x16x1_256e_kinetics400_rgb'
load_from = None
resume_from = None
find_unused_parameters = False
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_4x16x1_256e_kinetics400_rgb.py
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowOnly',
        depth=50,
        pretrained=None,
        lateral=False,
        conv1_kernel=(1, 7, 7),
        conv1_stride_t=1,
        pool1_stride_t=1,
        inflate=(0, 0, 1, 1),
        norm_eval=False),
    cls_head=dict(
        type='I3DHead',
        in_channels=2048,
        num_classes=400,
        spatial_type='avg',
        dropout_ratio=0.5))
test_cfg = dict(average_clips='prob')
dataset_type = 'VideoDataset'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
test_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=10,
         test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=1,
    workers_per_gpu=2,
    test=dict(type=dataset_type, ann_file=None, data_prefix=None,
              pipeline=test_pipeline))
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/slowonly_r50_video_inference_4x16x1_256e_kinetics400_rgb.py
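The record above is an inference-only config (no train/val branches, `ann_file=None`). A hedged sketch of how it would be used, assuming the `init_recognizer` / `inference_recognizer` helpers of the mmaction2 0.x API this repo builds on; paths are hypothetical:

from mmaction.apis import init_recognizer, inference_recognizer

cfg_path = ('configs/recognition/slowonly/'
            'slowonly_r50_video_inference_4x16x1_256e_kinetics400_rgb.py')
ckpt = 'checkpoints/slowonly_r50_4x16x1_256e_kinetics400_rgb.pth'  # hypothetical
model = init_recognizer(cfg_path, ckpt, device='cuda:0')
# exact arguments/return format of inference_recognizer vary across
# mmaction2 0.x releases; here it is given the model and a video path
results = inference_recognizer(model, 'demo/demo.mp4')  # hypothetical clip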
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowOnly',
        depth=50,
        pretrained=None,
        lateral=False,
        conv1_kernel=(1, 7, 7),
        conv1_stride_t=1,
        pool1_stride_t=1,
        inflate=(0, 0, 1, 1),
        norm_eval=False),
    cls_head=dict(
        type='I3DHead',
        in_channels=2048,
        num_classes=400,
        spatial_type='avg',
        dropout_ratio=0.5))
train_cfg = None
test_cfg = dict(average_clips='prob')
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train'
data_root_val = 'data/kinetics400/rawframes_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=1),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=1,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=10,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=16,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.6, momentum=0.9, weight_decay=0.0001)
# this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0)
total_epochs = 256
checkpoint_config = dict(interval=4)
workflow = [('train', 1)]
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = ('./work_dirs/slowonly_r50_randomresizedcrop_340x256_4x16x1'
            '_256e_kinetics400_rgb')
load_from = None
resume_from = None
find_unused_parameters = False
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/data_benchmark/slowonly_r50_randomresizedcrop_340x256_4x16x1_256e_kinetics400_rgb.py
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowOnly',
        depth=50,
        pretrained=None,
        lateral=False,
        conv1_kernel=(1, 7, 7),
        conv1_stride_t=1,
        pool1_stride_t=1,
        inflate=(0, 0, 1, 1),
        norm_eval=False),
    cls_head=dict(
        type='I3DHead',
        in_channels=2048,
        num_classes=400,
        spatial_type='avg',
        dropout_ratio=0.5))
train_cfg = None
test_cfg = dict(average_clips='prob')
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train_320p'
data_root_val = 'data/kinetics400/rawframes_val_320p'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes_320p.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes_320p.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes_320p.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=1),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=1,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=10,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=16,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.6, momentum=0.9, weight_decay=0.0001)
# this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0)
total_epochs = 256
checkpoint_config = dict(interval=4)
workflow = [('train', 1)]
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = ('./work_dirs/slowonly_r50_randomresizedcrop_320p_4x16x1'
            '_256e_kinetics400_rgb')
load_from = None
resume_from = None
find_unused_parameters = False
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/data_benchmark/slowonly_r50_randomresizedcrop_320p_4x16x1_256e_kinetics400_rgb.py
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3dSlowOnly',
        depth=50,
        pretrained=None,
        lateral=False,
        conv1_kernel=(1, 7, 7),
        conv1_stride_t=1,
        pool1_stride_t=1,
        inflate=(0, 0, 1, 1),
        norm_eval=False),
    cls_head=dict(
        type='I3DHead',
        in_channels=2048,
        num_classes=400,
        spatial_type='avg',
        dropout_ratio=0.5))
train_cfg = None
test_cfg = dict(average_clips='prob')
dataset_type = 'RawframeDataset'
data_root = 'data/kinetics400/rawframes_train_256p'
data_root_val = 'data/kinetics400/rawframes_val_256p'
ann_file_train = 'data/kinetics400/kinetics400_train_list_rawframes_256p.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_rawframes_256p.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_rawframes_256p.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
    dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=1),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='RandomResizedCrop'),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(type='Flip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
    dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=1,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
    dict(type='SampleFrames', clip_len=4, frame_interval=16, num_clips=10,
         test_mode=True),
    dict(type='RawFrameDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
data = dict(
    videos_per_gpu=16,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.6, momentum=0.9, weight_decay=0.0001)
# this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0)
total_epochs = 256
checkpoint_config = dict(interval=4)
workflow = [('train', 1)]
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = ('./work_dirs/slowonly_r50_randomresizedcrop_256p_4x16x1'
            '_256e_kinetics400_rgb')
load_from = None
resume_from = None
find_unused_parameters = False
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition/slowonly/data_benchmark/slowonly_r50_randomresizedcrop_256p_4x16x1_256e_kinetics400_rgb.py
# model settings
model = dict(
    type='AudioRecognizer',
    backbone=dict(
        type='ResNetAudio',
        depth=50,
        pretrained=None,
        in_channels=1,
        norm_eval=False),
    cls_head=dict(
        type='AudioTSNHead',
        num_classes=400,
        in_channels=1024,
        dropout_ratio=0.5,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'AudioFeatureDataset'
data_root = 'data/kinetics400/audio_feature_train'
data_root_val = 'data/kinetics400/audio_feature_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_audio_feature.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_audio_feature.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_audio_feature.txt'
train_pipeline = [
    dict(type='LoadAudioFeature'),
    dict(type='SampleFrames', clip_len=64, frame_interval=1, num_clips=1),
    dict(type='AudioFeatureSelector'),
    dict(type='FormatAudioShape', input_format='NCTF'),
    dict(type='Collect', keys=['audios', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['audios'])
]
val_pipeline = [
    dict(type='LoadAudioFeature'),
    dict(type='SampleFrames', clip_len=64, frame_interval=1, num_clips=1,
         test_mode=True),
    dict(type='AudioFeatureSelector'),
    dict(type='FormatAudioShape', input_format='NCTF'),
    dict(type='Collect', keys=['audios', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['audios'])
]
test_pipeline = [
    dict(type='LoadAudioFeature'),
    dict(type='SampleFrames', clip_len=64, frame_interval=1, num_clips=10,
         test_mode=True),
    dict(type='AudioFeatureSelector'),
    dict(type='FormatAudioShape', input_format='NCTF'),
    dict(type='Collect', keys=['audios', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['audios'])
]
data = dict(
    videos_per_gpu=160,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=2.0, momentum=0.9, weight_decay=0.0001)
# this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0)
total_epochs = 100
checkpoint_config = dict(interval=5)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=1,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = ('./work_dirs/' +
            'audioonly_r50_64x1x1_100e_kinetics400_audio_feature/')
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition_audio/audioonly/audioonly_r50_64x1x1_100e_kinetics400_audio_feature.py
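With `FormatAudioShape(input_format='NCTF')`, a batch from the feature pipeline above is laid out as (batch, channel, time, frequency): one channel, 64 sampled spectrogram frames. The frequency-bin count depends on the precomputed audio features and is an assumption (80) in this illustrative shape check:

import numpy as np

# videos_per_gpu=160, 1 channel, clip_len=64 frames, F=80 bins (assumed)
batch = np.zeros((160, 1, 64, 80), dtype=np.float32)
print(batch.shape)  # -> (160, 1, 64, 80), i.e. N x C x T x F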
# model settings
model = dict(
    type='AudioRecognizer',
    backbone=dict(type='ResNet', depth=50, in_channels=1, norm_eval=False),
    cls_head=dict(
        type='AudioTSNHead',
        num_classes=400,
        in_channels=2048,
        dropout_ratio=0.5,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'AudioDataset'
data_root = 'data/kinetics400/audios'
data_root_val = 'data/kinetics400/audios'
ann_file_train = 'data/kinetics400/kinetics400_train_list_audio.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_audio.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_audio.txt'
train_pipeline = [
    dict(type='AudioDecodeInit'),
    dict(type='SampleFrames', clip_len=64, frame_interval=1, num_clips=1),
    dict(type='AudioDecode'),
    dict(type='AudioAmplify', ratio=1.5),
    dict(type='MelLogSpectrogram'),
    dict(type='FormatAudioShape', input_format='NCTF'),
    dict(type='Collect', keys=['audios', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['audios'])
]
val_pipeline = [
    dict(type='AudioDecodeInit'),
    dict(type='SampleFrames', clip_len=64, frame_interval=1, num_clips=1,
         test_mode=True),
    dict(type='AudioDecode'),
    dict(type='AudioAmplify', ratio=1.5),
    dict(type='MelLogSpectrogram'),
    dict(type='FormatAudioShape', input_format='NCTF'),
    dict(type='Collect', keys=['audios', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['audios'])
]
test_pipeline = [
    dict(type='AudioDecodeInit'),
    dict(type='SampleFrames', clip_len=64, frame_interval=1, num_clips=1,
         test_mode=True),
    # was a duplicated 'AudioDecodeInit'; decoding must happen here, as in
    # the train/val pipelines
    dict(type='AudioDecode'),
    dict(type='AudioAmplify', ratio=1.5),
    dict(type='MelLogSpectrogram'),
    dict(type='FormatAudioShape', input_format='NCTF'),
    dict(type='Collect', keys=['audios', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['audios'])
]
data = dict(
    videos_per_gpu=320,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001)
# this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0)
total_epochs = 100
checkpoint_config = dict(interval=5)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsn_r50_64x1x1_100e_kinetics400_audio/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition_audio/resnet/tsn_r50_64x1x1_100e_kinetics400_audio.py
# model settings
model = dict(
    type='AudioRecognizer',
    backbone=dict(type='ResNet', depth=18, in_channels=1, norm_eval=False),
    cls_head=dict(
        type='AudioTSNHead',
        num_classes=400,
        in_channels=512,
        dropout_ratio=0.5,
        init_std=0.01))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
dataset_type = 'AudioFeatureDataset'
data_root = 'data/kinetics400/audio_feature_train'
data_root_val = 'data/kinetics400/audio_feature_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_audio_feature.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_audio_feature.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_audio_feature.txt'
train_pipeline = [
    dict(type='LoadAudioFeature'),
    dict(type='SampleFrames', clip_len=64, frame_interval=1, num_clips=1),
    dict(type='AudioFeatureSelector'),
    dict(type='FormatAudioShape', input_format='NCTF'),
    dict(type='Collect', keys=['audios', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['audios'])
]
val_pipeline = [
    dict(type='LoadAudioFeature'),
    dict(type='SampleFrames', clip_len=64, frame_interval=1, num_clips=1,
         test_mode=True),
    dict(type='AudioFeatureSelector'),
    dict(type='FormatAudioShape', input_format='NCTF'),
    dict(type='Collect', keys=['audios', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['audios'])
]
test_pipeline = [
    dict(type='LoadAudioFeature'),
    dict(type='SampleFrames', clip_len=64, frame_interval=1, num_clips=1,
         test_mode=True),
    dict(type='AudioFeatureSelector'),
    dict(type='FormatAudioShape', input_format='NCTF'),
    dict(type='Collect', keys=['audios', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['audios'])
]
data = dict(
    videos_per_gpu=320,
    workers_per_gpu=4,
    train=dict(type=dataset_type, ann_file=ann_file_train,
               data_prefix=data_root, pipeline=train_pipeline),
    val=dict(type=dataset_type, ann_file=ann_file_val,
             data_prefix=data_root_val, pipeline=val_pipeline),
    test=dict(type=dataset_type, ann_file=ann_file_test,
              data_prefix=data_root_val, pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.1, momentum=0.9, weight_decay=0.0001)
# this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0)
total_epochs = 100
checkpoint_config = dict(interval=5)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsn_r18_64x1x1_100e_kinetics400_audio_feature/'
load_from = None
resume_from = None
workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/recognition_audio/resnet/tsn_r18_64x1x1_100e_kinetics400_audio_feature.py
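For orientation, FormatAudioShape with input_format='NCTF' in both audio configs above lays each sample out as (batch, channel, time, frequency), which is why the ResNet backbones are built with in_channels=1. A toy shape check; the 80 mel bins are an assumption for illustration only:

import torch

n, c, t, f = 2, 1, 64, 80  # batch, channel, clip_len time steps, assumed mel-bin count
dummy_audios = torch.randn(n, c, t, f)
print(dummy_audios.shape)  # torch.Size([2, 1, 64, 80])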
# model settings model = dict( type='BMN', temporal_dim=100, boundary_ratio=0.5, num_samples=32, num_samples_per_bin=3, feat_dim=400, soft_nms_alpha=0.4, soft_nms_low_threshold=0.5, soft_nms_high_threshold=0.9, post_process_top_k=100) # model training and testing settings train_cfg = None test_cfg = dict(average_clips='score') # dataset settings dataset_type = 'ActivityNetDataset' data_root = 'data/ActivityNet/activitynet_feature_cuhk/csv_mean_100/' data_root_val = 'data/ActivityNet/activitynet_feature_cuhk/csv_mean_100/' ann_file_train = 'data/ActivityNet/anet_anno_train.json' ann_file_val = 'data/ActivityNet/anet_anno_val.json' ann_file_test = 'data/ActivityNet/anet_anno_val.json' test_pipeline = [ dict(type='LoadLocalizationFeature'), dict( type='Collect', keys=['raw_feature'], meta_name='video_meta', meta_keys=[ 'video_name', 'duration_second', 'duration_frame', 'annotations', 'feature_frame' ]), dict(type='ToTensor', keys=['raw_feature']), ] train_pipeline = [ dict(type='LoadLocalizationFeature'), dict(type='GenerateLocalizationLabels'), dict( type='Collect', keys=['raw_feature', 'gt_bbox'], meta_name='video_meta', meta_keys=['video_name']), dict(type='ToTensor', keys=['raw_feature', 'gt_bbox']), dict( type='ToDataContainer', fields=[dict(key='gt_bbox', stack=False, cpu_only=True)]) ] val_pipeline = [ dict(type='LoadLocalizationFeature'), dict(type='GenerateLocalizationLabels'), dict( type='Collect', keys=['raw_feature', 'gt_bbox'], meta_name='video_meta', meta_keys=[ 'video_name', 'duration_second', 'duration_frame', 'annotations', 'feature_frame' ]), dict(type='ToTensor', keys=['raw_feature', 'gt_bbox']), dict( type='ToDataContainer', fields=[dict(key='gt_bbox', stack=False, cpu_only=True)]) ] data = dict( videos_per_gpu=8, workers_per_gpu=8, train_dataloader=dict(drop_last=True), val_dataloader=dict(videos_per_gpu=1), test_dataloader=dict(videos_per_gpu=1), test=dict( type=dataset_type, ann_file=ann_file_test, pipeline=test_pipeline, data_prefix=data_root_val), val=dict( type=dataset_type, ann_file=ann_file_val, pipeline=val_pipeline, data_prefix=data_root_val), train=dict( type=dataset_type, ann_file=ann_file_train, pipeline=train_pipeline, data_prefix=data_root)) # optimizer optimizer = dict( type='Adam', lr=0.001, weight_decay=0.0001) # this lr is used for 2 gpus optimizer_config = dict(grad_clip=None) # learning policy lr_config = dict(policy='step', step=7) total_epochs = 9 checkpoint_config = dict(interval=1) evaluation = dict(interval=1, metrics=['AR@AN']) log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')]) # runtime settings dist_params = dict(backend='nccl') log_level = 'INFO' work_dir = './work_dirs/bmn_400x100_2x8_9e_activitynet_feature/' load_from = None resume_from = None workflow = [('train', 1)] output_config = dict(out=f'{work_dir}/results.json', output_format='json')
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/localization/bmn/bmn_400x100_2x8_9e_activitynet_feature.py
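The soft_nms_* values in the BMN config control Gaussian soft-NMS over temporal proposals at post-processing time. Below is a self-contained sketch of the standard decay rule (score *= exp(-iou^2 / alpha)) to illustrate what soft_nms_alpha=0.4 does; it is not the repo's exact implementation:

import numpy as np

def temporal_iou(a, b):
    # a, b: (t_start, t_end) segments
    inter = max(0.0, min(a[1], b[1]) - max(a[0], b[0]))
    union = (a[1] - a[0]) + (b[1] - b[0]) - inter
    return inter / union if union > 0 else 0.0

def soft_nms(proposals, alpha=0.4):
    # proposals: list of (t_start, t_end, score); returns rescored proposals
    proposals = sorted(proposals, key=lambda p: -p[2])
    kept = []
    while proposals:
        best = proposals.pop(0)
        kept.append(best)
        proposals = [(s, e, sc * np.exp(-temporal_iou((s, e), best[:2]) ** 2 / alpha))
                     for s, e, sc in proposals]
        proposals.sort(key=lambda p: -p[2])
    return kept

print(soft_nms([(0.1, 0.5, 0.9), (0.15, 0.55, 0.8), (0.6, 0.9, 0.7)]))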
# model training and testing settings train_cfg = dict( ssn=dict( assigner=dict( positive_iou_threshold=0.7, background_iou_threshold=0.01, incomplete_iou_threshold=0.3, background_coverage_threshold=0.02, incomplete_overlap_threshold=0.01), sampler=dict( num_per_video=8, positive_ratio=1, background_ratio=1, incomplete_ratio=6, add_gt_as_proposals=True), loss_weight=dict(comp_loss_weight=0.1, reg_loss_weight=0.1), debug=False)) test_cfg = dict( ssn=dict( sampler=dict(test_interval=6, batch_size=16), evaluater=dict( top_k=2000, nms=0.2, softmax_before_filter=True, cls_score_dict=None, cls_top_k=2))) # model settings model = dict( type='SSN', backbone=dict( type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False, partial_bn=True), spatial_type='avg', dropout_ratio=0.8, cls_head=dict( type='SSNHead', dropout_ratio=0., in_channels=2048, num_classes=20, consensus=dict(type='STPPTest', stpp_stage=(1, 1, 1)), use_regression=True), test_cfg=test_cfg) # dataset settings dataset_type = 'SSNDataset' data_root = './data/thumos14/rawframes/' data_root_val = './data/thumos14/rawframes/' ann_file_train = 'data/thumos14/thumos14_tag_val_proposal_list.txt' ann_file_val = 'data/thumos14/thumos14_tag_val_proposal_list.txt' ann_file_test = 'data/thumos14/thumos14_tag_test_proposal_list.txt' img_norm_cfg = dict(mean=[104, 117, 128], std=[1, 1, 1], to_bgr=True) test_pipeline = [ dict( type='SampleProposalFrames', clip_len=1, body_segments=5, aug_segments=(2, 2), aug_ratio=0.5, mode='test'), dict(type='RawFrameDecode'), dict(type='Resize', scale=(340, 256), keep_ratio=True), dict(type='CenterCrop', crop_size=224), dict(type='Flip', flip_ratio=0), dict(type='Normalize', **img_norm_cfg), dict(type='FormatShape', input_format='NCHW'), dict( type='Collect', keys=[ 'imgs', 'relative_proposal_list', 'scale_factor_list', 'proposal_tick_list', 'reg_norm_consts' ], meta_keys=[]), dict( type='ToTensor', keys=[ 'imgs', 'relative_proposal_list', 'scale_factor_list', 'proposal_tick_list', 'reg_norm_consts' ]) ] data = dict( videos_per_gpu=1, workers_per_gpu=2, test=dict( type=dataset_type, ann_file=ann_file_test, data_prefix=data_root, train_cfg=train_cfg, test_cfg=test_cfg, aug_ratio=0.5, test_mode=True, pipeline=test_pipeline)) # optimizer optimizer = dict( type='SGD', lr=0.001, momentum=0.9, weight_decay=1e-6) # this lr is used for 8 gpus optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) # learning policy lr_config = dict(policy='step', step=[200, 400]) checkpoint_config = dict(interval=5) log_config = dict(interval=5, hooks=[dict(type='TextLoggerHook')]) # runtime settings total_epochs = 450 dist_params = dict(backend='nccl') log_level = 'INFO' work_dir = './work_dirs/ssn_r50_1x5_450e_thumos14_rgb' load_from = None resume_from = None workflow = [('train', 1)]
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/localization/ssn/ssn_r50_450e_thumos14_rgb_test.py
# model training and testing settings train_cfg = dict( ssn=dict( assigner=dict( positive_iou_threshold=0.7, background_iou_threshold=0.01, incomplete_iou_threshold=0.3, background_coverage_threshold=0.02, incomplete_overlap_threshold=0.01), sampler=dict( num_per_video=8, positive_ratio=1, background_ratio=1, incomplete_ratio=6, add_gt_as_proposals=True), loss_weight=dict(comp_loss_weight=0.1, reg_loss_weight=0.1), debug=False)) test_cfg = dict( ssn=dict( sampler=dict(test_interval=6, batch_size=16), evaluater=dict( top_k=2000, nms=0.2, softmax_before_filter=True, cls_score_dict=None, cls_top_k=2))) # model settings model = dict( type='SSN', backbone=dict( type='ResNet', pretrained='torchvision://resnet50', depth=50, norm_eval=False, partial_bn=True), spatial_type='avg', dropout_ratio=0.8, loss_cls=dict(type='SSNLoss'), cls_head=dict( type='SSNHead', dropout_ratio=0., in_channels=2048, num_classes=20, consensus=dict( type='STPPTrain', stpp_stage=(1, 1, 1), num_segments_list=(2, 5, 2)), use_regression=True), train_cfg=train_cfg) # dataset settings dataset_type = 'SSNDataset' data_root = './data/thumos14/rawframes/' data_root_val = './data/thumos14/rawframes/' ann_file_train = 'data/thumos14/thumos14_tag_val_proposal_list.txt' ann_file_val = 'data/thumos14/thumos14_tag_val_proposal_list.txt' ann_file_test = 'data/thumos14/thumos14_tag_test_proposal_list.txt' img_norm_cfg = dict(mean=[104, 117, 128], std=[1, 1, 1], to_bgr=True) train_pipeline = [ dict( type='SampleProposalFrames', clip_len=1, body_segments=5, aug_segments=(2, 2), aug_ratio=0.5), dict(type='RawFrameDecode'), dict(type='Resize', scale=(340, 256), keep_ratio=True), dict(type='CenterCrop', crop_size=224), dict(type='Flip', flip_ratio=0), dict(type='Normalize', **img_norm_cfg), dict(type='FormatShape', input_format='NPTCHW'), dict( type='Collect', keys=[ 'imgs', 'reg_targets', 'proposal_scale_factor', 'proposal_labels', 'proposal_type' ], meta_keys=[]), dict( type='ToTensor', keys=[ 'imgs', 'reg_targets', 'proposal_scale_factor', 'proposal_labels', 'proposal_type' ]) ] val_pipeline = [ dict( type='SampleProposalFrames', clip_len=1, body_segments=5, aug_segments=(2, 2), aug_ratio=0.5), dict(type='RawFrameDecode'), dict(type='Resize', scale=(340, 256), keep_ratio=True), dict(type='CenterCrop', crop_size=224), dict(type='Flip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='FormatShape', input_format='NPTCHW'), dict( type='Collect', keys=[ 'imgs', 'reg_targets', 'proposal_scale_factor', 'proposal_labels', 'proposal_type' ], meta_keys=[]), dict( type='ToTensor', keys=[ 'imgs', 'reg_targets', 'proposal_scale_factor', 'proposal_labels', 'proposal_type' ]) ] data = dict( videos_per_gpu=1, workers_per_gpu=2, train=dict( type=dataset_type, ann_file=ann_file_train, data_prefix=data_root, train_cfg=train_cfg, test_cfg=test_cfg, body_segments=5, aug_segments=(2, 2), aug_ratio=0.5, test_mode=False, verbose=True, pipeline=train_pipeline), val=dict( type=dataset_type, ann_file=ann_file_val, data_prefix=data_root, train_cfg=train_cfg, test_cfg=test_cfg, body_segments=5, aug_segments=(2, 2), aug_ratio=0.5, test_mode=False, pipeline=val_pipeline)) # optimizer optimizer = dict( type='SGD', lr=0.001, momentum=0.9, weight_decay=1e-6) # this lr is used for 8 gpus optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) # learning policy lr_config = dict(policy='step', step=[200, 400]) checkpoint_config = dict(interval=5) log_config = dict(interval=1, hooks=[dict(type='TextLoggerHook')]) # runtime settings total_epochs = 
450 dist_params = dict(backend='nccl') log_level = 'INFO' work_dir = './work_dirs/ssn_r50_1x5_450e_thumos14_rgb' load_from = None resume_from = None workflow = [('train', 1)] find_unused_parameters = True
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/localization/ssn/ssn_r50_450e_thumos14_rgb_train.py
# model settings model = dict( type='TEM', temporal_dim=100, boundary_ratio=0.1, tem_feat_dim=400, tem_hidden_dim=512, tem_match_threshold=0.5) # model training and testing settings train_cfg = None test_cfg = dict(average_clips='score') # dataset settings dataset_type = 'ActivityNetDataset' data_root = 'data/ActivityNet/activitynet_feature_cuhk/csv_mean_100/' data_root_val = 'data/ActivityNet/activitynet_feature_cuhk/csv_mean_100/' ann_file_train = 'data/ActivityNet/anet_anno_train.json' ann_file_val = 'data/ActivityNet/anet_anno_val.json' ann_file_test = 'data/ActivityNet/anet_anno_full.json' work_dir = 'work_dirs/bsn_400x100_20e_1x16_activitynet_feature/' tem_results_dir = f'{work_dir}/tem_results/' test_pipeline = [ dict(type='LoadLocalizationFeature'), dict( type='Collect', keys=['raw_feature'], meta_name='video_meta', meta_keys=['video_name']), dict(type='ToTensor', keys=['raw_feature']) ] train_pipeline = [ dict(type='LoadLocalizationFeature'), dict(type='GenerateLocalizationLabels'), dict( type='Collect', keys=['raw_feature', 'gt_bbox'], meta_name='video_meta', meta_keys=['video_name']), dict(type='ToTensor', keys=['raw_feature', 'gt_bbox']), dict(type='ToDataContainer', fields=[dict(key='gt_bbox', stack=False)]) ] val_pipeline = [ dict(type='LoadLocalizationFeature'), dict(type='GenerateLocalizationLabels'), dict( type='Collect', keys=['raw_feature', 'gt_bbox'], meta_name='video_meta', meta_keys=['video_name']), dict(type='ToTensor', keys=['raw_feature', 'gt_bbox']), dict(type='ToDataContainer', fields=[dict(key='gt_bbox', stack=False)]) ] data = dict( videos_per_gpu=16, workers_per_gpu=8, train_dataloader=dict(drop_last=True), val_dataloader=dict(videos_per_gpu=1), test_dataloader=dict(videos_per_gpu=1), test=dict( type=dataset_type, ann_file=ann_file_test, pipeline=test_pipeline, data_prefix=data_root_val), val=dict( type=dataset_type, ann_file=ann_file_val, pipeline=val_pipeline, data_prefix=data_root_val), train=dict( type=dataset_type, ann_file=ann_file_train, pipeline=train_pipeline, data_prefix=data_root)) # optimizer optimizer = dict( type='Adam', lr=0.001, weight_decay=0.0001) # this lr is used for 1 gpus optimizer_config = dict(grad_clip=None) # learning policy lr_config = dict(policy='step', step=7) total_epochs = 20 checkpoint_config = dict(interval=1, filename_tmpl='tem_epoch_{}.pth') log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')]) # runtime settings dist_params = dict(backend='nccl') log_level = 'INFO' load_from = None resume_from = None workflow = [('train', 1), ('val', 1)] output_config = dict(out=tem_results_dir, output_format='csv')
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/localization/bsn/bsn_tem_400x100_1x16_20e_activitynet_feature.py
# dataset settings dataset_type = 'ActivityNetDataset' data_root = 'data/ActivityNet/activitynet_feature_cuhk/csv_mean_100/' data_root_val = 'data/ActivityNet/activitynet_feature_cuhk/csv_mean_100/' ann_file_train = 'data/ActivityNet/anet_anno_train.json' ann_file_val = 'data/ActivityNet/anet_anno_val.json' ann_file_test = 'data/ActivityNet/anet_anno_test.json' work_dir = 'work_dirs/bsn_400x100_20e_1x16_activitynet_feature/' tem_results_dir = f'{work_dir}/tem_results/' pgm_proposals_dir = f'{work_dir}/pgm_proposals/' pgm_features_dir = f'{work_dir}/pgm_features/' temporal_scale = 100 pgm_proposals_cfg = dict( pgm_proposals_thread=8, temporal_scale=temporal_scale, peak_threshold=0.5) pgm_features_test_cfg = dict( pgm_features_thread=4, top_k=1000, num_sample_start=8, num_sample_end=8, num_sample_action=16, num_sample_interp=3, bsp_boundary_ratio=0.2) pgm_features_train_cfg = dict( pgm_features_thread=4, top_k=500, num_sample_start=8, num_sample_end=8, num_sample_action=16, num_sample_interp=3, bsp_boundary_ratio=0.2)
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/localization/bsn/bsn_pgm_400x100_activitynet_feature.py
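peak_threshold=0.5 above refers to BSN's boundary-candidate rule: a temporal position is kept as a candidate start/end if its boundary probability is a local peak or exceeds peak_threshold times the sequence maximum. A small illustrative sketch of that rule:

import numpy as np

def pick_boundaries(prob, peak_threshold=0.5):
    keep, thr = [], peak_threshold * prob.max()
    for i in range(len(prob)):
        is_peak = 0 < i < len(prob) - 1 and prob[i - 1] < prob[i] > prob[i + 1]
        if is_peak or prob[i] > thr:
            keep.append(i)
    return keep

start_prob = np.array([0.1, 0.7, 0.3, 0.2, 0.8, 0.4])
print(pick_boundaries(start_prob))  # [1, 4]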
# model settings model = dict( type='PEM', pem_feat_dim=32, pem_hidden_dim=256, pem_u_ratio_m=1, pem_u_ratio_l=2, pem_high_temporal_iou_threshold=0.6, pem_low_temporal_iou_threshold=2.2, soft_nms_alpha=0.75, soft_nms_low_threshold=0.65, soft_nms_high_threshold=0.9, post_process_top_k=100) # model training and testing settings train_cfg = None test_cfg = dict(average_clips='score') # dataset settings dataset_type = 'ActivityNetDataset' data_root = 'data/ActivityNet/activitynet_feature_cuhk/csv_mean_100/' data_root_val = 'data/ActivityNet/activitynet_feature_cuhk/csv_mean_100/' ann_file_train = 'data/ActivityNet/anet_anno_train.json' ann_file_val = 'data/ActivityNet/anet_anno_val.json' ann_file_test = 'data/ActivityNet/anet_anno_val.json' work_dir = 'work_dirs/bsn_400x100_20e_1x16_activitynet_feature/' pgm_proposals_dir = f'{work_dir}/pgm_proposals/' pgm_features_dir = f'{work_dir}/pgm_features/' test_pipeline = [ dict( type='LoadProposals', top_k=1000, pgm_proposals_dir=pgm_proposals_dir, pgm_features_dir=pgm_features_dir), dict( type='Collect', keys=['bsp_feature', 'tmin', 'tmax', 'tmin_score', 'tmax_score'], meta_name='video_meta', meta_keys=[ 'video_name', 'duration_second', 'duration_frame', 'annotations', 'feature_frame' ]), dict(type='ToTensor', keys=['bsp_feature']) ] train_pipeline = [ dict( type='LoadProposals', top_k=500, pgm_proposals_dir=pgm_proposals_dir, pgm_features_dir=pgm_features_dir), dict( type='Collect', keys=['bsp_feature', 'reference_temporal_iou'], meta_name='video_meta', meta_keys=[]), dict(type='ToTensor', keys=['bsp_feature', 'reference_temporal_iou']), dict( type='ToDataContainer', fields=(dict(key='bsp_feature', stack=False), dict(key='reference_temporal_iou', stack=False))) ] val_pipeline = [ dict( type='LoadProposals', top_k=1000, pgm_proposals_dir=pgm_proposals_dir, pgm_features_dir=pgm_features_dir), dict( type='Collect', keys=['bsp_feature', 'tmin', 'tmax', 'tmin_score', 'tmax_score'], meta_name='video_meta', meta_keys=[ 'video_name', 'duration_second', 'duration_frame', 'annotations', 'feature_frame' ]), dict(type='ToTensor', keys=['bsp_feature']) ] data = dict( videos_per_gpu=16, workers_per_gpu=8, train_dataloader=dict(drop_last=True), val_dataloader=dict(videos_per_gpu=1), test_dataloader=dict(videos_per_gpu=1), test=dict( type=dataset_type, ann_file=ann_file_test, pipeline=test_pipeline, data_prefix=data_root_val), val=dict( type=dataset_type, ann_file=ann_file_val, pipeline=val_pipeline, data_prefix=data_root_val), train=dict( type=dataset_type, ann_file=ann_file_train, pipeline=train_pipeline, data_prefix=data_root)) # optimizer optimizer = dict( type='Adam', lr=0.01, weight_decay=0.00001) # this lr is used for 1 gpus optimizer_config = dict(grad_clip=None) # learning policy lr_config = dict(policy='step', step=10) total_epochs = 20 checkpoint_config = dict(interval=1, filename_tmpl='pem_epoch_{}.pth') evaluation = dict(interval=1, metrics=['AR@AN']) log_config = dict(interval=50, hooks=[dict(type='TextLoggerHook')]) # runtime settings dist_params = dict(backend='nccl') log_level = 'INFO' load_from = None resume_from = None workflow = [('train', 1)] output_config = dict(out=f'{work_dir}/results.json', output_format='json')
InternVideo-main
Downstream/Open-Set-Action-Recognition/configs/localization/bsn/bsn_pem_400x100_1x16_20e_activitynet_feature.py
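Consistency check between the PGM and PEM configs above: the boundary-sensitive proposal (BSP) feature fed to PEM concatenates the responses sampled around the start, inside the action, and around the end, so pem_feat_dim must equal the sum of the three sample counts from the PGM feature settings:

num_sample_start, num_sample_end, num_sample_action = 8, 8, 16
pem_feat_dim = num_sample_start + num_sample_action + num_sample_end
assert pem_feat_dim == 32  # matches pem_feat_dim=32 in the PEM config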
from itertools import count import os import numpy as np import math import sys import time import datetime import logging from typing import Iterable, Optional import torch import torch.nn as nn import torch.nn.functional as F from timm.data import Mixup from timm.utils import accuracy, ModelEma import utils from alphaction.modeling.utils import cat from alphaction.structures.bounding_box import BoxList from data.ava_eval import do_ava_evaluation import pdb def train_class_batch(model, samples, boxes): outputs = model(samples, boxes) labels = cat([proposal.get_field("labels") for proposal in boxes], dim=0) # [n,80] assert outputs.shape[1] == labels.shape[1], \ "The shape of tensor class logits doesn't match the label tensor." # loss = criterion(outputs, target) batch_size = outputs.shape[0] loss = F.binary_cross_entropy_with_logits(outputs, labels.to(dtype=torch.float32), reduction='mean') loss = loss * batch_size return loss, outputs def get_loss_scale_for_deepspeed(model): optimizer = model.optimizer return optimizer.loss_scale if hasattr(optimizer, "loss_scale") else optimizer.cur_scale def train_one_epoch(model: torch.nn.Module, data_loader: Iterable, optimizer: torch.optim.Optimizer, device: torch.device, epoch: int, loss_scaler, max_norm: float = 0, model_ema: Optional[ModelEma] = None, mixup_fn: Optional[Mixup] = None, log_writer=None, start_steps=None, lr_schedule_values=None, wd_schedule_values=None, num_training_steps_per_epoch=None, update_freq=None): model.train(True) metric_logger = utils.MetricLogger(delimiter=" ") metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}')) metric_logger.add_meter('min_lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}')) header = 'Epoch: [{}]'.format(epoch) print_freq = 10 if loss_scaler is None: model.zero_grad() model.micro_steps = 0 else: optimizer.zero_grad() for data_iter_step, (samples, boxes, _) in enumerate(metric_logger.log_every(data_loader, print_freq, header)): step = data_iter_step // update_freq if step >= num_training_steps_per_epoch: continue it = start_steps + step # global training iteration # update LR & WD only at the first micro-step of each accumulation cycle; the parentheses are required since `or` binds looser than `and` if (lr_schedule_values is not None or wd_schedule_values is not None) and data_iter_step % update_freq == 0: for i, param_group in enumerate(optimizer.param_groups): if lr_schedule_values is not None: param_group["lr"] = lr_schedule_values[it] * param_group["lr_scale"] if wd_schedule_values is not None and param_group["weight_decay"] > 0: param_group["weight_decay"] = wd_schedule_values[it] samples = samples.to(device, non_blocking=True) boxes = [box.to(device=device) for box in boxes] # boxlist # targets = targets.to(device, non_blocking=True) # if mixup_fn is not None: # samples, targets = mixup_fn(samples, targets) if loss_scaler is None: samples = samples.half() loss, _ = train_class_batch( model, samples, boxes) else: with torch.cuda.amp.autocast(): loss, _ = train_class_batch( model, samples, boxes) loss_value = loss.item() if not math.isfinite(loss_value): print("Loss is {}, stopping training".format(loss_value)) sys.exit(1) if loss_scaler is None: loss /= update_freq model.backward(loss) model.step() if (data_iter_step + 1) % update_freq == 0: # model.zero_grad() # Deepspeed will call step() & model.zero_grad() automatically if model_ema is not None: model_ema.update(model) grad_norm = None loss_scale_value = get_loss_scale_for_deepspeed(model) else: # this attribute is added by timm on one optimizer (adahessian) is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order loss /= update_freq grad_norm = loss_scaler(loss, optimizer, clip_grad=max_norm, parameters=model.parameters(), create_graph=is_second_order, update_grad=(data_iter_step + 1) % update_freq == 0) if (data_iter_step + 1) % update_freq == 0: optimizer.zero_grad() if model_ema is not None: model_ema.update(model) loss_scale_value = loss_scaler.state_dict()["scale"] torch.cuda.synchronize() # if mixup_fn is None: # class_acc = (output.max(-1)[-1] == targets).float().mean() # else: # class_acc = None metric_logger.update(loss=loss_value) # metric_logger.update(class_acc=class_acc) metric_logger.update(loss_scale=loss_scale_value) min_lr = 10. max_lr = 0. for group in optimizer.param_groups: min_lr = min(min_lr, group["lr"]) max_lr = max(max_lr, group["lr"]) metric_logger.update(lr=max_lr) metric_logger.update(min_lr=min_lr) weight_decay_value = None for group in optimizer.param_groups: if group["weight_decay"] > 0: weight_decay_value = group["weight_decay"] metric_logger.update(weight_decay=weight_decay_value) metric_logger.update(grad_norm=grad_norm) if log_writer is not None: log_writer.update(loss=loss_value, head="loss") # log_writer.update(class_acc=class_acc, head="loss") log_writer.update(loss_scale=loss_scale_value, head="opt") log_writer.update(lr=max_lr, head="opt") log_writer.update(min_lr=min_lr, head="opt") log_writer.update(weight_decay=weight_decay_value, head="opt") log_writer.update(grad_norm=grad_norm, head="opt") log_writer.set_step() # gather the stats from all processes metric_logger.synchronize_between_processes() print("Averaged stats:", metric_logger) return {k: meter.global_avg for k, meter in metric_logger.meters.items()} class PostProcessor(nn.Module): def forward(self, class_logits, boxes): # boxes should be (#detections,4) # prob should be calculated in a different way.
class_logits = torch.sigmoid(class_logits) # [n,80] # multiply the action probabilities by the detector's box scores box_scores = cat([box.get_field("scores") for box in boxes], dim=0) box_scores = box_scores.reshape(class_logits.shape[0], 1) # [B,1] action_prob = class_logits * box_scores image_shapes = [box.size for box in boxes] boxes_per_image = [len(box) for box in boxes] box_tensors = [a.bbox for a in boxes] action_prob = action_prob.split(boxes_per_image, dim=0) # [rois,80]->[bs,per_roi,80] results = [] for prob, boxes_per_image, image_shape in zip( action_prob, box_tensors, image_shapes ): boxlist = self.prepare_boxlist(boxes_per_image, prob, image_shape) results.append(boxlist) return results def prepare_boxlist(self, boxes, scores, image_shape): boxlist = BoxList(boxes, image_shape, mode="xyxy") boxlist.add_field("scores", scores) return boxlist @torch.no_grad() def validation_one_epoch(data_loader, model, device, output_dir, epoch, log_writer=None): if not utils.is_main_process(): return metric_logger = utils.MetricLogger(delimiter=" ") header = 'Val:' # switch to evaluation mode model.eval() logging.info("Start evaluation on ava_v2.2 dataset({} videos).".format(data_loader.num_samples)) start_time = time.time() cpu_device = torch.device("cpu") results_dict = {} postprocess = PostProcessor() for batch in metric_logger.log_every(data_loader, 10, header): # pdb.set_trace() videos = batch[0] boxes = batch[1] video_ids = batch[2] videos = videos.to(device, non_blocking=True) boxes = [box.to(device=device) for box in boxes] # boxlist # target = target.to(device, non_blocking=True) # compute output with torch.cuda.amp.autocast(): output = model(videos, boxes) # [n,80] output = postprocess(output, boxes) output = [o.to(cpu_device) for o in output] results_dict.update( {video_id: result for video_id, result in zip(video_ids, output)} ) # pdb.set_trace() total_time = time.time() - start_time total_time_str = str(datetime.timedelta(seconds=total_time)) logging.info( "Total inference time: {}".format(total_time_str) ) # convert the results dict into a list indexed by video id video_ids = list(sorted(results_dict.keys())) if len(video_ids) != video_ids[-1] + 1: logging.warning( "Number of videos that were gathered from multiple processes is not " "a contiguous set. Some videos might be missing from the evaluation" ) # convert to a list predictions = [results_dict[i] for i in video_ids] logging.info("Performing ava evaluation") output_folder = os.path.join(output_dir, "inference") os.makedirs(output_folder, exist_ok=True) eval_res = do_ava_evaluation( dataset=data_loader.dataset, predictions=predictions, output_folder=output_folder, ) if log_writer is not None: eval_res, _ = eval_res total_mAP = eval_res['PascalBoxes_Precision/[email protected]'] log_writer.update(map=total_mAP, head="perf", step=epoch) # the functions below are not used in this codebase @torch.no_grad() def final_test(data_loader, model, device, file): criterion = torch.nn.CrossEntropyLoss() metric_logger = utils.MetricLogger(delimiter=" ") header = 'Test:' # switch to evaluation mode model.eval() final_result = [] for batch in metric_logger.log_every(data_loader, 10, header): videos = batch[0] target = batch[1] ids = batch[2] chunk_nb = batch[3] split_nb = batch[4] videos = videos.to(device, non_blocking=True) target = target.to(device, non_blocking=True) # compute output with torch.cuda.amp.autocast(): output = model(videos) loss = criterion(output, target) for i in range(output.size(0)): string = "{} {} {} {} {}\n".format(ids[i], \ str(output.data[i].cpu().numpy().tolist()), \ str(int(target[i].cpu().numpy())), \ str(int(chunk_nb[i].cpu().numpy())), \ str(int(split_nb[i].cpu().numpy()))) final_result.append(string) acc1, acc5 = accuracy(output, target, topk=(1, 5)) batch_size = videos.shape[0] metric_logger.update(loss=loss.item()) metric_logger.meters['acc1'].update(acc1.item(), n=batch_size) metric_logger.meters['acc5'].update(acc5.item(), n=batch_size) if not os.path.exists(file): os.mknod(file) with open(file, 'w') as f: f.write("{}, {}\n".format(acc1, acc5)) for line in final_result: f.write(line) # gather the stats from all processes metric_logger.synchronize_between_processes() print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}' .format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss)) return {k: meter.global_avg for k, meter in metric_logger.meters.items()} def merge(eval_path, num_tasks): dict_feats = {} dict_label = {} dict_pos = {} print("Reading individual output files") for x in range(num_tasks): file = os.path.join(eval_path, str(x) + '.txt') lines = open(file, 'r').readlines()[1:] for line in lines: line = line.strip() name = line.split('[')[0] label = line.split(']')[1].split(' ')[1] chunk_nb = line.split(']')[1].split(' ')[2] split_nb = line.split(']')[1].split(' ')[3] data = np.fromstring(line.split('[')[1].split(']')[0], dtype=float, sep=',') if name not in dict_feats: dict_feats[name] = [] dict_label[name] = 0 dict_pos[name] = [] if chunk_nb + split_nb in dict_pos[name]: continue dict_feats[name].append(data) dict_pos[name].append(chunk_nb + split_nb) dict_label[name] = label print("Computing final results") input_lst = [] print(len(dict_feats)) for i, item in enumerate(dict_feats): input_lst.append([i, item, dict_feats[item], dict_label[item]]) from multiprocessing import Pool p = Pool(64) ans = p.map(compute_video, input_lst) top1 = [x[1] for x in ans] top5 = [x[2] for x in ans] pred = [x[0] for x in ans] label = [x[3] for x in ans] final_top1, final_top5 = np.mean(top1), np.mean(top5) return final_top1 * 100, final_top5 * 100 def compute_video(lst): i, video_id, data, label = lst feat = [x for x in data] feat = np.mean(feat, axis=0) pred = np.argmax(feat) top1 = (int(pred) == int(label)) * 1.0 top5 = (int(label) in
np.argsort(-feat)[:5]) * 1.0 return [pred, top1, top5, int(label)]
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/engine_for_finetuning.py
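A numeric check of the loss scaling in train_class_batch above: binary_cross_entropy_with_logits with reduction='mean' averages over batch * classes, so multiplying by batch_size is equivalent to summing and dividing by the number of classes, i.e. a per-class-normalized sum over the batch:

import torch
import torch.nn.functional as F

logits = torch.randn(4, 80)
labels = torch.randint(0, 2, (4, 80)).float()
scaled = F.binary_cross_entropy_with_logits(logits, labels, reduction='mean') * 4
summed = F.binary_cross_entropy_with_logits(logits, labels, reduction='sum') / 80
assert torch.allclose(scaled, summed)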
import numpy as np class TubeMaskingGenerator: def __init__(self, input_size, mask_ratio): self.frames, self.height, self.width = input_size self.num_patches_per_frame = self.height * self.width self.total_patches = self.frames * self.num_patches_per_frame self.num_masks_per_frame = int(mask_ratio * self.num_patches_per_frame) self.total_masks = self.frames * self.num_masks_per_frame def __repr__(self): repr_str = "Masks: total patches {}, mask patches {}".format( self.total_patches, self.total_masks ) return repr_str def __call__(self): mask_per_frame = np.hstack([ np.zeros(self.num_patches_per_frame - self.num_masks_per_frame), np.ones(self.num_masks_per_frame), ]) np.random.shuffle(mask_per_frame) mask = np.tile(mask_per_frame, (self.frames, 1)).flatten() return mask
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/masking_generator.py
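Usage sketch for TubeMaskingGenerator (assuming masking_generator.py is on the path): for a 16-frame clip with tubelet size 2 and 14x14 patches per frame, input_size is (8, 14, 14), and the same shuffled spatial mask is repeated over every temporal slice, which is what makes the masking "tube"-shaped:

from masking_generator import TubeMaskingGenerator

gen = TubeMaskingGenerator(input_size=(8, 14, 14), mask_ratio=0.75)
mask = gen()
# 8 * 14 * 14 = 1568 patches in total; int(0.75 * 196) = 147 masked per slice
print(mask.shape, mask.sum())  # (1568,) 1176.0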
import torch import torchvision.transforms.functional as F import warnings import random import numpy as np import torchvision from PIL import Image, ImageOps import numbers class GroupRandomCrop(object): def __init__(self, size): if isinstance(size, numbers.Number): self.size = (int(size), int(size)) else: self.size = size def __call__(self, img_tuple): img_group, label = img_tuple w, h = img_group[0].size th, tw = self.size out_images = list() x1 = random.randint(0, w - tw) y1 = random.randint(0, h - th) for img in img_group: assert(img.size[0] == w and img.size[1] == h) if w == tw and h == th: out_images.append(img) else: out_images.append(img.crop((x1, y1, x1 + tw, y1 + th))) return (out_images, label) class GroupCenterCrop(object): def __init__(self, size): self.worker = torchvision.transforms.CenterCrop(size) def __call__(self, img_tuple): img_group, label = img_tuple return ([self.worker(img) for img in img_group], label) class GroupNormalize(object): def __init__(self, mean, std): self.mean = mean self.std = std def __call__(self, tensor_tuple): tensor, label = tensor_tuple rep_mean = self.mean * (tensor.size()[0]//len(self.mean)) rep_std = self.std * (tensor.size()[0]//len(self.std)) # TODO: make efficient for t, m, s in zip(tensor, rep_mean, rep_std): t.sub_(m).div_(s) return (tensor, label) class GroupGrayScale(object): def __init__(self, size): self.worker = torchvision.transforms.Grayscale(size) def __call__(self, img_tuple): img_group, label = img_tuple return ([self.worker(img) for img in img_group], label) class GroupScale(object): """ Rescales the input PIL.Image to the given 'size'. 'size' will be the size of the smaller edge. For example, if height > width, then image will be rescaled to (size * height / width, size) size: size of the smaller edge interpolation: Default: PIL.Image.BILINEAR """ def __init__(self, size, interpolation=Image.BILINEAR): self.worker = torchvision.transforms.Resize(size, interpolation) def __call__(self, img_tuple): img_group, label = img_tuple return ([self.worker(img) for img in img_group], label) class GroupMultiScaleCrop(object): def __init__(self, input_size, scales=None, max_distort=1, fix_crop=True, more_fix_crop=True): self.scales = scales if scales is not None else [1, .875, .75, .66] self.max_distort = max_distort self.fix_crop = fix_crop self.more_fix_crop = more_fix_crop self.input_size = input_size if not isinstance(input_size, int) else [input_size, input_size] self.interpolation = Image.BILINEAR def __call__(self, img_tuple): img_group, label = img_tuple im_size = img_group[0].size crop_w, crop_h, offset_w, offset_h = self._sample_crop_size(im_size) crop_img_group = [img.crop((offset_w, offset_h, offset_w + crop_w, offset_h + crop_h)) for img in img_group] ret_img_group = [img.resize((self.input_size[0], self.input_size[1]), self.interpolation) for img in crop_img_group] return (ret_img_group, label) def _sample_crop_size(self, im_size): image_w, image_h = im_size[0], im_size[1] # find a crop size base_size = min(image_w, image_h) crop_sizes = [int(base_size * x) for x in self.scales] crop_h = [self.input_size[1] if abs(x - self.input_size[1]) < 3 else x for x in crop_sizes] crop_w = [self.input_size[0] if abs(x - self.input_size[0]) < 3 else x for x in crop_sizes] pairs = [] for i, h in enumerate(crop_h): for j, w in enumerate(crop_w): if abs(i - j) <= self.max_distort: pairs.append((w, h)) crop_pair = random.choice(pairs) if not self.fix_crop: w_offset = random.randint(0, image_w - crop_pair[0]) h_offset = random.randint(0, image_h - crop_pair[1]) else: w_offset, h_offset = self._sample_fix_offset(image_w, image_h, crop_pair[0], crop_pair[1]) return crop_pair[0], crop_pair[1], w_offset, h_offset def _sample_fix_offset(self, image_w, image_h, crop_w, crop_h): offsets = self.fill_fix_offset(self.more_fix_crop, image_w, image_h, crop_w, crop_h) return random.choice(offsets) @staticmethod def fill_fix_offset(more_fix_crop, image_w, image_h, crop_w, crop_h): w_step = (image_w - crop_w) // 4 h_step = (image_h - crop_h) // 4 ret = list() ret.append((0, 0)) # upper left ret.append((4 * w_step, 0)) # upper right ret.append((0, 4 * h_step)) # lower left ret.append((4 * w_step, 4 * h_step)) # lower right ret.append((2 * w_step, 2 * h_step)) # center if more_fix_crop: ret.append((0, 2 * h_step)) # center left ret.append((4 * w_step, 2 * h_step)) # center right ret.append((2 * w_step, 4 * h_step)) # lower center ret.append((2 * w_step, 0 * h_step)) # upper center ret.append((1 * w_step, 1 * h_step)) # upper left quarter ret.append((3 * w_step, 1 * h_step)) # upper right quarter ret.append((1 * w_step, 3 * h_step)) # lower left quarter ret.append((3 * w_step, 3 * h_step)) # lower right quarter return ret class Stack(object): def __init__(self, roll=False): self.roll = roll def __call__(self, img_tuple): img_group, label = img_tuple if img_group[0].mode == 'L': return (np.concatenate([np.expand_dims(x, 2) for x in img_group], axis=2), label) elif img_group[0].mode == 'RGB': if self.roll: return (np.concatenate([np.array(x)[:, :, ::-1] for x in img_group], axis=2), label) else: return (np.concatenate(img_group, axis=2), label) class ToTorchFormatTensor(object): """ Converts a PIL.Image (RGB) or numpy.ndarray (H x W x C) in the range [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0] """ def __init__(self, div=True): self.div = div def __call__(self, pic_tuple): pic, label = pic_tuple if isinstance(pic, np.ndarray): # handle numpy array img = torch.from_numpy(pic).permute(2, 0, 1).contiguous() else: # handle PIL Image img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes())) img = img.view(pic.size[1], pic.size[0], len(pic.mode)) # put it from HWC to CHW format # yikes, this transpose takes 80% of the loading time/CPU img = img.transpose(0, 1).transpose(0, 2).contiguous() return (img.float().div(255.) if self.div else img.float(), label) class IdentityTransform(object): def __call__(self, data): return data
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/transforms.py
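A sketch of composing these group transforms the way the VideoMAE pipelines do (assuming transforms.py is importable): every op consumes and returns an (image_list, label) tuple, and Stack concatenates the frames along the channel axis:

import torchvision
from PIL import Image
from transforms import GroupScale, GroupCenterCrop, Stack, ToTorchFormatTensor

pipeline = torchvision.transforms.Compose([
    GroupScale(256),          # rescale so the smaller edge is 256
    GroupCenterCrop(224),
    Stack(roll=False),        # 16 RGB frames -> H x W x 48 array
    ToTorchFormatTensor(div=True),
])
frames = [Image.new('RGB', (320, 240)) for _ in range(16)]
tensor, label = pipeline((frames, 0))
print(tensor.shape)  # torch.Size([48, 224, 224])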
import math import sys from typing import Iterable import torch import torch.nn as nn import utils from einops import rearrange from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD def train_one_epoch(model: torch.nn.Module, data_loader: Iterable, optimizer: torch.optim.Optimizer, device: torch.device, epoch: int, loss_scaler, max_norm: float = 0, patch_size: int = 16, normlize_target: bool = True, log_writer=None, lr_scheduler=None, start_steps=None, lr_schedule_values=None, wd_schedule_values=None): model.train() metric_logger = utils.MetricLogger(delimiter=" ") metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}')) metric_logger.add_meter('min_lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}')) header = 'Epoch: [{}]'.format(epoch) print_freq = 10 loss_func = nn.MSELoss() for step, batch in enumerate(metric_logger.log_every(data_loader, print_freq, header)): # assign learning rate & weight decay for each step it = start_steps + step # global training iteration if lr_schedule_values is not None or wd_schedule_values is not None: for i, param_group in enumerate(optimizer.param_groups): if lr_schedule_values is not None: param_group["lr"] = lr_schedule_values[it] * param_group["lr_scale"] if wd_schedule_values is not None and param_group["weight_decay"] > 0: param_group["weight_decay"] = wd_schedule_values[it] videos, bool_masked_pos = batch videos = videos.to(device, non_blocking=True) bool_masked_pos = bool_masked_pos.to(device, non_blocking=True).flatten(1).to(torch.bool) with torch.no_grad(): # calculate the predict label mean = torch.as_tensor(IMAGENET_DEFAULT_MEAN).to(device)[None, :, None, None, None] std = torch.as_tensor(IMAGENET_DEFAULT_STD).to(device)[None, :, None, None, None] unnorm_videos = videos * std + mean # in [0, 1] if normlize_target: videos_squeeze = rearrange(unnorm_videos, 'b c (t p0) (h p1) (w p2) -> b (t h w) (p0 p1 p2) c', p0=2, p1=patch_size, p2=patch_size) videos_norm = (videos_squeeze - videos_squeeze.mean(dim=-2, keepdim=True) ) / (videos_squeeze.var(dim=-2, unbiased=True, keepdim=True).sqrt() + 1e-6) # we find that the mean is about 0.48 and standard deviation is about 0.08. videos_patch = rearrange(videos_norm, 'b n p c -> b n (p c)') else: videos_patch = rearrange(unnorm_videos, 'b c (t p0) (h p1) (w p2) -> b (t h w) (p0 p1 p2 c)', p0=2, p1=patch_size, p2=patch_size) B, _, C = videos_patch.shape labels = videos_patch[bool_masked_pos].reshape(B, -1, C) with torch.cuda.amp.autocast(): outputs = model(videos, bool_masked_pos) loss = loss_func(input=outputs, target=labels) loss_value = loss.item() if not math.isfinite(loss_value): print("Loss is {}, stopping training".format(loss_value)) sys.exit(1) optimizer.zero_grad() # this attribute is added by timm on one optimizer (adahessian) is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order grad_norm = loss_scaler(loss, optimizer, clip_grad=max_norm, parameters=model.parameters(), create_graph=is_second_order) loss_scale_value = loss_scaler.state_dict()["scale"] torch.cuda.synchronize() metric_logger.update(loss=loss_value) metric_logger.update(loss_scale=loss_scale_value) min_lr = 10. max_lr = 0. 
for group in optimizer.param_groups: min_lr = min(min_lr, group["lr"]) max_lr = max(max_lr, group["lr"]) metric_logger.update(lr=max_lr) metric_logger.update(min_lr=min_lr) weight_decay_value = None for group in optimizer.param_groups: if group["weight_decay"] > 0: weight_decay_value = group["weight_decay"] metric_logger.update(weight_decay=weight_decay_value) metric_logger.update(grad_norm=grad_norm) if log_writer is not None: log_writer.update(loss=loss_value, head="loss") log_writer.update(loss_scale=loss_scale_value, head="opt") log_writer.update(lr=max_lr, head="opt") log_writer.update(min_lr=min_lr, head="opt") log_writer.update(weight_decay=weight_decay_value, head="opt") log_writer.update(grad_norm=grad_norm, head="opt") log_writer.set_step() if lr_scheduler is not None: lr_scheduler.step_update(start_steps + step) # gather the stats from all processes metric_logger.synchronize_between_processes() print("Averaged stats:", metric_logger) return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/engine_for_pretraining.py
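A minimal sketch of the normalized-target construction in train_one_epoch above: the video is cut into 2 x 16 x 16 tubelets and each tubelet is standardized by its own mean and variance before serving as the reconstruction target (numbers chosen to match patch_size=16 and a 16-frame 224x224 clip):

import torch
from einops import rearrange

videos = torch.rand(2, 3, 16, 224, 224)  # B C T H W, stand-in for frames in [0, 1]
patches = rearrange(videos, 'b c (t p0) (h p1) (w p2) -> b (t h w) (p0 p1 p2) c',
                    p0=2, p1=16, p2=16)
patches = (patches - patches.mean(dim=-2, keepdim=True)) / (
    patches.var(dim=-2, unbiased=True, keepdim=True).sqrt() + 1e-6)
targets = rearrange(patches, 'b n p c -> b n (p c)')
print(targets.shape)  # torch.Size([2, 1568, 1536])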
import math import torch import torch.nn as nn import torch.nn.functional as F from functools import partial from modeling_finetune import Block, _cfg, PatchEmbed, get_sinusoid_encoding_table from timm.models.registry import register_model from timm.models.layers import trunc_normal_ as __call_trunc_normal_ def trunc_normal_(tensor, mean=0., std=1.): __call_trunc_normal_(tensor, mean=mean, std=std, a=-std, b=std) __all__ = [ 'pretrain_videomae_base_patch16_224', 'pretrain_videomae_large_patch16_224', ] class PretrainVisionTransformerEncoder(nn.Module): """ Vision Transformer with support for patch or hybrid CNN input stage """ def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=0, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm, init_values=None, tubelet_size=2, use_learnable_pos_emb=False): super().__init__() self.num_classes = num_classes self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models self.patch_embed = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, tubelet_size=tubelet_size) num_patches = self.patch_embed.num_patches # TODO: Add the cls token if use_learnable_pos_emb: self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) else: # sine-cosine positional embeddings self.pos_embed = get_sinusoid_encoding_table(num_patches, embed_dim) dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule self.blocks = nn.ModuleList([ Block( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, init_values=init_values) for i in range(depth)]) self.norm = norm_layer(embed_dim) self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() if use_learnable_pos_emb: trunc_normal_(self.pos_embed, std=.02) self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): nn.init.xavier_uniform_(m.weight) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) def get_num_layers(self): return len(self.blocks) @torch.jit.ignore def no_weight_decay(self): return {'pos_embed', 'cls_token'} def get_classifier(self): return self.head def reset_classifier(self, num_classes, global_pool=''): self.num_classes = num_classes self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() def forward_features(self, x, mask): _, _, T, _, _ = x.shape x = self.patch_embed(x) x = x + self.pos_embed.type_as(x).to(x.device).clone().detach() B, _, C = x.shape x_vis = x[~mask].reshape(B, -1, C) # ~mask means visible for blk in self.blocks: x_vis = blk(x_vis) x_vis = self.norm(x_vis) return x_vis def forward(self, x, mask): x = self.forward_features(x, mask) x = self.head(x) return x class PretrainVisionTransformerDecoder(nn.Module): """ Vision Transformer with support for patch or hybrid CNN input stage """ def __init__(self, patch_size=16, num_classes=768, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm, init_values=None, num_patches=196, tubelet_size=2 ): super().__init__() self.num_classes = num_classes assert num_classes == 3 * 
tubelet_size * patch_size ** 2 self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models self.patch_size = patch_size dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule self.blocks = nn.ModuleList([ Block( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, init_values=init_values) for i in range(depth)]) self.norm = norm_layer(embed_dim) self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): nn.init.xavier_uniform_(m.weight) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) def get_num_layers(self): return len(self.blocks) @torch.jit.ignore def no_weight_decay(self): return {'pos_embed', 'cls_token'} def get_classifier(self): return self.head def reset_classifier(self, num_classes, global_pool=''): self.num_classes = num_classes self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() def forward(self, x, return_token_num): for blk in self.blocks: x = blk(x) if return_token_num > 0: x = self.head(self.norm(x[:, -return_token_num:])) # only return the mask tokens predict pixels else: x = self.head(self.norm(x)) return x class PretrainVisionTransformer(nn.Module): """ Vision Transformer with support for patch or hybrid CNN input stage """ def __init__(self, img_size=224, patch_size=16, encoder_in_chans=3, encoder_num_classes=0, encoder_embed_dim=768, encoder_depth=12, encoder_num_heads=12, decoder_num_classes=1536, # decoder_num_classes=768, decoder_embed_dim=512, decoder_depth=8, decoder_num_heads=8, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm, init_values=0., use_learnable_pos_emb=False, tubelet_size=2, num_classes=0, # avoid the error from create_fn in timm in_chans=0, # avoid the error from create_fn in timm ): super().__init__() self.encoder = PretrainVisionTransformerEncoder( img_size=img_size, patch_size=patch_size, in_chans=encoder_in_chans, num_classes=encoder_num_classes, embed_dim=encoder_embed_dim, depth=encoder_depth, num_heads=encoder_num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, drop_path_rate=drop_path_rate, norm_layer=norm_layer, init_values=init_values, tubelet_size=tubelet_size, use_learnable_pos_emb=use_learnable_pos_emb) self.decoder = PretrainVisionTransformerDecoder( patch_size=patch_size, num_patches=self.encoder.patch_embed.num_patches, num_classes=decoder_num_classes, embed_dim=decoder_embed_dim, depth=decoder_depth, num_heads=decoder_num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop_rate=drop_rate, attn_drop_rate=attn_drop_rate, drop_path_rate=drop_path_rate, norm_layer=norm_layer, init_values=init_values, tubelet_size=tubelet_size) self.encoder_to_decoder = nn.Linear(encoder_embed_dim, decoder_embed_dim, bias=False) self.mask_token = nn.Parameter(torch.zeros(1, 1, decoder_embed_dim)) self.pos_embed = get_sinusoid_encoding_table(self.encoder.patch_embed.num_patches, decoder_embed_dim) trunc_normal_(self.mask_token, std=.02) def _init_weights(self, m): if isinstance(m, nn.Linear): nn.init.xavier_uniform_(m.weight) 
if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) def get_num_layers(self): return len(self.blocks) @torch.jit.ignore def no_weight_decay(self): return {'pos_embed', 'cls_token', 'mask_token'} def forward(self, x, mask): _, _, T, _, _ = x.shape x_vis = self.encoder(x, mask) # [B, N_vis, C_e] x_vis = self.encoder_to_decoder(x_vis) # [B, N_vis, C_d] B, N, C = x_vis.shape # we don't unshuffle the correct visible token order, # but shuffle the pos embedding accordingly. expand_pos_embed = self.pos_embed.expand(B, -1, -1).type_as(x).to(x.device).clone().detach() pos_emd_vis = expand_pos_embed[~mask].reshape(B, -1, C) pos_emd_mask = expand_pos_embed[mask].reshape(B, -1, C) x_full = torch.cat([x_vis + pos_emd_vis, self.mask_token + pos_emd_mask], dim=1) # [B, N, C_d] x = self.decoder(x_full, pos_emd_mask.shape[1]) # [B, N_mask, 3 * 16 * 16] return x @register_model def pretrain_mae_small_patch16_224(pretrained=False, **kwargs): model = PretrainVisionTransformer( img_size=224, patch_size=16, encoder_embed_dim=384, encoder_depth=12, encoder_num_heads=6, encoder_num_classes=0, decoder_num_classes=1536, decoder_embed_dim=192, decoder_num_heads=3, mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) model.default_cfg = _cfg() if pretrained: checkpoint = torch.load( kwargs["init_ckpt"], map_location="cpu" ) model.load_state_dict(checkpoint["model"]) return model @register_model def pretrain_videomae_base_patch16_224(pretrained=False, **kwargs): model = PretrainVisionTransformer( img_size=224, patch_size=16, encoder_embed_dim=768, encoder_depth=12, encoder_num_heads=12, encoder_num_classes=0, decoder_num_classes=1536, decoder_embed_dim=384, decoder_num_heads=6, mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) model.default_cfg = _cfg() if pretrained: checkpoint = torch.load( kwargs["init_ckpt"], map_location="cpu" ) model.load_state_dict(checkpoint["model"]) return model @register_model def pretrain_videomae_large_patch16_224(pretrained=False, **kwargs): model = PretrainVisionTransformer( img_size=224, patch_size=16, encoder_embed_dim=1024, encoder_depth=24, encoder_num_heads=16, encoder_num_classes=0, decoder_num_classes=1536, decoder_embed_dim=512, decoder_num_heads=8, mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) model.default_cfg = _cfg() if pretrained: checkpoint = torch.load( kwargs["init_ckpt"], map_location="cpu" ) model.load_state_dict(checkpoint["model"]) return model
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/modeling_pretrain.py
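Why decoder_num_classes=1536 in every factory function above: each decoder token predicts the raw pixels of one tubelet, i.e. 3 color channels times tubelet_size 2 times a 16x16 patch, which is exactly the assert in PretrainVisionTransformerDecoder.__init__:

tubelet_size, patch_size, channels = 2, 16, 3
decoder_num_classes = channels * tubelet_size * patch_size ** 2
assert decoder_num_classes == 1536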
# -*- coding: utf-8 -*- import argparse import numpy as np import torch import torch.backends.cudnn as cudnn from PIL import Image from pathlib import Path from timm.models import create_model from datasets import DataAugmentationForVideoMAE from torchvision.transforms import ToPILImage from einops import rearrange from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from decord import VideoReader, cpu from torchvision import transforms from transforms import * from masking_generator import TubeMaskingGenerator class DataAugmentationForVideoMAE(object): def __init__(self, args): self.input_mean = [0.485, 0.456, 0.406] # IMAGENET_DEFAULT_MEAN self.input_std = [0.229, 0.224, 0.225] # IMAGENET_DEFAULT_STD normalize = GroupNormalize(self.input_mean, self.input_std) self.train_augmentation = GroupCenterCrop(args.input_size) self.transform = transforms.Compose([ self.train_augmentation, Stack(roll=False), ToTorchFormatTensor(div=True), normalize, ]) if args.mask_type == 'tube': self.masked_position_generator = TubeMaskingGenerator( args.window_size, args.mask_ratio ) def __call__(self, images): process_data, _ = self.transform(images) return process_data, self.masked_position_generator() def __repr__(self): repr = "(DataAugmentationForVideoMAE,\n" repr += " transform = %s,\n" % str(self.transform) repr += " Masked position generator = %s,\n" % str(self.masked_position_generator) repr += ")" return repr def get_args(): parser = argparse.ArgumentParser('VideoMAE visualization reconstruction script', add_help=False) parser.add_argument('img_path', type=str, help='input video path') parser.add_argument('save_path', type=str, help='save video path') parser.add_argument('model_path', type=str, help='checkpoint path of model') parser.add_argument('--mask_type', default='random', choices=['random', 'tube'], type=str, help='masking strategy for video tokens/patches') parser.add_argument('--num_frames', type=int, default=16) parser.add_argument('--sampling_rate', type=int, default=4) parser.add_argument('--decoder_depth', default=4, type=int, help='depth of decoder') parser.add_argument('--input_size', default=224, type=int, help='videos input size for backbone') parser.add_argument('--device', default='cuda:0', help='device to use for training / testing') parser.add_argument('--imagenet_default_mean_and_std', default=True, action='store_true') parser.add_argument('--mask_ratio', default=0.75, type=float, help='ratio of the visual tokens/patches that need to be masked') # Model parameters parser.add_argument('--model', default='pretrain_videomae_base_patch16_224', type=str, metavar='MODEL', help='Name of model to visualize') parser.add_argument('--drop_path', type=float, default=0.0, metavar='PCT', help='Drop path rate (default: 0.0)') return parser.parse_args() def get_model(args): print(f"Creating model: {args.model}") model = create_model( args.model, pretrained=False, drop_path_rate=args.drop_path, drop_block_rate=None, decoder_depth=args.decoder_depth ) return model def main(args): print(args) device = torch.device(args.device) cudnn.benchmark = True model = get_model(args) patch_size = model.encoder.patch_embed.patch_size print("Patch size = %s" % str(patch_size)) args.window_size = (args.num_frames // 2, args.input_size // patch_size[0], args.input_size // patch_size[1]) args.patch_size = patch_size model.to(device) checkpoint = torch.load(args.model_path, map_location='cpu') model.load_state_dict(checkpoint['model']) model.eval() if args.save_path:
Path(args.save_path).mkdir(parents=True, exist_ok=True) with open(args.img_path, 'rb') as f: vr = VideoReader(f, ctx=cpu(0)) duration = len(vr) new_length = 1 new_step = 1 skip_length = new_length * new_step # frame_id_list = [1, 5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45, 49, 53, 57, 61] tmp = np.arange(0,32, 2) + 60 frame_id_list = tmp.tolist() # average_duration = (duration - skip_length + 1) // args.num_frames # if average_duration > 0: # frame_id_list = np.multiply(list(range(args.num_frames)), # average_duration) # frame_id_list = frame_id_list + np.random.randint(average_duration, # size=args.num_frames) video_data = vr.get_batch(frame_id_list).asnumpy() print(video_data.shape) img = [Image.fromarray(video_data[vid, :, :, :]).convert('RGB') for vid, _ in enumerate(frame_id_list)] transforms = DataAugmentationForVideoMAE(args) img, bool_masked_pos = transforms((img, None)) # T*C,H,W # print(img.shape) img = img.view((args.num_frames , 3) + img.size()[-2:]).transpose(0,1) # T*C,H,W -> T,C,H,W -> C,T,H,W # img = img.view(( -1 , args.num_frames) + img.size()[-2:]) bool_masked_pos = torch.from_numpy(bool_masked_pos) with torch.no_grad(): # img = img[None, :] # bool_masked_pos = bool_masked_pos[None, :] img = img.unsqueeze(0) print(img.shape) bool_masked_pos = bool_masked_pos.unsqueeze(0) img = img.to(device, non_blocking=True) bool_masked_pos = bool_masked_pos.to(device, non_blocking=True).flatten(1).to(torch.bool) outputs = model(img, bool_masked_pos) #save original video mean = torch.as_tensor(IMAGENET_DEFAULT_MEAN).to(device)[None, :, None, None, None] std = torch.as_tensor(IMAGENET_DEFAULT_STD).to(device)[None, :, None, None, None] ori_img = img * std + mean # in [0, 1] imgs = [ToPILImage()(ori_img[0,:,vid,:,:].cpu()) for vid, _ in enumerate(frame_id_list) ] for id, im in enumerate(imgs): im.save(f"{args.save_path}/ori_img{id}.jpg") img_squeeze = rearrange(ori_img, 'b c (t p0) (h p1) (w p2) -> b (t h w) (p0 p1 p2) c', p0=2, p1=patch_size[0], p2=patch_size[0]) img_norm = (img_squeeze - img_squeeze.mean(dim=-2, keepdim=True)) / (img_squeeze.var(dim=-2, unbiased=True, keepdim=True).sqrt() + 1e-6) img_patch = rearrange(img_norm, 'b n p c -> b n (p c)') img_patch[bool_masked_pos] = outputs #make mask mask = torch.ones_like(img_patch) mask[bool_masked_pos] = 0 mask = rearrange(mask, 'b n (p c) -> b n p c', c=3) mask = rearrange(mask, 'b (t h w) (p0 p1 p2) c -> b c (t p0) (h p1) (w p2) ', p0=2, p1=patch_size[0], p2=patch_size[1], h=14, w=14) #save reconstruction video rec_img = rearrange(img_patch, 'b n (p c) -> b n p c', c=3) # Notice: To visualize the reconstruction video, we add the predict and the original mean and var of each patch. rec_img = rec_img * (img_squeeze.var(dim=-2, unbiased=True, keepdim=True).sqrt() + 1e-6) + img_squeeze.mean(dim=-2, keepdim=True) rec_img = rearrange(rec_img, 'b (t h w) (p0 p1 p2) c -> b c (t p0) (h p1) (w p2)', p0=2, p1=patch_size[0], p2=patch_size[1], h=14, w=14) imgs = [ ToPILImage()(rec_img[0, :, vid, :, :].cpu().clamp(0,0.996)) for vid, _ in enumerate(frame_id_list) ] for id, im in enumerate(imgs): im.save(f"{args.save_path}/rec_img{id}.jpg") #save masked video img_mask = rec_img * mask imgs = [ToPILImage()(img_mask[0, :, vid, :, :].cpu()) for vid, _ in enumerate(frame_id_list)] for id, im in enumerate(imgs): im.save(f"{args.save_path}/mask_img{id}.jpg") if __name__ == '__main__': opts = get_args() main(opts)
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/run_videomae_vis.py
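The hard-coded sampling in main() above picks 16 frames at stride 2 starting from frame index 60 (the commented-out block shows the uniform sampling it replaced). Spelled out, so the assumed clip length is explicit:

import numpy as np

frame_id_list = (np.arange(0, 32, 2) + 60).tolist()
print(len(frame_id_list), frame_id_list[:4], frame_id_list[-1])  # 16 [60, 62, 64, 66] 90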
import os

from torchvision import transforms

from transforms import *
from masking_generator import TubeMaskingGenerator
from kinetics import VideoClsDataset, VideoMAE
from data.ava import AVAVideoDataset, KineticsDataset, AKDataset
from data.transforms import TransformsCfg
import alphaction.config.paths_catalog as paths_catalog
from alphaction.dataset.collate_batch import batch_different_videos


class DataAugmentationForVideoMAE(object):

    def __init__(self, args):
        self.input_mean = [0.485, 0.456, 0.406]  # IMAGENET_DEFAULT_MEAN
        self.input_std = [0.229, 0.224, 0.225]  # IMAGENET_DEFAULT_STD
        normalize = GroupNormalize(self.input_mean, self.input_std)
        self.train_augmentation = GroupMultiScaleCrop(args.input_size, [1, .875, .75, .66])
        self.transform = transforms.Compose([
            self.train_augmentation,
            Stack(roll=False),
            ToTorchFormatTensor(div=True),
            normalize,
        ])
        if args.mask_type == 'tube':
            self.masked_position_generator = TubeMaskingGenerator(
                args.window_size, args.mask_ratio
            )

    def __call__(self, images):
        process_data, _ = self.transform(images)
        return process_data, self.masked_position_generator()

    def __repr__(self):
        repr = "(DataAugmentationForVideoMAE,\n"
        repr += "  transform = %s,\n" % str(self.transform)
        repr += "  Masked position generator = %s,\n" % str(self.masked_position_generator)
        repr += ")"
        return repr


def build_pretraining_dataset(args):
    transform = DataAugmentationForVideoMAE(args)
    dataset = VideoMAE(
        root=None,
        setting=args.data_path,
        video_ext='mp4',
        is_color=True,
        modality='rgb',
        new_length=args.num_frames,
        new_step=args.sampling_rate,
        transform=transform,
        temporal_jitter=False,
        video_loader=True,
        use_decord=True,
        lazy_init=False)
    print("Data Aug = %s" % str(transform))
    return dataset


def build_dataset(is_train, test_mode, args):
    if args.data_set == 'Kinetics-400':
        mode = None
        anno_path = None
        if is_train:
            mode = 'train'
            anno_path = os.path.join(args.data_path, 'train.csv')
        elif test_mode:
            mode = 'test'
            anno_path = os.path.join(args.data_path, 'val.csv')
        else:
            mode = 'validation'
            anno_path = os.path.join(args.data_path, 'test.csv')

        dataset = VideoClsDataset(
            anno_path=anno_path,
            data_path='/',
            mode=mode,
            clip_len=args.num_frames,
            frame_sample_rate=args.sampling_rate,
            num_segment=1,
            test_num_segment=args.test_num_segment,
            test_num_crop=args.test_num_crop,
            num_crop=1 if not test_mode else 3,
            keep_aspect_ratio=True,
            crop_size=args.input_size,
            short_side_size=args.short_side_size,
            new_height=256,
            new_width=320,
            args=args)
        nb_classes = 400

    elif args.data_set == 'SSV2':
        mode = None
        anno_path = None
        if is_train:
            mode = 'train'
            anno_path = os.path.join(args.data_path, 'train.csv')
        elif test_mode:
            mode = 'test'
            anno_path = os.path.join(args.data_path, 'val.csv')
        else:
            mode = 'validation'
            anno_path = os.path.join(args.data_path, 'test.csv')

        dataset = VideoClsDataset(
            anno_path=anno_path,
            data_path='/',
            mode=mode,
            clip_len=args.num_frames,
            frame_sample_rate=args.sampling_rate,
            num_segment=1,
            test_num_segment=args.test_num_segment,
            test_num_crop=args.test_num_crop,
            num_crop=1 if not test_mode else 3,
            keep_aspect_ratio=True,
            crop_size=args.input_size,
            short_side_size=args.short_side_size,
            new_height=256,
            new_width=320,
            args=args)
        nb_classes = 174

    elif args.data_set == 'UCF101':
        mode = None
        anno_path = None
        if is_train:
            mode = 'train'
            anno_path = os.path.join(args.data_path, 'train.csv')
        elif test_mode:
            mode = 'test'
            anno_path = os.path.join(args.data_path, 'val.csv')
        else:
            mode = 'validation'
            anno_path = os.path.join(args.data_path, 'test.csv')

        dataset = VideoClsDataset(
            anno_path=anno_path,
            data_path='/',
            mode=mode,
            clip_len=args.num_frames,
            frame_sample_rate=args.sampling_rate,
            num_segment=1,
            test_num_segment=args.test_num_segment,
            test_num_crop=args.test_num_crop,
            num_crop=1 if not test_mode else 3,
            keep_aspect_ratio=True,
            crop_size=args.input_size,
            short_side_size=args.short_side_size,
            new_height=256,
            new_width=320,
            args=args)
        nb_classes = 101

    elif args.data_set == 'HMDB51':
        mode = None
        anno_path = None
        if is_train:
            mode = 'train'
            anno_path = os.path.join(args.data_path, 'train.csv')
        elif test_mode:
            mode = 'test'
            anno_path = os.path.join(args.data_path, 'val.csv')
        else:
            mode = 'validation'
            anno_path = os.path.join(args.data_path, 'test.csv')

        dataset = VideoClsDataset(
            anno_path=anno_path,
            data_path='/',
            mode=mode,
            clip_len=args.num_frames,
            frame_sample_rate=args.sampling_rate,
            num_segment=1,
            test_num_segment=args.test_num_segment,
            test_num_crop=args.test_num_crop,
            num_crop=1 if not test_mode else 3,
            keep_aspect_ratio=True,
            crop_size=args.input_size,
            short_side_size=args.short_side_size,
            new_height=256,
            new_width=320,
            args=args)
        nb_classes = 51
    else:
        raise NotImplementedError()
    assert nb_classes == args.nb_classes
    print("Number of classes = %d" % args.nb_classes)

    return dataset, nb_classes


class BatchCollator(object):
    """
    From a list of samples from the dataset,
    returns the batched images and targets.
    This should be passed to the DataLoader.
    """

    def __init__(self, size_divisible=0):
        self.size_divisible = size_divisible

    def __call__(self, batch):
        transposed_batch = list(zip(*batch))
        video_data = batch_different_videos(transposed_batch[0], self.size_divisible)
        boxes = transposed_batch[1]
        video_ids = transposed_batch[2]
        return video_data, boxes, video_ids


def build_ava_dataset(is_train, transforms):
    input_filename = 'ava_video_train_v2.2' if is_train else 'ava_video_val_v2.2'
    assert input_filename
    dataset_catalog = paths_catalog.DatasetCatalog
    data = dataset_catalog.get(input_filename)
    ava_args = data["args"]
    ava_args["remove_clips_without_annotations"] = is_train
    ava_args["frame_span"] = TransformsCfg.FRAME_NUM * TransformsCfg.FRAME_SAMPLE_RATE  # 64
    if not is_train:
        ava_args["box_thresh"] = 0.8
        ava_args["action_thresh"] = 0.
    else:
        ava_args["box_file"] = None
    ava_args["transforms"] = transforms
    dataset = AVAVideoDataset(**ava_args)
    return dataset


def build_kinetics_dataset(is_train, transforms):
    input_filename = 'ava_video_train_v2.2' if is_train else 'ava_video_val_v2.2'
    kinetics_args = {}
    if is_train:
        kinetics_args['kinetics_annfile'] = "/mnt/cache/xingsen/xingsen2/kinetics_train_v1.0.json"
        kinetics_args['box_file'] = None
        kinetics_args['transforms'] = transforms
        kinetics_args['remove_clips_without_annotations'] = True
        kinetics_args['frame_span'] = TransformsCfg.FRAME_NUM * TransformsCfg.FRAME_SAMPLE_RATE  # 64
    else:
        kinetics_args['kinetics_annfile'] = "/mnt/cache/xingsen/xingsen2/kinetics_val_v1.0.json"
        kinetics_args['box_file'] = '/mnt/cache/xingsen/xingsen2/kinetics_person_box.json'
        kinetics_args['box_thresh'] = 0.8
        kinetics_args['action_thresh'] = 0.
        kinetics_args['transforms'] = transforms
        kinetics_args['remove_clips_without_annotations'] = True
        kinetics_args['frame_span'] = TransformsCfg.FRAME_NUM * TransformsCfg.FRAME_SAMPLE_RATE  # 64
        kinetics_args['eval_file_paths'] = {
            "csv_gt_file": '/mnt/cache/xingsen/xingsen2/kinetics_val_gt_v1.0.csv',
            "labelmap_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_action_list_v2.2_for_activitynet_2019.pbtxt",
            "exclusion_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_val_excluded_timestamps_v2.2.csv",
        }
    dataset = KineticsDataset(**kinetics_args)
    return dataset


def build_ak_dataset(is_train, transforms):
    input_filename = "ak_train" if is_train else "ak_val"
    assert input_filename
    dataset_catalog = paths_catalog.DatasetCatalog
    data_args = dataset_catalog.get(input_filename)
    if is_train:
        data_args['video_root'] = '/mnt/cache/xingsen/ava_dataset/AVA/clips/trainval/'
        data_args['ava_annfile'] = "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_train_v2.2_min.json"
        data_args['kinetics_annfile'] = "/mnt/cache/xingsen/xingsen2/kinetics_train_v1.0.json"
        data_args['transforms'] = transforms
        data_args['remove_clips_without_annotations'] = True
        data_args['frame_span'] = TransformsCfg.FRAME_NUM * TransformsCfg.FRAME_SAMPLE_RATE  # 64
        data_args["ava_eval_file_path"] = {
            "csv_gt_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_train_v2.2.csv",
            "labelmap_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_action_list_v2.2_for_activitynet_2019.pbtxt",
            "exclusion_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_train_excluded_timestamps_v2.2.csv",
        }
    else:
        data_args['video_root'] = '/mnt/cache/xingsen/ava_dataset/AVA/clips/trainval/'
        data_args['ava_annfile'] = "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_val_v2.2_min.json"
        data_args['kinetics_annfile'] = "/mnt/cache/xingsen/xingsen2/kinetics_val_v1.0.json"
        data_args['box_thresh'] = 0.8
        data_args['action_thresh'] = 0.
        data_args['transforms'] = transforms
        data_args['remove_clips_without_annotations'] = True
        data_args['kinetics_box_file'] = '/mnt/cache/xingsen/xingsen2/kinetics_person_box.json'
        data_args['ava_box_file'] = '/mnt/cache/xingsen/ava_dataset/AVA/boxes/ava_val_det_person_bbox.json'
        data_args['frame_span'] = TransformsCfg.FRAME_NUM * TransformsCfg.FRAME_SAMPLE_RATE  # 64
        data_args['eval_file_paths'] = {
            "csv_gt_file": '/mnt/cache/xingsen/xingsen2/ak_val_gt.csv',
            "labelmap_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_action_list_v2.2_for_activitynet_2019.pbtxt",
            "exclusion_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_val_excluded_timestamps_v2.2.csv",
        }
        data_args["ava_eval_file_path"] = {
            "csv_gt_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_val_v2.2.csv",
            "labelmap_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_action_list_v2.2_for_activitynet_2019.pbtxt",
            "exclusion_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_val_excluded_timestamps_v2.2.csv",
        }
        data_args['kinetics_eval_file_path'] = {
            "csv_gt_file": '/mnt/cache/xingsen/xingsen2/kinetics_val_gt_v1.0.csv',
            "labelmap_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_action_list_v2.2_for_activitynet_2019.pbtxt",
            "exclusion_file": "/mnt/cache/xingsen/ava_dataset/AVA/annotations/ava_val_excluded_timestamps_v2.2.csv",
        }
    dataset = AKDataset(**data_args)
    return dataset
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/datasets.py
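Usage sketch for the build_dataset entry point above. The paths and settings below are illustrative assumptions, not values from the repo, and VideoClsDataset may read further attributes off args at construction or sampling time (e.g. augmentation flags), so a real run would pass the full argparse namespace built in run_class_finetuning.py.

from types import SimpleNamespace

from datasets import build_dataset  # the file above, assumed importable as datasets.py

# Hypothetical namespace; only the attributes build_dataset itself touches are listed.
args = SimpleNamespace(
    data_set='UCF101',
    data_path='/data/ucf101',   # assumed to contain train.csv / val.csv / test.csv
    nb_classes=101,
    num_frames=16,
    sampling_rate=4,
    test_num_segment=5,
    test_num_crop=3,
    input_size=224,
    short_side_size=224,
)

train_set, nb_classes = build_dataset(is_train=True, test_mode=False, args=args)
print(len(train_set), nb_classes)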
""" This implementation is based on https://github.com/rwightman/pytorch-image-models/blob/master/timm/data/auto_augment.py pulished under an Apache License 2.0. COMMENT FROM ORIGINAL: AutoAugment, RandAugment, and AugMix for PyTorch This code implements the searched ImageNet policies with various tweaks and improvements and does not include any of the search code. AA and RA Implementation adapted from: https://github.com/tensorflow/tpu/blob/master/models/official/efficientnet/autoaugment.py AugMix adapted from: https://github.com/google-research/augmix Papers: AutoAugment: Learning Augmentation Policies from Data https://arxiv.org/abs/1805.09501 Learning Data Augmentation Strategies for Object Detection https://arxiv.org/abs/1906.11172 RandAugment: Practical automated data augmentation... https://arxiv.org/abs/1909.13719 AugMix: A Simple Data Processing Method to Improve Robustness and Uncertainty https://arxiv.org/abs/1912.02781 Hacked together by / Copyright 2020 Ross Wightman """ import math import numpy as np import random import re import PIL from PIL import Image, ImageEnhance, ImageOps _PIL_VER = tuple([int(x) for x in PIL.__version__.split(".")[:2]]) _FILL = (128, 128, 128) # This signifies the max integer that the controller RNN could predict for the # augmentation scheme. _MAX_LEVEL = 10.0 _HPARAMS_DEFAULT = { "translate_const": 250, "img_mean": _FILL, } _RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC) def _interpolation(kwargs): interpolation = kwargs.pop("resample", Image.BILINEAR) if isinstance(interpolation, (list, tuple)): return random.choice(interpolation) else: return interpolation def _check_args_tf(kwargs): if "fillcolor" in kwargs and _PIL_VER < (5, 0): kwargs.pop("fillcolor") kwargs["resample"] = _interpolation(kwargs) def shear_x(img, factor, **kwargs): _check_args_tf(kwargs) return img.transform( img.size, Image.AFFINE, (1, factor, 0, 0, 1, 0), **kwargs ) def shear_y(img, factor, **kwargs): _check_args_tf(kwargs) return img.transform( img.size, Image.AFFINE, (1, 0, 0, factor, 1, 0), **kwargs ) def translate_x_rel(img, pct, **kwargs): pixels = pct * img.size[0] _check_args_tf(kwargs) return img.transform( img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs ) def translate_y_rel(img, pct, **kwargs): pixels = pct * img.size[1] _check_args_tf(kwargs) return img.transform( img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs ) def translate_x_abs(img, pixels, **kwargs): _check_args_tf(kwargs) return img.transform( img.size, Image.AFFINE, (1, 0, pixels, 0, 1, 0), **kwargs ) def translate_y_abs(img, pixels, **kwargs): _check_args_tf(kwargs) return img.transform( img.size, Image.AFFINE, (1, 0, 0, 0, 1, pixels), **kwargs ) def rotate(img, degrees, **kwargs): _check_args_tf(kwargs) if _PIL_VER >= (5, 2): return img.rotate(degrees, **kwargs) elif _PIL_VER >= (5, 0): w, h = img.size post_trans = (0, 0) rotn_center = (w / 2.0, h / 2.0) angle = -math.radians(degrees) matrix = [ round(math.cos(angle), 15), round(math.sin(angle), 15), 0.0, round(-math.sin(angle), 15), round(math.cos(angle), 15), 0.0, ] def transform(x, y, matrix): (a, b, c, d, e, f) = matrix return a * x + b * y + c, d * x + e * y + f matrix[2], matrix[5] = transform( -rotn_center[0] - post_trans[0], -rotn_center[1] - post_trans[1], matrix, ) matrix[2] += rotn_center[0] matrix[5] += rotn_center[1] return img.transform(img.size, Image.AFFINE, matrix, **kwargs) else: return img.rotate(degrees, resample=kwargs["resample"]) def auto_contrast(img, **__): return ImageOps.autocontrast(img) def 
invert(img, **__): return ImageOps.invert(img) def equalize(img, **__): return ImageOps.equalize(img) def solarize(img, thresh, **__): return ImageOps.solarize(img, thresh) def solarize_add(img, add, thresh=128, **__): lut = [] for i in range(256): if i < thresh: lut.append(min(255, i + add)) else: lut.append(i) if img.mode in ("L", "RGB"): if img.mode == "RGB" and len(lut) == 256: lut = lut + lut + lut return img.point(lut) else: return img def posterize(img, bits_to_keep, **__): if bits_to_keep >= 8: return img return ImageOps.posterize(img, bits_to_keep) def contrast(img, factor, **__): return ImageEnhance.Contrast(img).enhance(factor) def color(img, factor, **__): return ImageEnhance.Color(img).enhance(factor) def brightness(img, factor, **__): return ImageEnhance.Brightness(img).enhance(factor) def sharpness(img, factor, **__): return ImageEnhance.Sharpness(img).enhance(factor) def _randomly_negate(v): """With 50% prob, negate the value""" return -v if random.random() > 0.5 else v def _rotate_level_to_arg(level, _hparams): # range [-30, 30] level = (level / _MAX_LEVEL) * 30.0 level = _randomly_negate(level) return (level,) def _enhance_level_to_arg(level, _hparams): # range [0.1, 1.9] return ((level / _MAX_LEVEL) * 1.8 + 0.1,) def _enhance_increasing_level_to_arg(level, _hparams): # the 'no change' level is 1.0, moving away from that towards 0. or 2.0 increases the enhancement blend # range [0.1, 1.9] level = (level / _MAX_LEVEL) * 0.9 level = 1.0 + _randomly_negate(level) return (level,) def _shear_level_to_arg(level, _hparams): # range [-0.3, 0.3] level = (level / _MAX_LEVEL) * 0.3 level = _randomly_negate(level) return (level,) def _translate_abs_level_to_arg(level, hparams): translate_const = hparams["translate_const"] level = (level / _MAX_LEVEL) * float(translate_const) level = _randomly_negate(level) return (level,) def _translate_rel_level_to_arg(level, hparams): # default range [-0.45, 0.45] translate_pct = hparams.get("translate_pct", 0.45) level = (level / _MAX_LEVEL) * translate_pct level = _randomly_negate(level) return (level,) def _posterize_level_to_arg(level, _hparams): # As per Tensorflow TPU EfficientNet impl # range [0, 4], 'keep 0 up to 4 MSB of original image' # intensity/severity of augmentation decreases with level return (int((level / _MAX_LEVEL) * 4),) def _posterize_increasing_level_to_arg(level, hparams): # As per Tensorflow models research and UDA impl # range [4, 0], 'keep 4 down to 0 MSB of original image', # intensity/severity of augmentation increases with level return (4 - _posterize_level_to_arg(level, hparams)[0],) def _posterize_original_level_to_arg(level, _hparams): # As per original AutoAugment paper description # range [4, 8], 'keep 4 up to 8 MSB of image' # intensity/severity of augmentation decreases with level return (int((level / _MAX_LEVEL) * 4) + 4,) def _solarize_level_to_arg(level, _hparams): # range [0, 256] # intensity/severity of augmentation decreases with level return (int((level / _MAX_LEVEL) * 256),) def _solarize_increasing_level_to_arg(level, _hparams): # range [0, 256] # intensity/severity of augmentation increases with level return (256 - _solarize_level_to_arg(level, _hparams)[0],) def _solarize_add_level_to_arg(level, _hparams): # range [0, 110] return (int((level / _MAX_LEVEL) * 110),) LEVEL_TO_ARG = { "AutoContrast": None, "Equalize": None, "Invert": None, "Rotate": _rotate_level_to_arg, # There are several variations of the posterize level scaling in various Tensorflow/Google repositories/papers "Posterize": 
_posterize_level_to_arg, "PosterizeIncreasing": _posterize_increasing_level_to_arg, "PosterizeOriginal": _posterize_original_level_to_arg, "Solarize": _solarize_level_to_arg, "SolarizeIncreasing": _solarize_increasing_level_to_arg, "SolarizeAdd": _solarize_add_level_to_arg, "Color": _enhance_level_to_arg, "ColorIncreasing": _enhance_increasing_level_to_arg, "Contrast": _enhance_level_to_arg, "ContrastIncreasing": _enhance_increasing_level_to_arg, "Brightness": _enhance_level_to_arg, "BrightnessIncreasing": _enhance_increasing_level_to_arg, "Sharpness": _enhance_level_to_arg, "SharpnessIncreasing": _enhance_increasing_level_to_arg, "ShearX": _shear_level_to_arg, "ShearY": _shear_level_to_arg, "TranslateX": _translate_abs_level_to_arg, "TranslateY": _translate_abs_level_to_arg, "TranslateXRel": _translate_rel_level_to_arg, "TranslateYRel": _translate_rel_level_to_arg, } NAME_TO_OP = { "AutoContrast": auto_contrast, "Equalize": equalize, "Invert": invert, "Rotate": rotate, "Posterize": posterize, "PosterizeIncreasing": posterize, "PosterizeOriginal": posterize, "Solarize": solarize, "SolarizeIncreasing": solarize, "SolarizeAdd": solarize_add, "Color": color, "ColorIncreasing": color, "Contrast": contrast, "ContrastIncreasing": contrast, "Brightness": brightness, "BrightnessIncreasing": brightness, "Sharpness": sharpness, "SharpnessIncreasing": sharpness, "ShearX": shear_x, "ShearY": shear_y, "TranslateX": translate_x_abs, "TranslateY": translate_y_abs, "TranslateXRel": translate_x_rel, "TranslateYRel": translate_y_rel, } class AugmentOp: """ Apply for video. """ def __init__(self, name, prob=0.5, magnitude=10, hparams=None): hparams = hparams or _HPARAMS_DEFAULT self.aug_fn = NAME_TO_OP[name] self.level_fn = LEVEL_TO_ARG[name] self.prob = prob self.magnitude = magnitude self.hparams = hparams.copy() self.kwargs = { "fillcolor": hparams["img_mean"] if "img_mean" in hparams else _FILL, "resample": hparams["interpolation"] if "interpolation" in hparams else _RANDOM_INTERPOLATION, } # If magnitude_std is > 0, we introduce some randomness # in the usually fixed policy and sample magnitude from a normal distribution # with mean `magnitude` and std-dev of `magnitude_std`. # NOTE This is my own hack, being tested, not in papers or reference impls. self.magnitude_std = self.hparams.get("magnitude_std", 0) def __call__(self, img_list): if self.prob < 1.0 and random.random() > self.prob: return img_list magnitude = self.magnitude if self.magnitude_std and self.magnitude_std > 0: magnitude = random.gauss(magnitude, self.magnitude_std) magnitude = min(_MAX_LEVEL, max(0, magnitude)) # clip to valid range level_args = ( self.level_fn(magnitude, self.hparams) if self.level_fn is not None else () ) if isinstance(img_list, list): return [ self.aug_fn(img, *level_args, **self.kwargs) for img in img_list ] else: return self.aug_fn(img_list, *level_args, **self.kwargs) _RAND_TRANSFORMS = [ "AutoContrast", "Equalize", "Invert", "Rotate", "Posterize", "Solarize", "SolarizeAdd", "Color", "Contrast", "Brightness", "Sharpness", "ShearX", "ShearY", "TranslateXRel", "TranslateYRel", ] _RAND_INCREASING_TRANSFORMS = [ "AutoContrast", "Equalize", "Invert", "Rotate", "PosterizeIncreasing", "SolarizeIncreasing", "SolarizeAdd", "ColorIncreasing", "ContrastIncreasing", "BrightnessIncreasing", "SharpnessIncreasing", "ShearX", "ShearY", "TranslateXRel", "TranslateYRel", ] # These experimental weights are based loosely on the relative improvements mentioned in paper. 
# They may not result in increased performance, but could likely be tuned to so. _RAND_CHOICE_WEIGHTS_0 = { "Rotate": 0.3, "ShearX": 0.2, "ShearY": 0.2, "TranslateXRel": 0.1, "TranslateYRel": 0.1, "Color": 0.025, "Sharpness": 0.025, "AutoContrast": 0.025, "Solarize": 0.005, "SolarizeAdd": 0.005, "Contrast": 0.005, "Brightness": 0.005, "Equalize": 0.005, "Posterize": 0, "Invert": 0, } def _select_rand_weights(weight_idx=0, transforms=None): transforms = transforms or _RAND_TRANSFORMS assert weight_idx == 0 # only one set of weights currently rand_weights = _RAND_CHOICE_WEIGHTS_0 probs = [rand_weights[k] for k in transforms] probs /= np.sum(probs) return probs def rand_augment_ops(magnitude=10, hparams=None, transforms=None): hparams = hparams or _HPARAMS_DEFAULT transforms = transforms or _RAND_TRANSFORMS return [ AugmentOp(name, prob=0.5, magnitude=magnitude, hparams=hparams) for name in transforms ] class RandAugment: def __init__(self, ops, num_layers=2, choice_weights=None): self.ops = ops self.num_layers = num_layers self.choice_weights = choice_weights def __call__(self, img): # no replacement when using weighted choice ops = np.random.choice( self.ops, self.num_layers, replace=self.choice_weights is None, p=self.choice_weights, ) for op in ops: img = op(img) return img def rand_augment_transform(config_str, hparams): """ RandAugment: Practical automated data augmentation... - https://arxiv.org/abs/1909.13719 Create a RandAugment transform :param config_str: String defining configuration of random augmentation. Consists of multiple sections separated by dashes ('-'). The first section defines the specific variant of rand augment (currently only 'rand'). The remaining sections, not order sepecific determine 'm' - integer magnitude of rand augment 'n' - integer num layers (number of transform ops selected per image) 'w' - integer probabiliy weight index (index of a set of weights to influence choice of op) 'mstd' - float std deviation of magnitude noise applied 'inc' - integer (bool), use augmentations that increase in severity with magnitude (default: 0) Ex 'rand-m9-n3-mstd0.5' results in RandAugment with magnitude 9, num_layers 3, magnitude_std 0.5 'rand-mstd1-w0' results in magnitude_std 1.0, weights 0, default magnitude of 10 and num_layers 2 :param hparams: Other hparams (kwargs) for the RandAugmentation scheme :return: A PyTorch compatible Transform """ magnitude = _MAX_LEVEL # default to _MAX_LEVEL for magnitude (currently 10) num_layers = 2 # default to 2 ops per image weight_idx = None # default to no probability weights for op choice transforms = _RAND_TRANSFORMS config = config_str.split("-") assert config[0] == "rand" config = config[1:] for c in config: cs = re.split(r"(\d.*)", c) if len(cs) < 2: continue key, val = cs[:2] if key == "mstd": # noise param injected via hparams for now hparams.setdefault("magnitude_std", float(val)) elif key == "inc": if bool(val): transforms = _RAND_INCREASING_TRANSFORMS elif key == "m": magnitude = int(val) elif key == "n": num_layers = int(val) elif key == "w": weight_idx = int(val) else: assert NotImplementedError ra_ops = rand_augment_ops( magnitude=magnitude, hparams=hparams, transforms=transforms ) choice_weights = ( None if weight_idx is None else _select_rand_weights(weight_idx) ) return RandAugment(ra_ops, num_layers, choice_weights=choice_weights)
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/rand_augment.py
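A small usage sketch for rand_augment_transform above. Since AugmentOp.__call__ accepts a list of images, the returned transform can be applied to a whole clip at once; the config string and hparams values here are arbitrary examples.

from PIL import Image

from rand_augment import rand_augment_transform  # the file above

# magnitude 7, 4 ops per clip, magnitude noise 0.5, "increasing" op variants
hparams = {"translate_const": 100, "img_mean": (124, 116, 104)}
ra = rand_augment_transform("rand-m7-n4-mstd0.5-inc1", hparams)

clip = [Image.new("RGB", (224, 224), color=(i, i, i)) for i in range(16)]
clip = ra(clip)  # the same 4 sampled ops are applied to every frame
print(len(clip), clip[0].size)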
import numpy as np
from PIL import Image
import torch


def convert_img(img):
    """Converts (H, W, C) numpy.ndarray to (C, H, W) format"""
    if len(img.shape) == 3:
        img = img.transpose(2, 0, 1)
    if len(img.shape) == 2:
        img = np.expand_dims(img, 0)
    return img


class ClipToTensor(object):
    """Convert a list of m (H x W x C) numpy.ndarrays in the range [0, 255]
    to a torch.FloatTensor of shape (C x m x H x W) in the range [0, 1.0]
    """

    def __init__(self, channel_nb=3, div_255=True, numpy=False):
        self.channel_nb = channel_nb
        self.div_255 = div_255
        self.numpy = numpy

    def __call__(self, clip):
        """
        Args: clip (list of numpy.ndarray): clip (list of images)
        to be converted to tensor.
        """
        # Retrieve shape
        if isinstance(clip[0], np.ndarray):
            h, w, ch = clip[0].shape
            assert ch == self.channel_nb, 'Got {0} instead of 3 channels'.format(ch)
        elif isinstance(clip[0], Image.Image):
            w, h = clip[0].size
        else:
            raise TypeError('Expected numpy.ndarray or PIL.Image '
                            'but got list of {0}'.format(type(clip[0])))

        np_clip = np.zeros([self.channel_nb, len(clip), int(h), int(w)])

        # Convert
        for img_idx, img in enumerate(clip):
            if isinstance(img, np.ndarray):
                pass
            elif isinstance(img, Image.Image):
                img = np.array(img, copy=False)
            else:
                raise TypeError('Expected numpy.ndarray or PIL.Image '
                                'but got list of {0}'.format(type(img)))
            img = convert_img(img)
            np_clip[:, img_idx, :, :] = img

        if self.numpy:
            if self.div_255:
                np_clip = np_clip / 255.0
            return np_clip
        else:
            tensor_clip = torch.from_numpy(np_clip)
            if not isinstance(tensor_clip, torch.FloatTensor):
                tensor_clip = tensor_clip.float()
            if self.div_255:
                tensor_clip = torch.div(tensor_clip, 255)
            return tensor_clip


# Note this norms data to -1/1
class ClipToTensor_K(object):
    """Convert a list of m (H x W x C) numpy.ndarrays in the range [0, 255]
    to a torch.FloatTensor of shape (C x m x H x W) in the range [-1, 1]
    """

    def __init__(self, channel_nb=3, div_255=True, numpy=False):
        self.channel_nb = channel_nb
        self.div_255 = div_255
        self.numpy = numpy

    def __call__(self, clip):
        """
        Args: clip (list of numpy.ndarray): clip (list of images)
        to be converted to tensor.
        """
        # Retrieve shape
        if isinstance(clip[0], np.ndarray):
            h, w, ch = clip[0].shape
            assert ch == self.channel_nb, 'Got {0} instead of 3 channels'.format(ch)
        elif isinstance(clip[0], Image.Image):
            w, h = clip[0].size
        else:
            raise TypeError('Expected numpy.ndarray or PIL.Image '
                            'but got list of {0}'.format(type(clip[0])))

        np_clip = np.zeros([self.channel_nb, len(clip), int(h), int(w)])

        # Convert
        for img_idx, img in enumerate(clip):
            if isinstance(img, np.ndarray):
                pass
            elif isinstance(img, Image.Image):
                img = np.array(img, copy=False)
            else:
                raise TypeError('Expected numpy.ndarray or PIL.Image '
                                'but got list of {0}'.format(type(img)))
            img = convert_img(img)
            np_clip[:, img_idx, :, :] = img

        if self.numpy:
            if self.div_255:
                np_clip = (np_clip - 127.5) / 127.5
            return np_clip
        else:
            tensor_clip = torch.from_numpy(np_clip)
            if not isinstance(tensor_clip, torch.FloatTensor):
                tensor_clip = tensor_clip.float()
            if self.div_255:
                tensor_clip = torch.div(torch.sub(tensor_clip, 127.5), 127.5)
            return tensor_clip


class ToTensor(object):
    """Converts numpy array to tensor"""

    def __call__(self, array):
        tensor = torch.from_numpy(array)
        return tensor
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/volume_transforms.py
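Quick shape and range check for the two clip converters above on synthetic frames; only shapes and value ranges are being demonstrated.

import numpy as np

from volume_transforms import ClipToTensor, ClipToTensor_K  # the file above

clip = [np.random.randint(0, 256, (240, 320, 3), dtype=np.uint8) for _ in range(8)]

t01 = ClipToTensor()(clip)   # (C, T, H, W) floats in [0, 1]
tk = ClipToTensor_K()(clip)  # (C, T, H, W) floats in [-1, 1]
print(t01.shape, float(t01.min()), float(t01.max()))
print(tk.shape, float(tk.min()), float(tk.max()))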
import argparse import datetime from operator import is_ import numpy as np import time import torch import torch.backends.cudnn as cudnn import json import os from functools import partial from pathlib import Path from collections import OrderedDict from timm.data.mixup import Mixup from timm.models import create_model from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy from timm.utils import ModelEma from optim_factory import create_optimizer, get_parameter_groups, LayerDecayValueAssigner from datasets import build_ava_dataset, BatchCollator, build_kinetics_dataset, build_ak_dataset from engine_for_finetuning import train_one_epoch, validation_one_epoch, final_test, merge from utils import NativeScalerWithGradNormCount as NativeScaler from utils import multiple_samples_collate import utils import modeling_finetune from data.transforms import build_transforms def get_args(): parser = argparse.ArgumentParser('VideoMAE fine-tuning and evaluation script for video classification', add_help=False) parser.add_argument('--batch_size', default=64, type=int) # parser.add_argument('--epochs', default=30, type=int) # parser.add_argument('--update_freq', default=1, type=int) parser.add_argument('--save_ckpt_freq', default=100, type=int) # parser.add_argument('--val_freq', default=2, type=int) # parser.add_argument('--enable_randaug', action='store_true', default=False) # parser.add_argument('--aug_type',default='strong', type=str, choices=['strong','default','default_w_cutout','default_wo_affine']) # # Model parameters parser.add_argument('--model', default='vit_base_patch16_224', type=str, metavar='MODEL', help='Name of model to train') parser.add_argument('--tubelet_size', type=int, default=2) parser.add_argument('--input_size', default=224, type=int, help='videos input size') parser.add_argument('--drop', type=float, default=0.0, metavar='PCT', help='Dropout rate (default: 0.)') parser.add_argument('--attn_drop_rate', type=float, default=0.0, metavar='PCT', help='Attention dropout rate (default: 0.)') parser.add_argument('--drop_path', type=float, default=0.1, metavar='PCT', help='Drop path rate (default: 0.1)') parser.add_argument('--disable_eval_during_finetuning', action='store_true', default=False) parser.add_argument('--model_ema', action='store_true', default=False) parser.add_argument('--model_ema_decay', type=float, default=0.9999, help='') parser.add_argument('--model_ema_force_cpu', action='store_true', default=False, help='') parser.add_argument('--head_type',type=str, default='linear', choices=['linear', 'acar']) # Optimizer parameters parser.add_argument('--accumulation_step',type=int,default=1) parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER', help='Optimizer (default: "adamw"') parser.add_argument('--opt_eps', default=1e-8, type=float, metavar='EPSILON', help='Optimizer Epsilon (default: 1e-8)') parser.add_argument('--opt_betas', default=None, type=float, nargs='+', metavar='BETA', help='Optimizer Betas (default: None, use opt default)') parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM', help='Clip gradient norm (default: None, no clipping)') parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='SGD momentum (default: 0.9)') parser.add_argument('--weight_decay', type=float, default=0.05, help='weight decay (default: 0.05)') parser.add_argument('--weight_decay_end', type=float, default=None, help="""Final value of the weight decay. 
We use a cosine schedule for WD and using a larger decay by the end of training improves performance for ViTs.""") parser.add_argument('--lr', type=float, default=1e-3, metavar='LR', help='learning rate (default: 1e-3)') parser.add_argument('--layer_decay', type=float, default=0.75) parser.add_argument('--warmup_lr', type=float, default=1e-6, metavar='LR', help='warmup learning rate (default: 1e-6)') parser.add_argument('--min_lr', type=float, default=1e-6, metavar='LR', help='lower lr bound for cyclic schedulers that hit 0 (1e-5)') parser.add_argument('--warmup_epochs', type=int, default=2, metavar='N', # help='epochs to warmup LR, if scheduler supports') parser.add_argument('--warmup_steps', type=int, default=-1, metavar='N', help='num of steps to warmup LR, will overload warmup_epochs if set > 0') # Augmentation parameters parser.add_argument('--color_jitter', type=float, default=0.4, metavar='PCT', # no use help='Color jitter factor (default: 0.4)') parser.add_argument('--num_sample', type=int, default=2, # 1 help='Repeated_aug (default: 2)') parser.add_argument('--aa', type=str, default='rand-m7-n4-mstd0.5-inc1', metavar='NAME', # no use help='Use AutoAugment policy. "v0" or "original". " + "(default: rand-m7-n4-mstd0.5-inc1)'), parser.add_argument('--smoothing', type=float, default=0.1, # 0. help='Label smoothing (default: 0.1)') parser.add_argument('--train_interpolation', type=str, default='bicubic', # help='Training interpolation (random, bilinear, bicubic default: "bicubic")') # Evaluation parameters, no use parser.add_argument('--crop_pct', type=float, default=None) parser.add_argument('--short_side_size', type=int, default=224) parser.add_argument('--test_num_segment', type=int, default=5) parser.add_argument('--test_num_crop', type=int, default=3) # Random Erase params, no use parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT', help='Random erase prob (default: 0.25)') parser.add_argument('--remode', type=str, default='pixel', help='Random erase mode (default: "pixel")') parser.add_argument('--recount', type=int, default=1, help='Random erase count (default: 1)') parser.add_argument('--resplit', action='store_true', default=False, help='Do not random erase first (clean) augmentation split') # Mixup params, no use parser.add_argument('--mixup', type=float, default=0.8, help='mixup alpha, mixup enabled if > 0.') parser.add_argument('--cutmix', type=float, default=1.0, help='cutmix alpha, cutmix enabled if > 0.') parser.add_argument('--cutmix_minmax', type=float, nargs='+', default=None, help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)') parser.add_argument('--mixup_prob', type=float, default=1.0, help='Probability of performing mixup or cutmix when either/both is enabled') parser.add_argument('--mixup_switch_prob', type=float, default=0.5, help='Probability of switching to cutmix when both mixup and cutmix enabled') parser.add_argument('--mixup_mode', type=str, default='batch', help='How to apply mixup/cutmix params. 
Per "batch", "pair", or "elem"') # Finetuning params parser.add_argument('--finetune', default='', help='finetune from checkpoint') parser.add_argument('--model_key', default='model|module', type=str) parser.add_argument('--model_prefix', default='', type=str) parser.add_argument('--init_scale', default=0.001, type=float) # head init parser.add_argument('--use_mean_pooling', action='store_true') # cls_token parser.set_defaults(use_mean_pooling=True) parser.add_argument('--use_cls', action='store_false', dest='use_mean_pooling') # Dataset parameters parser.add_argument('--data_path', default='/path/to/list_kinetics-400', type=str, help='dataset path') # parser.add_argument('--eval_data_path', default=None, type=str, # help='dataset path for evaluation') parser.add_argument('--nb_classes', default=80, type=int, # help='number of the classification types') parser.add_argument('--imagenet_default_mean_and_std', default=True, action='store_true') parser.add_argument('--num_segments', type=int, default=1) parser.add_argument('--num_frames', type=int, default=16) parser.add_argument('--sampling_rate', type=int, default=4) parser.add_argument('--data_set', default='ava', choices=['ava','ava-kinetics'], type=str, help='dataset') # parser.add_argument('--output_dir', default='', # help='path where to save, empty for no saving') parser.add_argument('--log_dir', default=None, # help='path where to tensorboard log') parser.add_argument('--device', default='cuda', help='device to use for training / testing') parser.add_argument('--seed', default=0, type=int) parser.add_argument('--resume', default='', help='resume from checkpoint') parser.add_argument('--auto_resume', action='store_true') parser.add_argument('--no_auto_resume', action='store_false', dest='auto_resume') parser.set_defaults(auto_resume=True) parser.add_argument('--save_ckpt', action='store_true') parser.add_argument('--no_save_ckpt', action='store_false', dest='save_ckpt') parser.set_defaults(save_ckpt=True) parser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='start epoch') parser.add_argument('--eval', action='store_true', help='Perform evaluation only') parser.add_argument('--dist_eval', action='store_true', default=False, # help='Enabling distributed evaluation') parser.add_argument('--num_workers', default=10, type=int) parser.add_argument('--pin_mem', action='store_true', help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.') parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem') parser.set_defaults(pin_mem=True) # distributed training parameters parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes') parser.add_argument('--local_rank', default=-1, type=int) parser.add_argument('--dist_on_itp', action='store_true') parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training') parser.add_argument('--eval_kinetics',action='store_true',default=False) parser.add_argument('--enable_deepspeed', action='store_true', default=False) known_args, _ = parser.parse_known_args() if known_args.enable_deepspeed: try: import deepspeed from deepspeed import DeepSpeedConfig parser = deepspeed.add_config_arguments(parser) ds_init = deepspeed.initialize except: print("Please 'pip install deepspeed'") exit(0) else: ds_init = None return parser.parse_args(), ds_init def main(args, ds_init): utils.init_distributed_mode(args) if ds_init is not None: utils.create_ds_config(args) print(args) device = 
torch.device(args.device) # fix the seed for reproducibility seed = args.seed + utils.get_rank() torch.manual_seed(seed) np.random.seed(seed) cudnn.benchmark = True transform_train = build_transforms(is_train=True,args=args) transform_val = build_transforms(is_train=False,args=args) if args.data_set == 'ava': dataset_train = build_ava_dataset(is_train=True, transforms=transform_train) else: dataset_train = build_ak_dataset(is_train=True, transforms=transform_train) if args.eval_kinetics: dataset_val = build_kinetics_dataset(is_train=False, transforms=transform_val) else: dataset_val = build_ava_dataset(is_train=False, transforms=transform_val) num_tasks = utils.get_world_size() global_rank = utils.get_rank() sampler_train = torch.utils.data.DistributedSampler( dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True ) print("Sampler_train = %s" % str(sampler_train)) if args.dist_eval: if len(dataset_val) % num_tasks != 0: print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. ' 'This will slightly alter validation results as extra duplicate entries are added to achieve ' 'equal num of samples per-process.') sampler_val = torch.utils.data.DistributedSampler( dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=False) # sampler_test = torch.utils.data.DistributedSampler( # dataset_test, num_replicas=num_tasks, rank=global_rank, shuffle=False) else: # sampler_val = torch.utils.data.SequentialSampler(dataset_val) if global_rank == 0 and args.log_dir is not None: os.makedirs(args.log_dir, exist_ok=True) log_writer = utils.TensorboardLogger(log_dir=args.log_dir) else: log_writer = None collate_func = BatchCollator(size_divisible=16) data_loader_train = torch.utils.data.DataLoader( dataset_train, sampler=sampler_train, batch_size=args.batch_size, # 8 num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=True, collate_fn=collate_func, ) data_loader_train.num_samples = len(dataset_train) if dataset_val is not None: data_loader_val = torch.utils.data.DataLoader( dataset_val, sampler=sampler_val, batch_size=4, num_workers=args.num_workers, pin_memory=args.pin_mem, drop_last=False, collate_fn=collate_func, ) data_loader_val.num_samples = len(dataset_val) else: data_loader_val = None mixup_fn = None model = create_model( args.model, pretrained=False, num_classes=args.nb_classes, all_frames=args.num_frames * args.num_segments, tubelet_size=args.tubelet_size, drop_rate=args.drop, drop_path_rate=args.drop_path, attn_drop_rate=args.attn_drop_rate, drop_block_rate=None, use_mean_pooling=args.use_mean_pooling, init_scale=args.init_scale, head_type=args.head_type ) patch_size = model.patch_embed.patch_size # 16 print("Patch size = %s" % str(patch_size)) # no use args.window_size = (args.num_frames // 2, args.input_size // patch_size[0], args.input_size // patch_size[1]) args.patch_size = patch_size if args.finetune: if args.finetune.startswith('https'): checkpoint = torch.hub.load_state_dict_from_url( args.finetune, map_location='cpu', check_hash=True) else: checkpoint = torch.load(args.finetune, map_location='cpu') print("Load ckpt from %s" % args.finetune) checkpoint_model = None for model_key in args.model_key.split('|'): if model_key in checkpoint: checkpoint_model = checkpoint[model_key] print("Load state_dict by model_key = %s" % model_key) break if checkpoint_model is None: checkpoint_model = checkpoint state_dict = model.state_dict() try: for k in ['head.weight', 'head.bias']: # if k in checkpoint_model and 
checkpoint_model[k].shape != state_dict[k].shape: print(f"Removing key {k} from pretrained checkpoint") del checkpoint_model[k] except KeyError: del checkpoint_model['head.weight'] del checkpoint_model['head.bias'] all_keys = list(checkpoint_model.keys()) new_dict = OrderedDict() for key in all_keys: if key.startswith('backbone.'): new_dict[key[9:]] = checkpoint_model[key] elif key.startswith('encoder.'): new_dict[key[8:]] = checkpoint_model[key] else: new_dict[key] = checkpoint_model[key] checkpoint_model = new_dict # interpolate position embedding if 'pos_embed' in checkpoint_model: pos_embed_checkpoint = checkpoint_model['pos_embed'] embedding_size = pos_embed_checkpoint.shape[-1] # channel dim num_patches = model.patch_embed.num_patches # num_extra_tokens = model.pos_embed.shape[-2] - num_patches # 0/1 # height (== width) for the checkpoint position embedding orig_size = int(((pos_embed_checkpoint.shape[-2] - num_extra_tokens) // ( args.num_frames // model.patch_embed.tubelet_size)) ** 0.5) # height (== width) for the new position embedding new_size = int((num_patches // (args.num_frames // model.patch_embed.tubelet_size)) ** 0.5) # class_token and dist_token are kept unchanged if orig_size != new_size: print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size)) extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens] # only the position tokens are interpolated pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:] # B, L, C -> BT, H, W, C -> BT, C, H, W pos_tokens = pos_tokens.reshape(-1, args.num_frames // model.patch_embed.tubelet_size, orig_size, orig_size, embedding_size) pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2) pos_tokens = torch.nn.functional.interpolate( pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False) # BT, C, H, W -> BT, H, W, C -> B, T, H, W, C pos_tokens = pos_tokens.permute(0, 2, 3, 1).reshape(-1, args.num_frames // model.patch_embed.tubelet_size, new_size, new_size, embedding_size) pos_tokens = pos_tokens.flatten(1, 3) # B, L, C new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1) checkpoint_model['pos_embed'] = new_pos_embed utils.load_state_dict(model, checkpoint_model, prefix=args.model_prefix) model.to(device) model_ema = None model_without_ddp = model n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) print("Model = %s" % str(model_without_ddp)) print('number of params:', n_parameters) total_batch_size = args.batch_size * args.update_freq * utils.get_world_size() num_training_steps_per_epoch = len(dataset_train) // total_batch_size args.lr = args.lr * total_batch_size / 256 args.min_lr = args.min_lr * total_batch_size / 256 args.warmup_lr = args.warmup_lr * total_batch_size / 256 print("LR = %.8f" % args.lr) print("Batch size = %d" % total_batch_size) print("Update frequent = %d" % args.update_freq) print("Number of training examples = %d" % len(dataset_train)) print("Number of training training per epoch = %d" % num_training_steps_per_epoch) num_layers = model_without_ddp.get_num_layers() if args.layer_decay < 1.0: assigner = LayerDecayValueAssigner( list(args.layer_decay ** (num_layers + 1 - i) for i in range(num_layers + 2))) else: assigner = None if assigner is not None: print("Assigned values = %s" % str(assigner.values)) skip_weight_decay_list = model.no_weight_decay() print("Skip weight decay list: ", skip_weight_decay_list) if args.enable_deepspeed: loss_scaler = None optimizer_params = get_parameter_groups( 
model, args.weight_decay, skip_weight_decay_list, assigner.get_layer_id if assigner is not None else None, assigner.get_scale if assigner is not None else None) model, optimizer, _, _ = ds_init( args=args, model=model, model_parameters=optimizer_params, dist_init_required=not args.distributed, ) print("model.gradient_accumulation_steps() = %d" % model.gradient_accumulation_steps()) assert model.gradient_accumulation_steps() == args.update_freq else: if args.distributed: model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True) model_without_ddp = model.module optimizer = create_optimizer( args, model_without_ddp, skip_list=skip_weight_decay_list, get_num_layer=assigner.get_layer_id if assigner is not None else None, get_layer_scale=assigner.get_scale if assigner is not None else None) loss_scaler = NativeScaler() print("Use step level LR scheduler!") lr_schedule_values = utils.cosine_scheduler( args.lr, args.min_lr, args.epochs, num_training_steps_per_epoch, warmup_epochs=args.warmup_epochs, warmup_steps=args.warmup_steps, ) if args.weight_decay_end is None: args.weight_decay_end = args.weight_decay wd_schedule_values = utils.cosine_scheduler( args.weight_decay, args.weight_decay_end, args.epochs, num_training_steps_per_epoch) print("Max WD = %.7f, Min WD = %.7f" % (max(wd_schedule_values), min(wd_schedule_values))) utils.auto_load_model( args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler, model_ema=model_ema) if args.eval: validation_one_epoch(data_loader_val, model, device, args.output_dir, args.start_epoch, log_writer) exit(0) print(f"Start training for {args.epochs} epochs") start_time = time.time() for epoch in range(args.start_epoch, args.epochs): if args.distributed: data_loader_train.sampler.set_epoch(epoch) if log_writer is not None: log_writer.set_step(epoch * num_training_steps_per_epoch * args.update_freq) train_stats = train_one_epoch( model, data_loader_train, optimizer, device, epoch, loss_scaler, args.clip_grad, model_ema, mixup_fn, log_writer=log_writer, start_steps=epoch * num_training_steps_per_epoch, lr_schedule_values=lr_schedule_values, wd_schedule_values=wd_schedule_values, num_training_steps_per_epoch=num_training_steps_per_epoch, update_freq=args.update_freq, ) # 1. save ckpt if args.output_dir and args.save_ckpt: if (epoch + 1) % args.save_ckpt_freq == 0 or epoch + 1 == args.epochs: utils.save_model( args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler, epoch=epoch, model_ema=model_ema) # 2. log log_stats = {**{f'train_{k}': v for k, v in train_stats.items()}, 'epoch': epoch, 'n_parameters': n_parameters} if args.output_dir and utils.is_main_process(): if log_writer is not None: log_writer.flush() with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f: f.write(json.dumps(log_stats) + "\n") # 3. eval if data_loader_val is not None and ( (epoch + 1) % args.val_freq == 0 or epoch + 1 == args.epochs): validation_one_epoch(data_loader_val, model, device, args.output_dir, epoch, log_writer) torch.distributed.barrier() total_time = time.time() - start_time total_time_str = str(datetime.timedelta(seconds=int(total_time))) print('Training time {}'.format(total_time_str)) if __name__ == '__main__': opts, ds_init = get_args() if opts.output_dir: Path(opts.output_dir).mkdir(parents=True, exist_ok=True) main(opts, ds_init)
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/run_class_finetuning.py
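Standalone sketch of two scaling rules used in main() above: the linear learning-rate rule (base lr scaled by total batch size / 256) and the per-block multipliers handed to LayerDecayValueAssigner. The batch size, world size, and the 12-block depth are illustrative assumptions.

# Effective LR grows linearly with the global batch size.
base_lr, batch_size, update_freq, world_size = 1e-3, 64, 1, 8
total_batch_size = batch_size * update_freq * world_size  # 512
print("lr =", base_lr * total_batch_size / 256)           # 0.002

# Per-layer LR multipliers for a 12-block ViT, as built for LayerDecayValueAssigner:
# index 0 covers the patch embedding, index num_layers + 1 (scale 1.0) the head.
num_layers, layer_decay = 12, 0.75
scales = [layer_decay ** (num_layers + 1 - i) for i in range(num_layers + 2)]
print([round(s, 4) for s in scales])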
import argparse
import datetime
import numpy as np
import time
import torch
import torch.backends.cudnn as cudnn
import json
import os
from pathlib import Path

from timm.models import create_model
from optim_factory import create_optimizer
from datasets import build_pretraining_dataset
from engine_for_pretraining import train_one_epoch
from utils import NativeScalerWithGradNormCount as NativeScaler
import utils
import modeling_pretrain


def get_args():
    parser = argparse.ArgumentParser('VideoMAE pre-training script', add_help=False)
    parser.add_argument('--batch_size', default=64, type=int)
    parser.add_argument('--epochs', default=800, type=int)
    parser.add_argument('--save_ckpt_freq', default=50, type=int)

    # Model parameters
    parser.add_argument('--model', default='pretrain_videomae_base_patch16_224', type=str, metavar='MODEL',
                        help='Name of model to train')
    parser.add_argument('--decoder_depth', default=4, type=int,
                        help='depth of decoder')
    parser.add_argument('--mask_type', default='tube', choices=['random', 'tube'], type=str,
                        help='masked strategy of video tokens/patches')
    parser.add_argument('--mask_ratio', default=0.75, type=float,
                        help='ratio of the visual tokens/patches need be masked')
    parser.add_argument('--input_size', default=224, type=int,
                        help='videos input size for backbone')
    parser.add_argument('--drop_path', type=float, default=0.0, metavar='PCT',
                        help='Drop path rate (default: 0.1)')
    parser.add_argument('--normlize_target', default=True, type=bool,
                        help='normalize the target patch pixels')

    # Optimizer parameters
    parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
                        help='Optimizer (default: "adamw"')
    parser.add_argument('--opt_eps', default=1e-8, type=float, metavar='EPSILON',
                        help='Optimizer Epsilon (default: 1e-8)')
    parser.add_argument('--opt_betas', default=None, type=float, nargs='+', metavar='BETA',
                        help='Optimizer Betas (default: None, use opt default)')
    parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM',
                        help='Clip gradient norm (default: None, no clipping)')
    parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
                        help='SGD momentum (default: 0.9)')
    parser.add_argument('--weight_decay', type=float, default=0.05,
                        help='weight decay (default: 0.05)')
    parser.add_argument('--weight_decay_end', type=float, default=None, help="""Final value of the
        weight decay. We use a cosine schedule for WD.
        (Set the same value with args.weight_decay to keep weight decay no change)""")
    parser.add_argument('--lr', type=float, default=1.5e-4, metavar='LR',
                        help='learning rate (default: 1.5e-4)')
    parser.add_argument('--warmup_lr', type=float, default=1e-6, metavar='LR',
                        help='warmup learning rate (default: 1e-6)')
    parser.add_argument('--min_lr', type=float, default=1e-5, metavar='LR',
                        help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
    parser.add_argument('--warmup_epochs', type=int, default=40, metavar='N',
                        help='epochs to warmup LR, if scheduler supports')
    parser.add_argument('--warmup_steps', type=int, default=-1, metavar='N',
                        help='num of steps to warmup LR, will overload warmup_epochs if set > 0')

    # Augmentation parameters
    parser.add_argument('--color_jitter', type=float, default=0.0, metavar='PCT',
                        help='Color jitter factor (default: 0.4)')
    parser.add_argument('--train_interpolation', type=str, default='bicubic',
                        help='Training interpolation (random, bilinear, bicubic default: "bicubic")')

    # Dataset parameters
    parser.add_argument('--data_path', default='/path/to/list_kinetics-400', type=str,
                        help='dataset path')
    parser.add_argument('--imagenet_default_mean_and_std', default=True, action='store_true')
    parser.add_argument('--num_frames', type=int, default=16)
    parser.add_argument('--sampling_rate', type=int, default=4)
    parser.add_argument('--output_dir', default='',
                        help='path where to save, empty for no saving')
    parser.add_argument('--log_dir', default=None,
                        help='path where to tensorboard log')
    parser.add_argument('--device', default='cuda',
                        help='device to use for training / testing')
    parser.add_argument('--seed', default=0, type=int)
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument('--auto_resume', action='store_true')
    parser.add_argument('--no_auto_resume', action='store_false', dest='auto_resume')
    parser.set_defaults(auto_resume=True)
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
                        help='start epoch')
    parser.add_argument('--num_workers', default=10, type=int)
    parser.add_argument('--pin_mem', action='store_true',
                        help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
    parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem', help='')
    parser.set_defaults(pin_mem=True)

    # distributed training parameters
    parser.add_argument('--world_size', default=1, type=int,
                        help='number of distributed processes')
    parser.add_argument('--local_rank', default=-1, type=int)
    parser.add_argument('--dist_on_itp', action='store_true')
    parser.add_argument('--dist_url', default='env://',
                        help='url used to set up distributed training')

    return parser.parse_args()


def get_model(args):
    print(f"Creating model: {args.model}")
    model = create_model(
        args.model,
        pretrained=False,
        drop_path_rate=args.drop_path,
        drop_block_rate=None,
        decoder_depth=args.decoder_depth
    )
    return model


def main(args):
    utils.init_distributed_mode(args)
    print(args)
    device = torch.device(args.device)

    # fix the seed for reproducibility
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    cudnn.benchmark = True

    model = get_model(args)
    patch_size = model.encoder.patch_embed.patch_size
    print("Patch size = %s" % str(patch_size))
    args.window_size = (args.num_frames // 2, args.input_size // patch_size[0], args.input_size // patch_size[1])
    args.patch_size = patch_size

    # get dataset
    dataset_train = build_pretraining_dataset(args)

    num_tasks = utils.get_world_size()
    global_rank = utils.get_rank()
    sampler_rank = global_rank
    num_training_steps_per_epoch = len(dataset_train) // args.batch_size // num_tasks

    sampler_train = torch.utils.data.DistributedSampler(
        dataset_train, num_replicas=num_tasks, rank=sampler_rank, shuffle=True
    )
    print("Sampler_train = %s" % str(sampler_train))

    if global_rank == 0 and args.log_dir is not None:
        os.makedirs(args.log_dir, exist_ok=True)
        log_writer = utils.TensorboardLogger(log_dir=args.log_dir)
    else:
        log_writer = None

    data_loader_train = torch.utils.data.DataLoader(
        dataset_train, sampler=sampler_train,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        pin_memory=args.pin_mem,
        drop_last=True,
        worker_init_fn=utils.seed_worker
    )

    model.to(device)
    model_without_ddp = model
    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print("Model = %s" % str(model_without_ddp))
    print('number of params: {} M'.format(n_parameters / 1e6))

    total_batch_size = args.batch_size * utils.get_world_size()
    args.lr = args.lr * total_batch_size / 256
    args.min_lr = args.min_lr * total_batch_size / 256
    args.warmup_lr = args.warmup_lr * total_batch_size / 256
    print("LR = %.8f" % args.lr)
    print("Batch size = %d" % total_batch_size)
    print("Number of training steps = %d" % num_training_steps_per_epoch)
    print("Number of training examples per epoch = %d" % (total_batch_size * num_training_steps_per_epoch))

    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
        model_without_ddp = model.module

    optimizer = create_optimizer(args, model_without_ddp)
    loss_scaler = NativeScaler()

    print("Use step level LR & WD scheduler!")
    lr_schedule_values = utils.cosine_scheduler(
        args.lr, args.min_lr, args.epochs, num_training_steps_per_epoch,
        warmup_epochs=args.warmup_epochs, warmup_steps=args.warmup_steps,
    )
    if args.weight_decay_end is None:
        args.weight_decay_end = args.weight_decay
    wd_schedule_values = utils.cosine_scheduler(
        args.weight_decay, args.weight_decay_end, args.epochs, num_training_steps_per_epoch)
    print("Max WD = %.7f, Min WD = %.7f" % (max(wd_schedule_values), min(wd_schedule_values)))

    # resume
    utils.auto_load_model(
        args=args, model=model, model_without_ddp=model_without_ddp,
        optimizer=optimizer, loss_scaler=loss_scaler)
    torch.cuda.empty_cache()

    print(f"Start training for {args.epochs} epochs")
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            data_loader_train.sampler.set_epoch(epoch)
        if log_writer is not None:
            log_writer.set_step(epoch * num_training_steps_per_epoch)
        train_stats = train_one_epoch(
            model, data_loader_train,
            optimizer, device, epoch, loss_scaler,
            args.clip_grad, log_writer=log_writer,
            start_steps=epoch * num_training_steps_per_epoch,
            lr_schedule_values=lr_schedule_values,
            wd_schedule_values=wd_schedule_values,
            patch_size=patch_size[0],
            normlize_target=args.normlize_target,
        )
        if args.output_dir:
            if (epoch + 1) % args.save_ckpt_freq == 0 or epoch + 1 == args.epochs:
                utils.save_model(
                    args=args, model=model, model_without_ddp=model_without_ddp,
                    optimizer=optimizer, loss_scaler=loss_scaler, epoch=epoch)

        log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
                     'epoch': epoch, 'n_parameters': n_parameters}

        if args.output_dir and utils.is_main_process():
            if log_writer is not None:
                log_writer.flush()
            with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f:
                f.write(json.dumps(log_stats) + "\n")

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))


if __name__ == '__main__':
    opts = get_args()
    if opts.output_dir:
        Path(opts.output_dir).mkdir(parents=True, exist_ok=True)
    main(opts)
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/run_mae_pretraining.py
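The script above derives args.window_size = (num_frames // 2, input_size // patch, input_size // patch) and relies on the tube masking wired up in datasets.py. Below is a minimal re-derivation of that masking for illustration, assuming one random spatial mask shared across all temporal blocks, which is what a TubeMaskingGenerator is expected to produce; it is not the repo's implementation.

import numpy as np

def tube_mask(window_size, mask_ratio):
    t, h, w = window_size
    tokens_per_frame = h * w
    n_mask = int(mask_ratio * tokens_per_frame)
    frame = np.hstack([np.zeros(tokens_per_frame - n_mask), np.ones(n_mask)])
    np.random.shuffle(frame)                 # random per-frame mask, 1 = masked
    return np.tile(frame, (t, 1)).flatten()  # repeated over all t temporal blocks

# 16 frames with tubelet size 2 -> 8 temporal blocks; 224 / 16 = 14 patches per side
mask = tube_mask((16 // 2, 224 // 16, 224 // 16), mask_ratio=0.75)
print(mask.shape, int(mask.sum()))  # (1568,) 1176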
import numbers

import cv2
import numpy as np
import PIL
import torch


def _is_tensor_clip(clip):
    return torch.is_tensor(clip) and clip.ndimension() == 4


def crop_clip(clip, min_h, min_w, h, w):
    if isinstance(clip[0], np.ndarray):
        cropped = [img[min_h:min_h + h, min_w:min_w + w, :] for img in clip]
    elif isinstance(clip[0], PIL.Image.Image):
        cropped = [
            img.crop((min_w, min_h, min_w + w, min_h + h)) for img in clip
        ]
    else:
        raise TypeError('Expected numpy.ndarray or PIL.Image '
                        'but got list of {0}'.format(type(clip[0])))
    return cropped


def resize_clip(clip, size, interpolation='bilinear'):
    if isinstance(clip[0], np.ndarray):
        if isinstance(size, numbers.Number):
            im_h, im_w, im_c = clip[0].shape
            # Min spatial dim already matches minimal size
            if (im_w <= im_h and im_w == size) or (im_h <= im_w and im_h == size):
                return clip
            new_h, new_w = get_resize_sizes(im_h, im_w, size)
            size = (new_w, new_h)
        else:
            size = size[0], size[1]
        if interpolation == 'bilinear':
            np_inter = cv2.INTER_LINEAR
        else:
            np_inter = cv2.INTER_NEAREST
        scaled = [
            cv2.resize(img, size, interpolation=np_inter) for img in clip
        ]
    elif isinstance(clip[0], PIL.Image.Image):
        if isinstance(size, numbers.Number):
            im_w, im_h = clip[0].size
            # Min spatial dim already matches minimal size
            if (im_w <= im_h and im_w == size) or (im_h <= im_w and im_h == size):
                return clip
            new_h, new_w = get_resize_sizes(im_h, im_w, size)
            size = (new_w, new_h)
        else:
            size = size[1], size[0]
        if interpolation == 'bilinear':
            pil_inter = PIL.Image.BILINEAR
        else:
            pil_inter = PIL.Image.NEAREST
        scaled = [img.resize(size, pil_inter) for img in clip]
    else:
        raise TypeError('Expected numpy.ndarray or PIL.Image '
                        'but got list of {0}'.format(type(clip[0])))
    return scaled


def get_resize_sizes(im_h, im_w, size):
    if im_w < im_h:
        ow = size
        oh = int(size * im_h / im_w)
    else:
        oh = size
        ow = int(size * im_w / im_h)
    return oh, ow


def normalize(clip, mean, std, inplace=False):
    if not _is_tensor_clip(clip):
        raise TypeError('tensor is not a torch clip.')
    if not inplace:
        clip = clip.clone()
    dtype = clip.dtype
    mean = torch.as_tensor(mean, dtype=dtype, device=clip.device)
    std = torch.as_tensor(std, dtype=dtype, device=clip.device)
    clip.sub_(mean[:, None, None, None]).div_(std[:, None, None, None])
    return clip
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/functional.py
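A short sanity check for normalize above: it requires a 4-D (C, T, H, W) float tensor (see _is_tensor_clip) and, with inplace=False, leaves its input untouched. The mean/std values are the usual ImageNet statistics.

import torch

from functional import normalize  # the file above

clip = torch.rand(3, 16, 224, 224)  # (C, T, H, W) in [0, 1]
out = normalize(clip, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
print(out.shape, bool((clip >= 0).all()))  # original clip still in [0, 1]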
import io
import os
import math
import time
import json
from collections import defaultdict, deque
import datetime
import numpy as np
from timm.utils import get_state_dict
from torch.utils.data._utils.collate import default_collate
from pathlib import Path
import subprocess
import torch
import torch.distributed as dist
from math import inf  # infinity-norm flag used by get_grad_norm_ (torch._six was removed in recent PyTorch)
import random
from tensorboardX import SummaryWriter


class SmoothedValue(object):
    """Track a series of values and provide access to smoothed values over a
    window or the global series average.
    """

    def __init__(self, window_size=20, fmt=None):
        if fmt is None:
            fmt = "{median:.4f} ({global_avg:.4f})"
        self.deque = deque(maxlen=window_size)
        self.total = 0.0
        self.count = 0
        self.fmt = fmt

    def update(self, value, n=1):
        self.deque.append(value)
        self.count += n
        self.total += value * n

    def synchronize_between_processes(self):
        """
        Warning: does not synchronize the deque!
        """
        if not is_dist_avail_and_initialized():
            return
        t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(t)
        t = t.tolist()
        self.count = int(t[0])
        self.total = t[1]

    @property
    def median(self):
        d = torch.tensor(list(self.deque))
        return d.median().item()

    @property
    def avg(self):
        d = torch.tensor(list(self.deque), dtype=torch.float32)
        return d.mean().item()

    @property
    def global_avg(self):
        return self.total / self.count

    @property
    def max(self):
        return max(self.deque)

    @property
    def value(self):
        return self.deque[-1]

    def __str__(self):
        return self.fmt.format(
            median=self.median,
            avg=self.avg,
            global_avg=self.global_avg,
            max=self.max,
            value=self.value)


class MetricLogger(object):
    def __init__(self, delimiter="\t"):
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        for k, v in kwargs.items():
            if v is None:
                continue
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)

    def __getattr__(self, attr):
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(
            type(self).__name__, attr))

    def __str__(self):
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append(
                "{}: {}".format(name, str(meter))
            )
        return self.delimiter.join(loss_str)

    def synchronize_between_processes(self):
        for meter in self.meters.values():
            meter.synchronize_between_processes()

    def add_meter(self, name, meter):
        self.meters[name] = meter

    def log_every(self, iterable, print_freq, header=None):
        i = 0
        if not header:
            header = ''
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt='{avg:.4f}')
        data_time = SmoothedValue(fmt='{avg:.4f}')
        space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
        log_msg = [
            header,
            '[{0' + space_fmt + '}/{1}]',
            'eta: {eta}',
            '{meters}',
            'time: {time}',
            'data: {data}'
        ]
        if torch.cuda.is_available():
            log_msg.append('max mem: {memory:.0f}')
        log_msg = self.delimiter.join(log_msg)
        MB = 1024.0 * 1024.0
        for obj in iterable:
            data_time.update(time.time() - end)
            yield obj
            iter_time.update(time.time() - end)
            if i % print_freq == 0 or i == len(iterable) - 1:
                eta_seconds = iter_time.global_avg * (len(iterable) - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time),
                        memory=torch.cuda.max_memory_allocated() / MB))
                else:
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time)))
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {} ({:.4f} s / it)'.format(
            header, total_time_str, total_time / len(iterable)))


class TensorboardLogger(object):
    def __init__(self, log_dir):
        self.writer = SummaryWriter(logdir=log_dir)
        self.step = 0

    def set_step(self, step=None):
        if step is not None:
            self.step = step
        else:
            self.step += 1

    def update(self, head='scalar', step=None, **kwargs):
        for k, v in kwargs.items():
            if v is None:
                continue
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.writer.add_scalar(head + "/" + k, v, self.step if step is None else step)

    def flush(self):
        self.writer.flush()


def seed_worker(worker_id):
    worker_seed = torch.initial_seed() % 2**32
    np.random.seed(worker_seed)
    random.seed(worker_seed)


def _load_checkpoint_for_ema(model_ema, checkpoint):
    """
    Workaround for ModelEma._load_checkpoint to accept an already-loaded object
    """
    mem_file = io.BytesIO()
    torch.save(checkpoint, mem_file)
    mem_file.seek(0)
    model_ema._load_checkpoint(mem_file)


def setup_for_distributed(is_master):
    """
    This function disables printing when not in the master process
    """
    import builtins as __builtin__
    builtin_print = __builtin__.print

    def print(*args, **kwargs):
        force = kwargs.pop('force', False)
        if is_master or force:
            builtin_print(*args, **kwargs)

    __builtin__.print = print


def is_dist_avail_and_initialized():
    if not dist.is_available():
        return False
    if not dist.is_initialized():
        return False
    return True


def get_world_size():
    if not is_dist_avail_and_initialized():
        return 1
    return dist.get_world_size()


def get_rank():
    if not is_dist_avail_and_initialized():
        return 0
    return dist.get_rank()


def is_main_process():
    return get_rank() == 0


def save_on_master(*args, **kwargs):
    if is_main_process():
        torch.save(*args, **kwargs)


def init_distributed_mode(args):
    if args.dist_on_itp:
        args.rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
        args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
        args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
        args.dist_url = "tcp://%s:%s" % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT'])
        os.environ['LOCAL_RANK'] = str(args.gpu)
        os.environ['RANK'] = str(args.rank)
        os.environ['WORLD_SIZE'] = str(args.world_size)
    elif 'SLURM_PROCID' in os.environ:
        args.rank = int(os.environ['SLURM_PROCID'])
        args.gpu = int(os.environ['SLURM_LOCALID'])
        args.world_size = int(os.environ['SLURM_NTASKS'])
        os.environ['RANK'] = str(args.rank)
        os.environ['LOCAL_RANK'] = str(args.gpu)
        os.environ['WORLD_SIZE'] = str(args.world_size)

        node_list = os.environ['SLURM_NODELIST']
        addr = subprocess.getoutput(
            f'scontrol show hostname {node_list} | head -n1')
        if 'MASTER_ADDR' not in os.environ:
            os.environ['MASTER_ADDR'] = addr
    elif 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    else:
        print('Not using distributed mode')
        args.distributed = False
        return

    args.distributed = True

    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}, gpu {}'.format(
        args.rank, args.dist_url, args.gpu), flush=True)
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                         world_size=args.world_size, rank=args.rank)
    torch.distributed.barrier()
    # assert torch.distributed.is_initialized()
    setup_for_distributed(args.rank == 0)
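
# --- Illustrative sketch (not part of the original file): how the logging
# helpers above are typically wired together in a training loop. The
# `data_loader` argument and the loss value are hypothetical placeholders.
def _example_metric_logging(data_loader):
    metric_logger = MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', SmoothedValue(window_size=1, fmt='{value:.6f}'))
    for batch in metric_logger.log_every(data_loader, print_freq=10, header='Epoch: [0]'):
        loss = 0.0  # placeholder for a real forward/backward pass on `batch`
        metric_logger.update(loss=loss, lr=1e-3)
    # gather stats from all processes (no-op when not running distributed)
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)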
def load_state_dict(model, state_dict, prefix='', ignore_missing="relative_position_index"):
    missing_keys = []
    unexpected_keys = []
    error_msgs = []
    metadata = getattr(state_dict, '_metadata', None)
    state_dict = state_dict.copy()
    if metadata is not None:
        state_dict._metadata = metadata

    def load(module, prefix=''):
        local_metadata = {} if metadata is None else metadata.get(
            prefix[:-1], {})
        module._load_from_state_dict(
            state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
        for name, child in module._modules.items():
            if child is not None:
                load(child, prefix + name + '.')

    load(model, prefix=prefix)

    warn_missing_keys = []
    ignore_missing_keys = []
    for key in missing_keys:
        keep_flag = True
        for ignore_key in ignore_missing.split('|'):
            if ignore_key in key:
                keep_flag = False
                break
        if keep_flag:
            warn_missing_keys.append(key)
        else:
            ignore_missing_keys.append(key)

    missing_keys = warn_missing_keys

    if len(missing_keys) > 0:
        print("Weights of {} not initialized from pretrained model: {}".format(
            model.__class__.__name__, missing_keys))
    if len(unexpected_keys) > 0:
        print("Weights from pretrained model not used in {}: {}".format(
            model.__class__.__name__, unexpected_keys))
    if len(ignore_missing_keys) > 0:
        print("Ignored weights of {} not initialized from pretrained model: {}".format(
            model.__class__.__name__, ignore_missing_keys))
    if len(error_msgs) > 0:
        print('\n'.join(error_msgs))


class NativeScalerWithGradNormCount:
    state_dict_key = "amp_scaler"

    def __init__(self):
        self._scaler = torch.cuda.amp.GradScaler()

    def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True):
        self._scaler.scale(loss).backward(create_graph=create_graph)
        if update_grad:
            if clip_grad is not None:
                assert parameters is not None
                # unscale the gradients of optimizer's assigned params in-place
                self._scaler.unscale_(optimizer)
                norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad)
            else:
                self._scaler.unscale_(optimizer)
                norm = get_grad_norm_(parameters)
            self._scaler.step(optimizer)
            self._scaler.update()
        else:
            norm = None
        return norm

    def state_dict(self):
        return self._scaler.state_dict()

    def load_state_dict(self, state_dict):
        self._scaler.load_state_dict(state_dict)


def get_grad_norm_(parameters, norm_type: float = 2.0) -> torch.Tensor:
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    parameters = [p for p in parameters if p.grad is not None]
    norm_type = float(norm_type)
    if len(parameters) == 0:
        return torch.tensor(0.)
    device = parameters[0].grad.device
    if norm_type == inf:
        total_norm = max(p.grad.detach().abs().max().to(device) for p in parameters)
    else:
        total_norm = torch.norm(
            torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]),
            norm_type)
    return total_norm


def cosine_scheduler(base_value, final_value, epochs, niter_per_ep, warmup_epochs=0,
                     start_warmup_value=0, warmup_steps=-1):
    warmup_schedule = np.array([])
    warmup_iters = warmup_epochs * niter_per_ep
    if warmup_steps > 0:
        warmup_iters = warmup_steps
    print("Set warmup steps = %d" % warmup_iters)
    if warmup_epochs > 0:
        warmup_schedule = np.linspace(start_warmup_value, base_value, warmup_iters)

    iters = np.arange(epochs * niter_per_ep - warmup_iters)
    schedule = np.array(
        [final_value + 0.5 * (base_value - final_value) * (1 + math.cos(math.pi * i / (len(iters))))
         for i in iters])

    schedule = np.concatenate((warmup_schedule, schedule))

    assert len(schedule) == epochs * niter_per_ep
    return schedule


def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler, model_ema=None):
    output_dir = Path(args.output_dir)
    epoch_name = str(epoch)
    if loss_scaler is not None:
        checkpoint_paths = [output_dir / ('checkpoint-%s.pth' % epoch_name)]
        for checkpoint_path in checkpoint_paths:
            to_save = {
                'model': model_without_ddp.state_dict(),
                'optimizer': optimizer.state_dict(),
                'epoch': epoch,
                'scaler': loss_scaler.state_dict(),
                'args': args,
            }
            if model_ema is not None:
                to_save['model_ema'] = get_state_dict(model_ema)
            save_on_master(to_save, checkpoint_path)
    else:
        client_state = {'epoch': epoch}
        if model_ema is not None:
            client_state['model_ema'] = get_state_dict(model_ema)
        model.save_checkpoint(save_dir=args.output_dir, tag="checkpoint-%s" % epoch_name,
                              client_state=client_state)


def auto_load_model(args, model, model_without_ddp, optimizer, loss_scaler, model_ema=None):
    output_dir = Path(args.output_dir)
    if loss_scaler is not None:
        # torch.amp
        if args.auto_resume and len(args.resume) == 0:
            import glob
            all_checkpoints = glob.glob(os.path.join(output_dir, 'checkpoint-*.pth'))
            latest_ckpt = -1
            for ckpt in all_checkpoints:
                t = ckpt.split('-')[-1].split('.')[0]
                if t.isdigit():
                    latest_ckpt = max(int(t), latest_ckpt)
            if latest_ckpt >= 0:
                args.resume = os.path.join(output_dir, 'checkpoint-%d.pth' % latest_ckpt)
            print("Auto resume checkpoint: %s" % args.resume)

        if args.resume:
            if args.resume.startswith('https'):
                checkpoint = torch.hub.load_state_dict_from_url(
                    args.resume, map_location='cpu', check_hash=True)
            else:
                checkpoint = torch.load(args.resume, map_location='cpu')
            model_without_ddp.load_state_dict(checkpoint['model'])
            print("Resume checkpoint %s" % args.resume)
            if 'optimizer' in checkpoint and 'epoch' in checkpoint:
                optimizer.load_state_dict(checkpoint['optimizer'])
                args.start_epoch = checkpoint['epoch'] + 1
                if hasattr(args, 'model_ema') and args.model_ema:
                    _load_checkpoint_for_ema(model_ema, checkpoint['model_ema'])
                if 'scaler' in checkpoint:
                    loss_scaler.load_state_dict(checkpoint['scaler'])
                print("With optim & sched!")
    else:
        # deepspeed, only support '--auto_resume'.
        if args.auto_resume:
            import glob
            all_checkpoints = glob.glob(os.path.join(output_dir, 'checkpoint-*'))
            latest_ckpt = -1
            for ckpt in all_checkpoints:
                t = ckpt.split('-')[-1].split('.')[0]
                if t.isdigit():
                    latest_ckpt = max(int(t), latest_ckpt)
            if latest_ckpt >= 0:
                args.resume = os.path.join(output_dir, 'checkpoint-%d' % latest_ckpt)
                print("Auto resume checkpoint: %d" % latest_ckpt)
                _, client_states = model.load_checkpoint(args.output_dir,
                                                         tag='checkpoint-%d' % latest_ckpt)
                args.start_epoch = client_states['epoch'] + 1
                if model_ema is not None:
                    if args.model_ema:
                        _load_checkpoint_for_ema(model_ema, client_states['model_ema'])


def create_ds_config(args):
    args.deepspeed_config = os.path.join(args.output_dir, "deepspeed_config.json")
    with open(args.deepspeed_config, mode="w") as writer:
        ds_config = {
            "train_batch_size": args.batch_size * args.update_freq * get_world_size(),
            "train_micro_batch_size_per_gpu": args.batch_size,
            "steps_per_print": 1000,
            "optimizer": {
                "type": "Adam",
                "adam_w_mode": True,
                "params": {
                    "lr": args.lr,
                    "weight_decay": args.weight_decay,
                    "bias_correction": True,
                    "betas": [0.9, 0.999],
                    "eps": 1e-8
                }
            },
            "fp16": {
                "enabled": True,
                "loss_scale": 0,
                "initial_scale_power": 7,
                "loss_scale_window": 128
            }
        }
        writer.write(json.dumps(ds_config, indent=2))


def multiple_samples_collate(batch, fold=False):
    """
    Collate function for repeated augmentation. Each instance in the batch has
    more than one sample.
    Args:
        batch (tuple or list): data batch to collate.
    Returns:
        (tuple): collated data batch.
    """
    inputs, labels, video_idx, extra_data = zip(*batch)
    inputs = [item for sublist in inputs for item in sublist]
    labels = [item for sublist in labels for item in sublist]
    video_idx = [item for sublist in video_idx for item in sublist]
    inputs, labels, video_idx, extra_data = (
        default_collate(inputs),
        default_collate(labels),
        default_collate(video_idx),
        default_collate(extra_data),
    )
    if fold:
        return [inputs], labels, video_idx, extra_data
    else:
        return inputs, labels, video_idx, extra_data
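
# --- Illustrative sketch (not part of the original file): building a
# per-iteration learning-rate schedule with cosine_scheduler. The numbers
# below are arbitrary example hyperparameters.
def _example_lr_schedule():
    # 30 epochs at 100 iterations/epoch, with a 5-epoch linear warmup from 0
    # to 1e-3, then cosine decay to 1e-5; one value per training iteration.
    lr_values = cosine_scheduler(
        base_value=1e-3, final_value=1e-5,
        epochs=30, niter_per_ep=100,
        warmup_epochs=5, start_warmup_value=0)
    assert len(lr_values) == 30 * 100
    return lr_values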
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/utils.py
""" This implementation is based on https://github.com/rwightman/pytorch-image-models/blob/master/timm/data/random_erasing.py pulished under an Apache License 2.0. """ import math import random import torch def _get_pixels( per_pixel, rand_color, patch_size, dtype=torch.float32, device="cuda" ): # NOTE I've seen CUDA illegal memory access errors being caused by the normal_() # paths, flip the order so normal is run on CPU if this becomes a problem # Issue has been fixed in master https://github.com/pytorch/pytorch/issues/19508 if per_pixel: return torch.empty(patch_size, dtype=dtype, device=device).normal_() elif rand_color: return torch.empty( (patch_size[0], 1, 1), dtype=dtype, device=device ).normal_() else: return torch.zeros((patch_size[0], 1, 1), dtype=dtype, device=device) class RandomErasing: """Randomly selects a rectangle region in an image and erases its pixels. 'Random Erasing Data Augmentation' by Zhong et al. See https://arxiv.org/pdf/1708.04896.pdf This variant of RandomErasing is intended to be applied to either a batch or single image tensor after it has been normalized by dataset mean and std. Args: probability: Probability that the Random Erasing operation will be performed. min_area: Minimum percentage of erased area wrt input image area. max_area: Maximum percentage of erased area wrt input image area. min_aspect: Minimum aspect ratio of erased area. mode: pixel color mode, one of 'const', 'rand', or 'pixel' 'const' - erase block is constant color of 0 for all channels 'rand' - erase block is same per-channel random (normal) color 'pixel' - erase block is per-pixel random (normal) color max_count: maximum number of erasing blocks per image, area per box is scaled by count. per-image count is randomly chosen between 1 and this value. 
""" def __init__( self, probability=0.5, min_area=0.02, max_area=1 / 3, min_aspect=0.3, max_aspect=None, mode="const", min_count=1, max_count=None, num_splits=0, device="cuda", cube=True, ): self.probability = probability self.min_area = min_area self.max_area = max_area max_aspect = max_aspect or 1 / min_aspect self.log_aspect_ratio = (math.log(min_aspect), math.log(max_aspect)) self.min_count = min_count self.max_count = max_count or min_count self.num_splits = num_splits mode = mode.lower() self.rand_color = False self.per_pixel = False self.cube = cube if mode == "rand": self.rand_color = True # per block random normal elif mode == "pixel": self.per_pixel = True # per pixel random normal else: assert not mode or mode == "const" self.device = device def _erase(self, img, chan, img_h, img_w, dtype): if random.random() > self.probability: return area = img_h * img_w count = ( self.min_count if self.min_count == self.max_count else random.randint(self.min_count, self.max_count) ) for _ in range(count): for _ in range(10): target_area = ( random.uniform(self.min_area, self.max_area) * area / count ) aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio)) h = int(round(math.sqrt(target_area * aspect_ratio))) w = int(round(math.sqrt(target_area / aspect_ratio))) if w < img_w and h < img_h: top = random.randint(0, img_h - h) left = random.randint(0, img_w - w) img[:, top : top + h, left : left + w] = _get_pixels( self.per_pixel, self.rand_color, (chan, h, w), dtype=dtype, device=self.device, ) break def _erase_cube( self, img, batch_start, batch_size, chan, img_h, img_w, dtype, ): if random.random() > self.probability: return area = img_h * img_w count = ( self.min_count if self.min_count == self.max_count else random.randint(self.min_count, self.max_count) ) for _ in range(count): for _ in range(100): target_area = ( random.uniform(self.min_area, self.max_area) * area / count ) aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio)) h = int(round(math.sqrt(target_area * aspect_ratio))) w = int(round(math.sqrt(target_area / aspect_ratio))) if w < img_w and h < img_h: top = random.randint(0, img_h - h) left = random.randint(0, img_w - w) for i in range(batch_start, batch_size): img_instance = img[i] img_instance[ :, top : top + h, left : left + w ] = _get_pixels( self.per_pixel, self.rand_color, (chan, h, w), dtype=dtype, device=self.device, ) break def __call__(self, input): if len(input.size()) == 3: self._erase(input, *input.size(), input.dtype) else: batch_size, chan, img_h, img_w = input.size() # skip first slice of batch if num_splits is set (for clean portion of samples) batch_start = ( batch_size // self.num_splits if self.num_splits > 1 else 0 ) if self.cube: self._erase_cube( input, batch_start, batch_size, chan, img_h, img_w, input.dtype, ) else: for i in range(batch_start, batch_size): self._erase(input[i], chan, img_h, img_w, input.dtype) return input
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/random_erasing.py
#!/usr/bin/env python3

import math
import numpy as np
import random
import torch
import torchvision.transforms.functional as F
from PIL import Image
from torchvision import transforms

from rand_augment import rand_augment_transform
from random_erasing import RandomErasing

import numbers
import PIL
import torchvision

import functional as FF

_pil_interpolation_to_str = {
    Image.NEAREST: "PIL.Image.NEAREST",
    Image.BILINEAR: "PIL.Image.BILINEAR",
    Image.BICUBIC: "PIL.Image.BICUBIC",
    Image.LANCZOS: "PIL.Image.LANCZOS",
    Image.HAMMING: "PIL.Image.HAMMING",
    Image.BOX: "PIL.Image.BOX",
}

_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC)


def _pil_interp(method):
    if method == "bicubic":
        return Image.BICUBIC
    elif method == "lanczos":
        return Image.LANCZOS
    elif method == "hamming":
        return Image.HAMMING
    else:
        return Image.BILINEAR


def random_short_side_scale_jitter(
    images, min_size, max_size, boxes=None, inverse_uniform_sampling=False
):
    """
    Perform a spatial short scale jittering on the given images and
    corresponding boxes.
    Args:
        images (tensor): images to perform scale jitter. Dimension is
            `num frames` x `channel` x `height` x `width`.
        min_size (int): the minimal size to scale the frames.
        max_size (int): the maximal size to scale the frames.
        boxes (ndarray): optional. Corresponding boxes to images. Dimension is
            `num boxes` x 4.
        inverse_uniform_sampling (bool): if True, sample uniformly in
            [1 / max_scale, 1 / min_scale] and take a reciprocal to get the
            scale. If False, take a uniform sample from [min_scale, max_scale].
    Returns:
        (tensor): the scaled images with dimension of
            `num frames` x `channel` x `new height` x `new width`.
        (ndarray or None): the scaled boxes with dimension of `num boxes` x 4.
    """
    if inverse_uniform_sampling:
        size = int(
            round(1.0 / np.random.uniform(1.0 / max_size, 1.0 / min_size))
        )
    else:
        size = int(round(np.random.uniform(min_size, max_size)))

    height = images.shape[2]
    width = images.shape[3]
    if (width <= height and width == size) or (
        height <= width and height == size
    ):
        return images, boxes
    new_width = size
    new_height = size
    if width < height:
        new_height = int(math.floor((float(height) / width) * size))
        if boxes is not None:
            boxes = boxes * float(new_height) / height
    else:
        new_width = int(math.floor((float(width) / height) * size))
        if boxes is not None:
            boxes = boxes * float(new_width) / width

    return (
        torch.nn.functional.interpolate(
            images,
            size=(new_height, new_width),
            mode="bilinear",
            align_corners=False,
        ),
        boxes,
    )


def crop_boxes(boxes, x_offset, y_offset):
    """
    Perform crop on the bounding boxes given the offsets.
    Args:
        boxes (ndarray or None): bounding boxes to perform crop. The dimension
            is `num boxes` x 4.
        x_offset (int): cropping offset in the x axis.
        y_offset (int): cropping offset in the y axis.
    Returns:
        cropped_boxes (ndarray or None): the cropped boxes with dimension of
            `num boxes` x 4.
    """
    cropped_boxes = boxes.copy()
    cropped_boxes[:, [0, 2]] = boxes[:, [0, 2]] - x_offset
    cropped_boxes[:, [1, 3]] = boxes[:, [1, 3]] - y_offset
    return cropped_boxes


def random_crop(images, size, boxes=None):
    """
    Perform random spatial crop on the given images and corresponding boxes.
    Args:
        images (tensor): images to perform random crop. The dimension is
            `num frames` x `channel` x `height` x `width`.
        size (int): the size of height and width to crop on the image.
        boxes (ndarray or None): optional. Corresponding boxes to images.
            Dimension is `num boxes` x 4.
    Returns:
        cropped (tensor): cropped images with dimension of
            `num frames` x `channel` x `size` x `size`.
        cropped_boxes (ndarray or None): the cropped boxes with dimension of
            `num boxes` x 4.
    """
    if images.shape[2] == size and images.shape[3] == size:
        # already the target size; return boxes unchanged so callers can
        # always unpack a (frames, boxes) pair
        return images, boxes
    height = images.shape[2]
    width = images.shape[3]
    y_offset = 0
    if height > size:
        y_offset = int(np.random.randint(0, height - size))
    x_offset = 0
    if width > size:
        x_offset = int(np.random.randint(0, width - size))
    cropped = images[
        :, :, y_offset : y_offset + size, x_offset : x_offset + size
    ]

    cropped_boxes = (
        crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None
    )

    return cropped, cropped_boxes


def horizontal_flip(prob, images, boxes=None):
    """
    Perform horizontal flip on the given images and corresponding boxes.
    Args:
        prob (float): probability to flip the images.
        images (tensor): images to perform horizontal flip, the dimension is
            `num frames` x `channel` x `height` x `width`.
        boxes (ndarray or None): optional. Corresponding boxes to images.
            Dimension is `num boxes` x 4.
    Returns:
        images (tensor): images with dimension of
            `num frames` x `channel` x `height` x `width`.
        flipped_boxes (ndarray or None): the flipped boxes with dimension of
            `num boxes` x 4.
    """
    if boxes is None:
        flipped_boxes = None
    else:
        flipped_boxes = boxes.copy()

    if np.random.uniform() < prob:
        images = images.flip((-1))

        if len(images.shape) == 3:
            width = images.shape[2]
        elif len(images.shape) == 4:
            width = images.shape[3]
        else:
            raise NotImplementedError("Dimension not supported")
        if boxes is not None:
            flipped_boxes[:, [0, 2]] = width - boxes[:, [2, 0]] - 1

    return images, flipped_boxes


def uniform_crop(images, size, spatial_idx, boxes=None, scale_size=None):
    """
    Perform uniform spatial sampling on the images and corresponding boxes.
    Args:
        images (tensor): images to perform uniform crop. The dimension is
            `num frames` x `channel` x `height` x `width`.
        size (int): size of height and width to crop the images.
        spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width
            is larger than height. Or 0, 1, or 2 for top, center, and bottom
            crop if height is larger than width.
        boxes (ndarray or None): optional. Corresponding boxes to images.
            Dimension is `num boxes` x 4.
        scale_size (int): optional. If not None, resize the images to
            scale_size before performing any crop.
    Returns:
        cropped (tensor): images with dimension of
            `num frames` x `channel` x `size` x `size`.
        cropped_boxes (ndarray or None): the cropped boxes with dimension of
            `num boxes` x 4.
    """
    assert spatial_idx in [0, 1, 2]
    ndim = len(images.shape)
    if ndim == 3:
        images = images.unsqueeze(0)
    height = images.shape[2]
    width = images.shape[3]

    if scale_size is not None:
        if width <= height:
            width, height = scale_size, int(height / width * scale_size)
        else:
            width, height = int(width / height * scale_size), scale_size
        images = torch.nn.functional.interpolate(
            images,
            size=(height, width),
            mode="bilinear",
            align_corners=False,
        )

    y_offset = int(math.ceil((height - size) / 2))
    x_offset = int(math.ceil((width - size) / 2))

    if height > width:
        if spatial_idx == 0:
            y_offset = 0
        elif spatial_idx == 2:
            y_offset = height - size
    else:
        if spatial_idx == 0:
            x_offset = 0
        elif spatial_idx == 2:
            x_offset = width - size
    cropped = images[
        :, :, y_offset : y_offset + size, x_offset : x_offset + size
    ]
    cropped_boxes = (
        crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None
    )
    if ndim == 3:
        cropped = cropped.squeeze(0)
    return cropped, cropped_boxes


def clip_boxes_to_image(boxes, height, width):
    """
    Clip an array of boxes to an image with the given height and width.
    Args:
        boxes (ndarray): bounding boxes to perform clipping.
            Dimension is `num boxes` x 4.
        height (int): given image height.
        width (int): given image width.
    Returns:
        clipped_boxes (ndarray): the clipped boxes with dimension of
            `num boxes` x 4.
    """
    clipped_boxes = boxes.copy()
    clipped_boxes[:, [0, 2]] = np.minimum(
        width - 1.0, np.maximum(0.0, boxes[:, [0, 2]])
    )
    clipped_boxes[:, [1, 3]] = np.minimum(
        height - 1.0, np.maximum(0.0, boxes[:, [1, 3]])
    )
    return clipped_boxes


def blend(images1, images2, alpha):
    """
    Blend two images with a given weight alpha.
    Args:
        images1 (tensor): the first images to be blended, the dimension is
            `num frames` x `channel` x `height` x `width`.
        images2 (tensor): the second images to be blended, the dimension is
            `num frames` x `channel` x `height` x `width`.
        alpha (float): the blending weight.
    Returns:
        (tensor): blended images, the dimension is
            `num frames` x `channel` x `height` x `width`.
    """
    return images1 * alpha + images2 * (1 - alpha)


def grayscale(images):
    """
    Get the grayscale for the input images. The channels of images should be
    in order BGR.
    Args:
        images (tensor): the input images for getting grayscale. Dimension is
            `num frames` x `channel` x `height` x `width`.
    Returns:
        img_gray (tensor): grayscale images, the dimension is
            `num frames` x `channel` x `height` x `width`.
    """
    # R -> 0.299, G -> 0.587, B -> 0.114.
    img_gray = images.clone()
    gray_channel = (
        0.299 * images[:, 2] + 0.587 * images[:, 1] + 0.114 * images[:, 0]
    )
    img_gray[:, 0] = gray_channel
    img_gray[:, 1] = gray_channel
    img_gray[:, 2] = gray_channel
    return img_gray


def color_jitter(images, img_brightness=0, img_contrast=0, img_saturation=0):
    """
    Perform a color jittering on the input images. The channels of images
    should be in order BGR.
    Args:
        images (tensor): images to perform color jitter. Dimension is
            `num frames` x `channel` x `height` x `width`.
        img_brightness (float): jitter ratio for brightness.
        img_contrast (float): jitter ratio for contrast.
        img_saturation (float): jitter ratio for saturation.
    Returns:
        images (tensor): the jittered images, the dimension is
            `num frames` x `channel` x `height` x `width`.
    """
    jitter = []
    if img_brightness != 0:
        jitter.append("brightness")
    if img_contrast != 0:
        jitter.append("contrast")
    if img_saturation != 0:
        jitter.append("saturation")

    if len(jitter) > 0:
        order = np.random.permutation(np.arange(len(jitter)))
        for idx in range(0, len(jitter)):
            if jitter[order[idx]] == "brightness":
                images = brightness_jitter(img_brightness, images)
            elif jitter[order[idx]] == "contrast":
                images = contrast_jitter(img_contrast, images)
            elif jitter[order[idx]] == "saturation":
                images = saturation_jitter(img_saturation, images)
    return images


def brightness_jitter(var, images):
    """
    Perform brightness jittering on the input images. The channels of images
    should be in order BGR.
    Args:
        var (float): jitter ratio for brightness.
        images (tensor): images to perform color jitter. Dimension is
            `num frames` x `channel` x `height` x `width`.
    Returns:
        images (tensor): the jittered images, the dimension is
            `num frames` x `channel` x `height` x `width`.
    """
    alpha = 1.0 + np.random.uniform(-var, var)

    img_bright = torch.zeros(images.shape)
    images = blend(images, img_bright, alpha)
    return images


def contrast_jitter(var, images):
    """
    Perform contrast jittering on the input images. The channels of images
    should be in order BGR.
    Args:
        var (float): jitter ratio for contrast.
        images (tensor): images to perform color jitter. Dimension is
            `num frames` x `channel` x `height` x `width`.
    Returns:
        images (tensor): the jittered images, the dimension is
            `num frames` x `channel` x `height` x `width`.
    """
    alpha = 1.0 + np.random.uniform(-var, var)

    img_gray = grayscale(images)
    img_gray[:] = torch.mean(img_gray, dim=(1, 2, 3), keepdim=True)
    images = blend(images, img_gray, alpha)
    return images


def saturation_jitter(var, images):
    """
    Perform saturation jittering on the input images. The channels of images
    should be in order BGR.
    Args:
        var (float): jitter ratio for saturation.
        images (tensor): images to perform color jitter. Dimension is
            `num frames` x `channel` x `height` x `width`.
    Returns:
        images (tensor): the jittered images, the dimension is
            `num frames` x `channel` x `height` x `width`.
    """
    alpha = 1.0 + np.random.uniform(-var, var)
    img_gray = grayscale(images)
    images = blend(images, img_gray, alpha)

    return images


def lighting_jitter(images, alphastd, eigval, eigvec):
    """
    Perform AlexNet-style PCA jitter on the given images.
    Args:
        images (tensor): images to perform lighting jitter. Dimension is
            `num frames` x `channel` x `height` x `width`.
        alphastd (float): jitter ratio for PCA jitter.
        eigval (list): eigenvalues for PCA jitter.
        eigvec (list[list]): eigenvectors for PCA jitter.
    Returns:
        out_images (tensor): the jittered images, the dimension is
            `num frames` x `channel` x `height` x `width`.
    """
    if alphastd == 0:
        return images
    # generate alpha1, alpha2, alpha3.
    alpha = np.random.normal(0, alphastd, size=(1, 3))
    eig_vec = np.array(eigvec)
    eig_val = np.reshape(eigval, (1, 3))
    rgb = np.sum(
        eig_vec * np.repeat(alpha, 3, axis=0) * np.repeat(eig_val, 3, axis=0),
        axis=1,
    )
    out_images = torch.zeros_like(images)
    if len(images.shape) == 3:
        # C H W
        channel_dim = 0
    elif len(images.shape) == 4:
        # T C H W
        channel_dim = 1
    else:
        raise NotImplementedError(f"Unsupported dimension {len(images.shape)}")

    for idx in range(images.shape[channel_dim]):
        # C H W
        if len(images.shape) == 3:
            out_images[idx] = images[idx] + rgb[2 - idx]
        # T C H W
        elif len(images.shape) == 4:
            out_images[:, idx] = images[:, idx] + rgb[2 - idx]
        else:
            raise NotImplementedError(
                f"Unsupported dimension {len(images.shape)}"
            )

    return out_images


def color_normalization(images, mean, stddev):
    """
    Perform color normalization on the given images.
    Args:
        images (tensor): images to perform color normalization. Dimension is
            `num frames` x `channel` x `height` x `width`.
        mean (list): mean values for normalization.
        stddev (list): standard deviations for normalization.
    Returns:
        out_images (tensor): the normalized images, the dimension is
            `num frames` x `channel` x `height` x `width`.
    """
    if len(images.shape) == 3:
        assert (
            len(mean) == images.shape[0]
        ), "channel mean not computed properly"
        assert (
            len(stddev) == images.shape[0]
        ), "channel stddev not computed properly"
    elif len(images.shape) == 4:
        assert (
            len(mean) == images.shape[1]
        ), "channel mean not computed properly"
        assert (
            len(stddev) == images.shape[1]
        ), "channel stddev not computed properly"
    else:
        raise NotImplementedError(f"Unsupported dimension {len(images.shape)}")

    out_images = torch.zeros_like(images)
    for idx in range(len(mean)):
        # C H W
        if len(images.shape) == 3:
            out_images[idx] = (images[idx] - mean[idx]) / stddev[idx]
        elif len(images.shape) == 4:
            out_images[:, idx] = (images[:, idx] - mean[idx]) / stddev[idx]
        else:
            raise NotImplementedError(
                f"Unsupported dimension {len(images.shape)}"
            )
    return out_images


def _get_param_spatial_crop(
    scale, ratio, height, width, num_repeat=10, log_scale=True, switch_hw=False
):
    """
    Given scale, ratio, height and width, return sampled coordinates of the
    videos.
    """
    for _ in range(num_repeat):
        area = height * width
        target_area = random.uniform(*scale) * area
        if log_scale:
            log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
            aspect_ratio = math.exp(random.uniform(*log_ratio))
        else:
            aspect_ratio = random.uniform(*ratio)

        w = int(round(math.sqrt(target_area * aspect_ratio)))
        h = int(round(math.sqrt(target_area / aspect_ratio)))

        if np.random.uniform() < 0.5 and switch_hw:
            w, h = h, w

        if 0 < w <= width and 0 < h <= height:
            i = random.randint(0, height - h)
            j = random.randint(0, width - w)
            return i, j, h, w

    # Fallback to central crop
    in_ratio = float(width) / float(height)
    if in_ratio < min(ratio):
        w = width
        h = int(round(w / min(ratio)))
    elif in_ratio > max(ratio):
        h = height
        w = int(round(h * max(ratio)))
    else:  # whole image
        w = width
        h = height
    i = (height - h) // 2
    j = (width - w) // 2
    return i, j, h, w


def random_resized_crop(
    images,
    target_height,
    target_width,
    scale=(0.8, 1.0),
    ratio=(3.0 / 4.0, 4.0 / 3.0),
):
    """
    Crop the given images to random size and aspect ratio. A crop of random
    size relative to the original size and a random aspect ratio is made. This
    crop is finally resized to the given size. This is popularly used to train
    the Inception networks.
    Args:
        images: Images to perform resizing and cropping.
        target_height: Desired height after cropping.
        target_width: Desired width after cropping.
        scale: Scale range of Inception-style area based random resizing.
        ratio: Aspect ratio range of Inception-style area based random resizing.
    """
    height = images.shape[2]
    width = images.shape[3]

    i, j, h, w = _get_param_spatial_crop(scale, ratio, height, width)
    cropped = images[:, :, i : i + h, j : j + w]
    return torch.nn.functional.interpolate(
        cropped,
        size=(target_height, target_width),
        mode="bilinear",
        align_corners=False,
    )


def random_resized_crop_with_shift(
    images,
    target_height,
    target_width,
    scale=(0.8, 1.0),
    ratio=(3.0 / 4.0, 4.0 / 3.0),
):
    """
    This is similar to random_resized_crop. However, it samples two different
    boxes (for cropping) for the first and last frame. It then linearly
    interpolates the two boxes for other frames.
    Args:
        images: Images to perform resizing and cropping.
        target_height: Desired height after cropping.
        target_width: Desired width after cropping.
        scale: Scale range of Inception-style area based random resizing.
        ratio: Aspect ratio range of Inception-style area based random resizing.
    """
    t = images.shape[1]
    height = images.shape[2]
    width = images.shape[3]

    i, j, h, w = _get_param_spatial_crop(scale, ratio, height, width)
    i_, j_, h_, w_ = _get_param_spatial_crop(scale, ratio, height, width)
    i_s = [int(i) for i in torch.linspace(i, i_, steps=t).tolist()]
    j_s = [int(i) for i in torch.linspace(j, j_, steps=t).tolist()]
    h_s = [int(i) for i in torch.linspace(h, h_, steps=t).tolist()]
    w_s = [int(i) for i in torch.linspace(w, w_, steps=t).tolist()]
    out = torch.zeros((3, t, target_height, target_width))
    for ind in range(t):
        out[:, ind : ind + 1, :, :] = torch.nn.functional.interpolate(
            images[
                :,
                ind : ind + 1,
                i_s[ind] : i_s[ind] + h_s[ind],
                j_s[ind] : j_s[ind] + w_s[ind],
            ],
            size=(target_height, target_width),
            mode="bilinear",
            align_corners=False,
        )
    return out


def create_random_augment(
    input_size,
    auto_augment=None,
    interpolation="bilinear",
):
    """
    Get video randaug transform.
    Args:
        input_size: The size of the input video in tuple.
        auto_augment: Parameters for randaug. An example:
            "rand-m7-n4-mstd0.5-inc1" (m is the magnitude and n is the number
            of operations to apply).
        interpolation: Interpolation method.
    """
    if isinstance(input_size, tuple):
        img_size = input_size[-2:]
    else:
        img_size = input_size

    if auto_augment:
        assert isinstance(auto_augment, str)
        if isinstance(img_size, tuple):
            img_size_min = min(img_size)
        else:
            img_size_min = img_size
        aa_params = {"translate_const": int(img_size_min * 0.45)}
        if interpolation and interpolation != "random":
            aa_params["interpolation"] = _pil_interp(interpolation)
        if auto_augment.startswith("rand"):
            return transforms.Compose(
                [rand_augment_transform(auto_augment, aa_params)]
            )
    raise NotImplementedError


def random_sized_crop_img(
    im,
    size,
    jitter_scale=(0.08, 1.0),
    jitter_aspect=(3.0 / 4.0, 4.0 / 3.0),
    max_iter=10,
):
    """
    Performs Inception-style cropping (used for training).
    """
    assert (
        len(im.shape) == 3
    ), "Currently only support image for random_sized_crop"
    h, w = im.shape[1:3]
    i, j, h, w = _get_param_spatial_crop(
        scale=jitter_scale,
        ratio=jitter_aspect,
        height=h,
        width=w,
        num_repeat=max_iter,
        log_scale=False,
        switch_hw=True,
    )
    cropped = im[:, i : i + h, j : j + w]
    return torch.nn.functional.interpolate(
        cropped.unsqueeze(0),
        size=(size, size),
        mode="bilinear",
        align_corners=False,
    ).squeeze(0)


# The following code is modified based on the timm lib; we will replace the
# following contents with a dependency on PyTorchVideo.
# https://github.com/facebookresearch/pytorchvideo
class RandomResizedCropAndInterpolation:
    """Crop the given PIL Image to random size and aspect ratio with random
    interpolation. A crop of random size (default: of 0.08 to 1.0) of the
    original size and a random aspect ratio (default: of 3/4 to 4/3) of the
    original aspect ratio is made. This crop is finally resized to the given
    size. This is popularly used to train the Inception networks.
    Args:
        size: expected output size of each edge
        scale: range of size of the origin size cropped
        ratio: range of aspect ratio of the origin aspect ratio cropped
        interpolation: Default: PIL.Image.BILINEAR
    """

    def __init__(
        self,
        size,
        scale=(0.08, 1.0),
        ratio=(3.0 / 4.0, 4.0 / 3.0),
        interpolation="bilinear",
    ):
        if isinstance(size, tuple):
            self.size = size
        else:
            self.size = (size, size)
        if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
            print("range should be of kind (min, max)")

        if interpolation == "random":
            self.interpolation = _RANDOM_INTERPOLATION
        else:
            self.interpolation = _pil_interp(interpolation)
        self.scale = scale
        self.ratio = ratio

    @staticmethod
    def get_params(img, scale, ratio):
        """Get parameters for ``crop`` for a random sized crop.
        Args:
            img (PIL Image): Image to be cropped.
            scale (tuple): range of size of the origin size cropped
            ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
        Returns:
            tuple: params (i, j, h, w) to be passed to ``crop`` for a random
                sized crop.
        """
        area = img.size[0] * img.size[1]

        for _ in range(10):
            target_area = random.uniform(*scale) * area
            log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
            aspect_ratio = math.exp(random.uniform(*log_ratio))

            w = int(round(math.sqrt(target_area * aspect_ratio)))
            h = int(round(math.sqrt(target_area / aspect_ratio)))

            if w <= img.size[0] and h <= img.size[1]:
                i = random.randint(0, img.size[1] - h)
                j = random.randint(0, img.size[0] - w)
                return i, j, h, w

        # Fallback to central crop
        in_ratio = img.size[0] / img.size[1]
        if in_ratio < min(ratio):
            w = img.size[0]
            h = int(round(w / min(ratio)))
        elif in_ratio > max(ratio):
            h = img.size[1]
            w = int(round(h * max(ratio)))
        else:  # whole image
            w = img.size[0]
            h = img.size[1]
        i = (img.size[1] - h) // 2
        j = (img.size[0] - w) // 2
        return i, j, h, w

    def __call__(self, img):
        """
        Args:
            img (PIL Image): Image to be cropped and resized.
        Returns:
            PIL Image: Randomly cropped and resized image.
        """
        i, j, h, w = self.get_params(img, self.scale, self.ratio)
        if isinstance(self.interpolation, (tuple, list)):
            interpolation = random.choice(self.interpolation)
        else:
            interpolation = self.interpolation
        return F.resized_crop(img, i, j, h, w, self.size, interpolation)

    def __repr__(self):
        if isinstance(self.interpolation, (tuple, list)):
            interpolate_str = " ".join(
                [_pil_interpolation_to_str[x] for x in self.interpolation]
            )
        else:
            interpolate_str = _pil_interpolation_to_str[self.interpolation]
        format_string = self.__class__.__name__ + "(size={0}".format(self.size)
        format_string += ", scale={0}".format(
            tuple(round(s, 4) for s in self.scale)
        )
        format_string += ", ratio={0}".format(
            tuple(round(r, 4) for r in self.ratio)
        )
        format_string += ", interpolation={0})".format(interpolate_str)
        return format_string


def transforms_imagenet_train(
    img_size=224,
    scale=None,
    ratio=None,
    hflip=0.5,
    vflip=0.0,
    color_jitter=0.4,
    auto_augment=None,
    interpolation="random",
    use_prefetcher=False,
    mean=(0.485, 0.456, 0.406),
    std=(0.229, 0.224, 0.225),
    re_prob=0.0,
    re_mode="const",
    re_count=1,
    re_num_splits=0,
    separate=False,
):
    """
    If separate==True, the transforms are returned as a tuple of 3 separate
    transforms for use in a mixing dataset that passes
     * all data through the first (primary) transform, called the 'clean' data
     * a portion of the data through the secondary transform
     * normalizes and converts the branches above with the third, final transform
    """
    if isinstance(img_size, tuple):
        img_size = img_size[-2:]

    scale = tuple(scale or (0.08, 1.0))  # default imagenet scale range
    ratio = tuple(
        ratio or (3.0 / 4.0, 4.0 / 3.0)
    )  # default imagenet ratio range
    primary_tfl = [
        RandomResizedCropAndInterpolation(
            img_size, scale=scale, ratio=ratio, interpolation=interpolation
        )
    ]
    if hflip > 0.0:
        primary_tfl += [transforms.RandomHorizontalFlip(p=hflip)]
    if vflip > 0.0:
        primary_tfl += [transforms.RandomVerticalFlip(p=vflip)]

    secondary_tfl = []
    if auto_augment:
        assert isinstance(auto_augment, str)
        if isinstance(img_size, tuple):
            img_size_min = min(img_size)
        else:
            img_size_min = img_size
        aa_params = dict(
            translate_const=int(img_size_min * 0.45),
            img_mean=tuple([min(255, round(255 * x)) for x in mean]),
        )
        if interpolation and interpolation != "random":
            aa_params["interpolation"] = _pil_interp(interpolation)
        if auto_augment.startswith("rand"):
            secondary_tfl += [rand_augment_transform(auto_augment, aa_params)]
        elif auto_augment.startswith("augmix"):
            raise NotImplementedError("Augmix not implemented")
        else:
            raise NotImplementedError("Auto aug not implemented")
    elif color_jitter is not None:
        # color jitter is enabled when not using AA
        if isinstance(color_jitter, (list, tuple)):
            # color jitter should be a 3-tuple/list if spec brightness/contrast/saturation
            # or 4 if also augmenting hue
            assert len(color_jitter) in (3, 4)
        else:
            # if it's a scalar, duplicate for brightness, contrast, and saturation, no hue
            color_jitter = (float(color_jitter),) * 3
        secondary_tfl += [transforms.ColorJitter(*color_jitter)]

    final_tfl = []
    final_tfl += [
        transforms.ToTensor(),
        transforms.Normalize(mean=torch.tensor(mean), std=torch.tensor(std)),
    ]
    if re_prob > 0.0:
        final_tfl.append(
            RandomErasing(
                re_prob,
                mode=re_mode,
                max_count=re_count,
                num_splits=re_num_splits,
                device="cpu",
                cube=False,
            )
        )

    if separate:
        return (
            transforms.Compose(primary_tfl),
            transforms.Compose(secondary_tfl),
            transforms.Compose(final_tfl),
        )
    else:
        return transforms.Compose(primary_tfl + secondary_tfl + final_tfl)
############################################################################################################
############################################################################################################


class Compose(object):
    """Composes several transforms
    Args:
        transforms (list of ``Transform`` objects): list of transforms
            to compose
    """

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, clip):
        for t in self.transforms:
            clip = t(clip)
        return clip


class RandomHorizontalFlip(object):
    """Horizontally flip the list of given images randomly with a probability 0.5"""

    def __call__(self, clip):
        """
        Args:
            clip (list of PIL.Image or numpy.ndarray): List of images to be
                flipped, in format (h, w, c) for numpy.ndarray
        Returns:
            PIL.Image or numpy.ndarray: Randomly flipped clip
        """
        if random.random() < 0.5:
            if isinstance(clip[0], np.ndarray):
                return [np.fliplr(img) for img in clip]
            elif isinstance(clip[0], PIL.Image.Image):
                return [
                    img.transpose(PIL.Image.FLIP_LEFT_RIGHT) for img in clip
                ]
            else:
                raise TypeError('Expected numpy.ndarray or PIL.Image' +
                                ' but got list of {0}'.format(type(clip[0])))
        return clip


class RandomResize(object):
    """Resizes a list of (H x W x C) numpy.ndarray to the final size
    The larger the original image is, the more times it takes to interpolate
    Args:
        interpolation (str): Can be one of 'nearest', 'bilinear'
            defaults to nearest
        size (tuple): (width, height)
    """

    def __init__(self, ratio=(3. / 4., 4. / 3.), interpolation='nearest'):
        self.ratio = ratio
        self.interpolation = interpolation

    def __call__(self, clip):
        scaling_factor = random.uniform(self.ratio[0], self.ratio[1])

        if isinstance(clip[0], np.ndarray):
            im_h, im_w, im_c = clip[0].shape
        elif isinstance(clip[0], PIL.Image.Image):
            im_w, im_h = clip[0].size

        new_w = int(im_w * scaling_factor)
        new_h = int(im_h * scaling_factor)
        new_size = (new_w, new_h)
        resized = FF.resize_clip(
            clip, new_size, interpolation=self.interpolation)
        return resized


class Resize(object):
    """Resizes a list of (H x W x C) numpy.ndarray to the final size
    The larger the original image is, the more times it takes to interpolate
    Args:
        interpolation (str): Can be one of 'nearest', 'bilinear'
            defaults to nearest
        size (tuple): (width, height)
    """

    def __init__(self, size, interpolation='nearest'):
        self.size = size
        self.interpolation = interpolation

    def __call__(self, clip):
        resized = FF.resize_clip(
            clip, self.size, interpolation=self.interpolation)
        return resized


class RandomCrop(object):
    """Extract random crop at the same location for a list of images
    Args:
        size (sequence or int): Desired output size for the
            crop in format (h, w)
    """

    def __init__(self, size):
        if isinstance(size, numbers.Number):
            size = (size, size)
        self.size = size

    def __call__(self, clip):
        """
        Args:
            clip (list of PIL.Image or numpy.ndarray): List of images to be
                cropped, in format (h, w, c) for numpy.ndarray
        Returns:
            PIL.Image or numpy.ndarray: Cropped list of images
        """
        h, w = self.size
        if isinstance(clip[0], np.ndarray):
            im_h, im_w, im_c = clip[0].shape
        elif isinstance(clip[0], PIL.Image.Image):
            im_w, im_h = clip[0].size
        else:
            raise TypeError('Expected numpy.ndarray or PIL.Image' +
                            ' but got list of {0}'.format(type(clip[0])))
        if w > im_w or h > im_h:
            error_msg = (
                'Initial image size should be larger than '
                'cropped size but got cropped sizes : ({w}, {h}) while '
                'initial image is ({im_w}, {im_h})'.format(
                    im_w=im_w, im_h=im_h, w=w, h=h))
            raise ValueError(error_msg)

        x1 = random.randint(0, im_w - w)
        y1 = random.randint(0, im_h - h)
        cropped = FF.crop_clip(clip, y1, x1, h, w)

        return cropped


class ThreeCrop(object):
    """Extract three crops at fixed locations along the longer side for a
    list of images
    Args:
        size (sequence or int): Desired output size for the
            crop in format (h, w)
    """

    def __init__(self, size):
        if isinstance(size, numbers.Number):
            size = (size, size)
        self.size = size

    def __call__(self, clip):
        """
        Args:
            clip (list of PIL.Image or numpy.ndarray): List of images to be
                cropped, in format (h, w, c) for numpy.ndarray
        Returns:
            PIL.Image or numpy.ndarray: Cropped list of images
        """
        h, w = self.size
        if isinstance(clip[0], np.ndarray):
            im_h, im_w, im_c = clip[0].shape
        elif isinstance(clip[0], PIL.Image.Image):
            im_w, im_h = clip[0].size
        else:
            raise TypeError('Expected numpy.ndarray or PIL.Image' +
                            ' but got list of {0}'.format(type(clip[0])))
        if w != im_w and h != im_h:
            clip = FF.resize_clip(clip, self.size, interpolation="bilinear")
            im_h, im_w, im_c = clip[0].shape

        # use the built-in max so the step is floored at 0 (np.max would treat
        # the second argument as an axis)
        step = max((max(im_w, im_h) - self.size[0]) // 2, 0)
        cropped = []
        for i in range(3):
            if (im_h > self.size[0]):
                x1 = 0
                y1 = i * step
                cropped.extend(FF.crop_clip(clip, y1, x1, h, w))
            else:
                x1 = i * step
                y1 = 0
                cropped.extend(FF.crop_clip(clip, y1, x1, h, w))
        return cropped


class RandomRotation(object):
    """Rotate entire clip randomly by a random angle within
    given bounds
    Args:
        degrees (sequence or int): Range of degrees to select from
            If degrees is a number instead of sequence like (min, max),
            the range of degrees will be (-degrees, +degrees).
    """

    def __init__(self, degrees):
        if isinstance(degrees, numbers.Number):
            if degrees < 0:
                raise ValueError('If degrees is a single number, '
                                 'it must be positive')
            degrees = (-degrees, degrees)
        else:
            if len(degrees) != 2:
                raise ValueError('If degrees is a sequence, '
                                 'it must be of len 2.')

        self.degrees = degrees

    def __call__(self, clip):
        """
        Args:
            clip (list of PIL.Image or numpy.ndarray): List of images to be
                rotated, in format (h, w, c) for numpy.ndarray
        Returns:
            PIL.Image or numpy.ndarray: Rotated list of images
        """
        import skimage
        angle = random.uniform(self.degrees[0], self.degrees[1])
        if isinstance(clip[0], np.ndarray):
            rotated = [skimage.transform.rotate(img, angle) for img in clip]
        elif isinstance(clip[0], PIL.Image.Image):
            rotated = [img.rotate(angle) for img in clip]
        else:
            raise TypeError('Expected numpy.ndarray or PIL.Image' +
                            ' but got list of {0}'.format(type(clip[0])))

        return rotated


class CenterCrop(object):
    """Extract center crop at the same location for a list of images
    Args:
        size (sequence or int): Desired output size for the
            crop in format (h, w)
    """

    def __init__(self, size):
        if isinstance(size, numbers.Number):
            size = (size, size)
        self.size = size

    def __call__(self, clip):
        """
        Args:
            clip (list of PIL.Image or numpy.ndarray): List of images to be
                cropped, in format (h, w, c) for numpy.ndarray
        Returns:
            PIL.Image or numpy.ndarray: Cropped list of images
        """
        h, w = self.size
        if isinstance(clip[0], np.ndarray):
            im_h, im_w, im_c = clip[0].shape
        elif isinstance(clip[0], PIL.Image.Image):
            im_w, im_h = clip[0].size
        else:
            raise TypeError('Expected numpy.ndarray or PIL.Image' +
                            ' but got list of {0}'.format(type(clip[0])))
        if w > im_w or h > im_h:
            error_msg = (
                'Initial image size should be larger than '
                'cropped size but got cropped sizes : ({w}, {h}) while '
                'initial image is ({im_w}, {im_h})'.format(
                    im_w=im_w, im_h=im_h, w=w, h=h))
            raise ValueError(error_msg)

        x1 = int(round((im_w - w) / 2.))
        y1 = int(round((im_h - h) / 2.))
        cropped = FF.crop_clip(clip, y1, x1, h, w)

        return cropped


class ColorJitter(object):
    """Randomly change the brightness, contrast, saturation and hue of the clip
    Args:
        brightness (float): How much to jitter brightness. brightness_factor
            is chosen uniformly from [max(0, 1 - brightness), 1 + brightness].
        contrast (float): How much to jitter contrast. contrast_factor
            is chosen uniformly from [max(0, 1 - contrast), 1 + contrast].
        saturation (float): How much to jitter saturation. saturation_factor
            is chosen uniformly from [max(0, 1 - saturation), 1 + saturation].
        hue (float): How much to jitter hue. hue_factor is chosen uniformly
            from [-hue, hue]. Should be >= 0 and <= 0.5.
    """

    def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
        self.brightness = brightness
        self.contrast = contrast
        self.saturation = saturation
        self.hue = hue

    def get_params(self, brightness, contrast, saturation, hue):
        if brightness > 0:
            brightness_factor = random.uniform(
                max(0, 1 - brightness), 1 + brightness)
        else:
            brightness_factor = None

        if contrast > 0:
            contrast_factor = random.uniform(
                max(0, 1 - contrast), 1 + contrast)
        else:
            contrast_factor = None

        if saturation > 0:
            saturation_factor = random.uniform(
                max(0, 1 - saturation), 1 + saturation)
        else:
            saturation_factor = None

        if hue > 0:
            hue_factor = random.uniform(-hue, hue)
        else:
            hue_factor = None
        return brightness_factor, contrast_factor, saturation_factor, hue_factor

    def __call__(self, clip):
        """
        Args:
            clip (list): list of PIL.Image
        Returns:
            list PIL.Image : list of transformed PIL.Image
        """
        if isinstance(clip[0], np.ndarray):
            raise TypeError(
                'Color jitter not yet implemented for numpy arrays')
        elif isinstance(clip[0], PIL.Image.Image):
            brightness, contrast, saturation, hue = self.get_params(
                self.brightness, self.contrast, self.saturation, self.hue)

            # Create img transform function sequence
            img_transforms = []
            if brightness is not None:
                img_transforms.append(lambda img: torchvision.transforms.functional.adjust_brightness(img, brightness))
            if saturation is not None:
                img_transforms.append(lambda img: torchvision.transforms.functional.adjust_saturation(img, saturation))
            if hue is not None:
                img_transforms.append(lambda img: torchvision.transforms.functional.adjust_hue(img, hue))
            if contrast is not None:
                img_transforms.append(lambda img: torchvision.transforms.functional.adjust_contrast(img, contrast))
            random.shuffle(img_transforms)

            # Apply all sampled jitter ops, chained, to every frame
            jittered_clip = []
            for img in clip:
                for func in img_transforms:
                    img = func(img)
                jittered_clip.append(img)
        else:
            raise TypeError('Expected numpy.ndarray or PIL.Image' +
                            ' but got list of {0}'.format(type(clip[0])))
        return jittered_clip


class Normalize(object):
    """Normalize a clip with mean and standard deviation.
    Given mean: ``(M1,...,Mn)`` and std: ``(S1,..,Sn)`` for ``n`` channels,
    this transform will normalize each channel of the input ``torch.*Tensor``
    i.e. ``input[channel] = (input[channel] - mean[channel]) / std[channel]``
    .. note::
        This transform acts out of place, i.e., it does not mutate the input
        tensor.
    Args:
        mean (sequence): Sequence of means for each channel.
        std (sequence): Sequence of standard deviations for each channel.
    """

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, clip):
        """
        Args:
            clip (Tensor): Tensor clip of size (T, C, H, W) to be normalized.
        Returns:
            Tensor: Normalized Tensor clip.
        """
        return FF.normalize(clip, self.mean, self.std)

    def __repr__(self):
        return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
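
# --- Illustrative sketch (not part of the original file): composing the
# clip-level transforms above the way the dataset code in this repo does for
# validation (the ClipToTensor/Normalize steps live in other modules and are
# omitted here). `clip` is assumed to be a list of PIL.Image frames or
# H x W x C numpy arrays.
def _example_clip_pipeline(clip):
    pipeline = Compose([
        Resize(256, interpolation='bilinear'),
        CenterCrop(size=(224, 224)),
    ])
    return pipeline(clip)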
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/video_transforms.py
import os
import numpy as np
import torch
import decord
from PIL import Image
from torchvision import transforms
from random_erasing import RandomErasing
import warnings
from decord import VideoReader, cpu
from torch.utils.data import Dataset
import video_transforms as video_transforms
import volume_transforms as volume_transforms


class VideoClsDataset(Dataset):
    """Load your own video classification dataset."""

    def __init__(self, anno_path, data_path, mode='train', clip_len=8,
                 frame_sample_rate=2, crop_size=224, short_side_size=256,
                 new_height=256, new_width=340, keep_aspect_ratio=True,
                 num_segment=1, num_crop=1, test_num_segment=10,
                 test_num_crop=3, args=None):
        self.anno_path = anno_path
        self.data_path = data_path
        self.mode = mode
        self.clip_len = clip_len
        self.frame_sample_rate = frame_sample_rate
        self.crop_size = crop_size
        self.short_side_size = short_side_size
        self.new_height = new_height
        self.new_width = new_width
        self.keep_aspect_ratio = keep_aspect_ratio
        self.num_segment = num_segment
        self.test_num_segment = test_num_segment
        self.num_crop = num_crop
        self.test_num_crop = test_num_crop
        self.args = args
        self.aug = False
        self.rand_erase = False
        if self.mode in ['train']:
            self.aug = True
            if self.args.reprob > 0:
                self.rand_erase = True
        if VideoReader is None:
            raise ImportError("Unable to import `decord` which is required to read videos.")

        import pandas as pd
        cleaned = pd.read_csv(self.anno_path, header=None, delimiter=' ')
        self.dataset_samples = list(cleaned.values[:, 0])
        self.label_array = list(cleaned.values[:, 1])

        if (mode == 'train'):
            pass

        elif (mode == 'validation'):
            self.data_transform = video_transforms.Compose([
                video_transforms.Resize(self.short_side_size, interpolation='bilinear'),
                video_transforms.CenterCrop(size=(self.crop_size, self.crop_size)),
                volume_transforms.ClipToTensor(),
                video_transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                           std=[0.229, 0.224, 0.225])
            ])
        elif mode == 'test':
            self.data_resize = video_transforms.Compose([
                video_transforms.Resize(size=(short_side_size), interpolation='bilinear')
            ])
            self.data_transform = video_transforms.Compose([
                volume_transforms.ClipToTensor(),
                video_transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                           std=[0.229, 0.224, 0.225])
            ])
            self.test_seg = []
            self.test_dataset = []
            self.test_label_array = []
            for ck in range(self.test_num_segment):
                for cp in range(self.test_num_crop):
                    for idx in range(len(self.label_array)):
                        sample_label = self.label_array[idx]
                        self.test_label_array.append(sample_label)
                        self.test_dataset.append(self.dataset_samples[idx])
                        self.test_seg.append((ck, cp))

    def __getitem__(self, index):
        if self.mode == 'train':
            args = self.args
            scale_t = 1

            sample = self.dataset_samples[index]
            buffer = self.loadvideo_decord(sample, sample_rate_scale=scale_t)  # T H W C
            if len(buffer) == 0:
                while len(buffer) == 0:
                    warnings.warn("video {} not correctly loaded during training".format(sample))
                    index = np.random.randint(self.__len__())
                    sample = self.dataset_samples[index]
                    buffer = self.loadvideo_decord(sample, sample_rate_scale=scale_t)

            if args.num_sample > 1:
                frame_list = []
                label_list = []
                index_list = []
                for _ in range(args.num_sample):
                    new_frames = self._aug_frame(buffer, args)
                    label = self.label_array[index]
                    frame_list.append(new_frames)
                    label_list.append(label)
                    index_list.append(index)
                return frame_list, label_list, index_list, {}
            else:
                buffer = self._aug_frame(buffer, args)

            return buffer, self.label_array[index], index, {}

        elif self.mode == 'validation':
            sample = self.dataset_samples[index]
            buffer = self.loadvideo_decord(sample)
            if len(buffer) == 0:
                while len(buffer) == 0:
                    warnings.warn("video {} not correctly loaded during validation".format(sample))
                    index = np.random.randint(self.__len__())
                    sample = self.dataset_samples[index]
                    buffer = self.loadvideo_decord(sample)
            buffer = self.data_transform(buffer)
            return buffer, self.label_array[index], sample.split("/")[-1].split(".")[0]

        elif self.mode == 'test':
            sample = self.test_dataset[index]
            chunk_nb, split_nb = self.test_seg[index]
            buffer = self.loadvideo_decord(sample)

            while len(buffer) == 0:
                warnings.warn("video {}, temporal {}, spatial {} not found during testing".format(
                    str(self.test_dataset[index]), chunk_nb, split_nb))
                index = np.random.randint(self.__len__())
                sample = self.test_dataset[index]
                chunk_nb, split_nb = self.test_seg[index]
                buffer = self.loadvideo_decord(sample)

            buffer = self.data_resize(buffer)
            if isinstance(buffer, list):
                buffer = np.stack(buffer, 0)

            spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) - self.short_side_size) \
                / (self.test_num_crop - 1)
            temporal_step = max(1.0 * (buffer.shape[0] - self.clip_len)
                                / (self.test_num_segment - 1), 0)
            temporal_start = int(chunk_nb * temporal_step)
            spatial_start = int(split_nb * spatial_step)
            if buffer.shape[1] >= buffer.shape[2]:
                buffer = buffer[temporal_start:temporal_start + self.clip_len,
                                spatial_start:spatial_start + self.short_side_size, :, :]
            else:
                buffer = buffer[temporal_start:temporal_start + self.clip_len,
                                :, spatial_start:spatial_start + self.short_side_size, :]

            buffer = self.data_transform(buffer)
            return buffer, self.test_label_array[index], \
                sample.split("/")[-1].split(".")[0], chunk_nb, split_nb
        else:
            raise NameError('mode {} unknown'.format(self.mode))

    def _aug_frame(self, buffer, args):
        aug_transform = video_transforms.create_random_augment(
            input_size=(self.crop_size, self.crop_size),
            auto_augment=args.aa,
            interpolation=args.train_interpolation,
        )

        buffer = [transforms.ToPILImage()(frame) for frame in buffer]
        buffer = aug_transform(buffer)

        buffer = [transforms.ToTensor()(img) for img in buffer]
        buffer = torch.stack(buffer)  # T C H W
        buffer = buffer.permute(0, 2, 3, 1)  # T H W C

        # T H W C
        buffer = tensor_normalize(
            buffer, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]
        )
        # T H W C -> C T H W.
        buffer = buffer.permute(3, 0, 1, 2)
        # Perform data augmentation.
        scl, asp = (
            [0.08, 1.0],
            [0.75, 1.3333],
        )

        buffer = spatial_sampling(
            buffer,
            spatial_idx=-1,
            min_scale=256,
            max_scale=320,
            crop_size=self.crop_size,
            random_horizontal_flip=False if args.data_set == 'SSV2' else True,
            inverse_uniform_sampling=False,
            aspect_ratio=asp,
            scale=scl,
            motion_shift=False
        )

        if self.rand_erase:
            erase_transform = RandomErasing(
                args.reprob,
                mode=args.remode,
                max_count=args.recount,
                num_splits=args.recount,
                device="cpu",
            )
            # erase on the (T, C, H, W) view so the box is shared across frames
            buffer = buffer.permute(1, 0, 2, 3)
            buffer = erase_transform(buffer)
            buffer = buffer.permute(1, 0, 2, 3)

        return buffer

    def loadvideo_decord(self, sample, sample_rate_scale=1):
        """Load video content using Decord"""
        fname = sample

        if not (os.path.exists(fname)):
            return []

        # avoid hanging issue
        if os.path.getsize(fname) < 1 * 1024:
            print('SKIP: ', fname, " - ", os.path.getsize(fname))
            return []
        try:
            if self.keep_aspect_ratio:
                vr = VideoReader(fname, num_threads=1, ctx=cpu(0))
            else:
                vr = VideoReader(fname, width=self.new_width, height=self.new_height,
                                 num_threads=1, ctx=cpu(0))
        except Exception:
            print("video cannot be loaded by decord: ", fname)
            return []

        if self.mode == 'test':
            all_index = [x for x in range(0, len(vr), self.frame_sample_rate)]
            while len(all_index) < self.clip_len:
                all_index.append(all_index[-1])
            vr.seek(0)
            buffer = vr.get_batch(all_index).asnumpy()
            return buffer

        # handle temporal segments
        converted_len = int(self.clip_len * self.frame_sample_rate)
        seg_len = len(vr) // self.num_segment

        all_index = []
        for i in range(self.num_segment):
            if seg_len <= converted_len:
                index = np.linspace(0, seg_len, num=seg_len // self.frame_sample_rate)
                index = np.concatenate((index, np.ones(self.clip_len - seg_len // self.frame_sample_rate) * seg_len))
                index = np.clip(index, 0, seg_len - 1).astype(np.int64)
            else:
                end_idx = np.random.randint(converted_len, seg_len)
                str_idx = end_idx - converted_len
                index = np.linspace(str_idx, end_idx, num=self.clip_len)
                index = np.clip(index, str_idx, end_idx - 1).astype(np.int64)
            index = index + i * seg_len
            all_index.extend(list(index))

        all_index = all_index[::int(sample_rate_scale)]
        vr.seek(0)
        buffer = vr.get_batch(all_index).asnumpy()
        return buffer

    def __len__(self):
        if self.mode != 'test':
            return len(self.dataset_samples)
        else:
            return len(self.test_dataset)


def spatial_sampling(
    frames,
    spatial_idx=-1,
    min_scale=256,
    max_scale=320,
    crop_size=224,
    random_horizontal_flip=True,
    inverse_uniform_sampling=False,
    aspect_ratio=None,
    scale=None,
    motion_shift=False,
):
    """
    Perform spatial sampling on the given video frames. If spatial_idx is
    -1, perform random scale, random crop, and random flip on the given
    frames. If spatial_idx is 0, 1, or 2, perform spatial uniform sampling
    with the given spatial_idx.
    Args:
        frames (tensor): frames of images sampled from the video. The
            dimension is `num frames` x `height` x `width` x `channel`.
        spatial_idx (int): if -1, perform random spatial sampling. If 0, 1,
            or 2, perform left, center, right crop if width is larger than
            height, and perform top, center, bottom crop if height is larger
            than width.
        min_scale (int): the minimal size of scaling.
        max_scale (int): the maximal size of scaling.
        crop_size (int): the size of height and width used to crop the
            frames.
        inverse_uniform_sampling (bool): if True, sample uniformly in
            [1 / max_scale, 1 / min_scale] and take a reciprocal to get the
            scale. If False, take a uniform sample from [min_scale, max_scale].
        aspect_ratio (list): Aspect ratio range for resizing.
        scale (list): Scale range for resizing.
        motion_shift (bool): Whether to apply motion shift for resizing.
Returns: frames (tensor): spatially sampled frames. """ assert spatial_idx in [-1, 0, 1, 2] if spatial_idx == -1: if aspect_ratio is None and scale is None: frames, _ = video_transforms.random_short_side_scale_jitter( images=frames, min_size=min_scale, max_size=max_scale, inverse_uniform_sampling=inverse_uniform_sampling, ) frames, _ = video_transforms.random_crop(frames, crop_size) else: transform_func = ( video_transforms.random_resized_crop_with_shift if motion_shift else video_transforms.random_resized_crop ) frames = transform_func( images=frames, target_height=crop_size, target_width=crop_size, scale=scale, ratio=aspect_ratio, ) if random_horizontal_flip: frames, _ = video_transforms.horizontal_flip(0.5, frames) else: # The testing is deterministic and no jitter should be performed. # min_scale, max_scale, and crop_size are expect to be the same. assert len({min_scale, max_scale, crop_size}) == 1 frames, _ = video_transforms.random_short_side_scale_jitter( frames, min_scale, max_scale ) frames, _ = video_transforms.uniform_crop(frames, crop_size, spatial_idx) return frames def tensor_normalize(tensor, mean, std): """ Normalize a given tensor by subtracting the mean and dividing the std. Args: tensor (tensor): tensor to normalize. mean (tensor or list): mean value to subtract. std (tensor or list): std to divide. """ if tensor.dtype == torch.uint8: tensor = tensor.float() tensor = tensor / 255.0 if type(mean) == list: mean = torch.tensor(mean) if type(std) == list: std = torch.tensor(std) tensor = tensor - mean tensor = tensor / std return tensor class VideoMAE(torch.utils.data.Dataset): """Load your own video classification dataset. Parameters ---------- root : str, required. Path to the root folder storing the dataset. setting : str, required. A text file describing the dataset, each line per video sample. There are three items in each line: (1) video path; (2) video length and (3) video label. train : bool, default True. Whether to load the training or validation set. test_mode : bool, default False. Whether to perform evaluation on the test set. Usually there is three-crop or ten-crop evaluation strategy involved. name_pattern : str, default None. The naming pattern of the decoded video frames. For example, img_00012.jpg. video_ext : str, default 'mp4'. If video_loader is set to True, please specify the video format accordinly. is_color : bool, default True. Whether the loaded image is color or grayscale. modality : str, default 'rgb'. Input modalities, we support only rgb video frames for now. Will add support for rgb difference image and optical flow image later. num_segments : int, default 1. Number of segments to evenly divide the video into clips. A useful technique to obtain global video-level information. Limin Wang, etal, Temporal Segment Networks: Towards Good Practices for Deep Action Recognition, ECCV 2016. num_crop : int, default 1. Number of crops for each image. default is 1. Common choices are three crops and ten crops during evaluation. new_length : int, default 1. The length of input video clip. Default is a single image, but it can be multiple video frames. For example, new_length=16 means we will extract a video clip of consecutive 16 frames. new_step : int, default 1. Temporal sampling rate. For example, new_step=1 means we will extract a video clip of consecutive frames. new_step=2 means we will extract a video clip of every other frame. temporal_jitter : bool, default False. Whether to temporally jitter if new_step > 1. video_loader : bool, default False. 
Whether to use video loader to load data. use_decord : bool, default True. Whether to use Decord video loader to load data. Otherwise use mmcv video loader. transform : function, default None. A function that takes data and label and transforms them. data_aug : str, default 'v1'. Different types of data augmentation auto. Supports v1, v2, v3 and v4. lazy_init : bool, default False. If set to True, build a dataset instance without loading any dataset. """ def __init__(self, root, setting, train=True, test_mode=False, name_pattern='img_%05d.jpg', video_ext='mp4', is_color=True, modality='rgb', num_segments=1, num_crop=1, new_length=1, new_step=1, transform=None, temporal_jitter=False, video_loader=False, use_decord=False, lazy_init=False): super(VideoMAE, self).__init__() self.root = root self.setting = setting self.train = train self.test_mode = test_mode self.is_color = is_color self.modality = modality self.num_segments = num_segments self.num_crop = num_crop self.new_length = new_length self.new_step = new_step self.skip_length = self.new_length * self.new_step self.temporal_jitter = temporal_jitter self.name_pattern = name_pattern self.video_loader = video_loader self.video_ext = video_ext self.use_decord = use_decord self.transform = transform self.lazy_init = lazy_init if not self.lazy_init: self.clips = self._make_dataset(root, setting) if len(self.clips) == 0: raise(RuntimeError("Found 0 video clips in subfolders of: " + root + "\n" "Check your data directory (opt.data-dir).")) def __getitem__(self, index): directory, target = self.clips[index] if self.video_loader: if '.' in directory.split('/')[-1]: # data in the "setting" file already have extension, e.g., demo.mp4 video_name = directory else: # data in the "setting" file do not have extension, e.g., demo # So we need to provide extension (i.e., .mp4) to complete the file name. video_name = '{}.{}'.format(directory, self.video_ext) decord_vr = decord.VideoReader(video_name, num_threads=1) duration = len(decord_vr) segment_indices, skip_offsets = self._sample_train_indices(duration) images = self._video_TSN_decord_batch_loader(directory, decord_vr, duration, segment_indices, skip_offsets) process_data, mask = self.transform((images, None)) # T*C,H,W process_data = process_data.view((self.new_length, 3) + process_data.size()[-2:]).transpose(0,1) # T*C,H,W -> T,C,H,W -> C,T,H,W return (process_data, mask) def __len__(self): return len(self.clips) def _make_dataset(self, directory, setting): if not os.path.exists(setting): raise(RuntimeError("Setting file %s doesn't exist. Check opt.train-list and opt.val-list. " % (setting))) clips = [] with open(setting) as split_f: data = split_f.readlines() for line in data: line_info = line.split(' ') # line format: video_path, video_duration, video_label if len(line_info) < 2: raise(RuntimeError('Video input format is not correct, missing one or more element. 
%s' % line)) clip_path = os.path.join(line_info[0]) target = int(line_info[1]) item = (clip_path, target) clips.append(item) return clips def _sample_train_indices(self, num_frames): average_duration = (num_frames - self.skip_length + 1) // self.num_segments if average_duration > 0: offsets = np.multiply(list(range(self.num_segments)), average_duration) offsets = offsets + np.random.randint(average_duration, size=self.num_segments) elif num_frames > max(self.num_segments, self.skip_length): offsets = np.sort(np.random.randint( num_frames - self.skip_length + 1, size=self.num_segments)) else: offsets = np.zeros((self.num_segments,)) if self.temporal_jitter: skip_offsets = np.random.randint( self.new_step, size=self.skip_length // self.new_step) else: skip_offsets = np.zeros( self.skip_length // self.new_step, dtype=int) return offsets + 1, skip_offsets def _video_TSN_decord_batch_loader(self, directory, video_reader, duration, indices, skip_offsets): sampled_list = [] frame_id_list = [] for seg_ind in indices: offset = int(seg_ind) for i, _ in enumerate(range(0, self.skip_length, self.new_step)): if offset + skip_offsets[i] <= duration: frame_id = offset + skip_offsets[i] - 1 else: frame_id = offset - 1 frame_id_list.append(frame_id) if offset + self.new_step < duration: offset += self.new_step try: video_data = video_reader.get_batch(frame_id_list).asnumpy() sampled_list = [Image.fromarray(video_data[vid, :, :, :]).convert('RGB') for vid, _ in enumerate(frame_id_list)] except: raise RuntimeError('Error occured in reading frames {} from video {} of duration {}.'.format(frame_id_list, directory, duration)) return sampled_list
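
# --- Illustrative usage sketch (added; not part of the original file) ---
# A minimal, hypothetical driver for VideoClsDataset above, run in
# 'validation' mode so none of the train-only augmentation args are needed.
# The annotation path below is a placeholder: it is assumed to be a
# space-separated file with one "video_path label" pair per line, matching
# the pandas read in __init__.
if __name__ == '__main__':
    from torch.utils.data import DataLoader

    val_set = VideoClsDataset(
        anno_path='data/kinetics/val.csv',  # hypothetical annotation file
        data_path='',
        mode='validation',
        clip_len=16,
        frame_sample_rate=4,
        crop_size=224,
        short_side_size=256,
    )
    loader = DataLoader(val_set, batch_size=8, num_workers=4)
    clips, labels, names = next(iter(loader))
    # ClipToTensor yields one [C, T, H, W] tensor per sample,
    # so the collated batch should be [8, 3, 16, 224, 224].
    print(clips.shape)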
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/kinetics.py
from functools import partial
import numpy as np
from typing import Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import drop_path, to_2tuple, trunc_normal_
from timm.models.registry import register_model
from dataclasses import dataclass
from alphaction.modeling.poolers import make_3d_pooler
import math
import pdb


def _cfg(url='', **kwargs):
    return {
        'url': url,
        'num_classes': 400, 'input_size': (3, 224, 224), 'pool_size': None,
        'crop_pct': .9, 'interpolation': 'bicubic',
        'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5),
        **kwargs
    }


class HR2O_NL(nn.Module):
    def __init__(self, hidden_dim=512, kernel_size=3, mlp_1x1=False):
        super(HR2O_NL, self).__init__()

        self.hidden_dim = hidden_dim

        padding = kernel_size // 2
        self.conv_q = nn.Conv2d(hidden_dim, hidden_dim, kernel_size, padding=padding, bias=False)
        self.conv_k = nn.Conv2d(hidden_dim, hidden_dim, kernel_size, padding=padding, bias=False)
        self.conv_v = nn.Conv2d(hidden_dim, hidden_dim, kernel_size, padding=padding, bias=False)

        self.conv = nn.Conv2d(
            hidden_dim, hidden_dim,
            1 if mlp_1x1 else kernel_size,
            padding=0 if mlp_1x1 else padding,
            bias=False
        )
        self.norm = nn.GroupNorm(1, hidden_dim, affine=True)
        self.dp = nn.Dropout(0.2)

    def forward(self, x):
        query = self.conv_q(x).unsqueeze(1)
        key = self.conv_k(x).unsqueeze(0)
        att = (query * key).sum(2) / (self.hidden_dim ** 0.5)
        att = nn.Softmax(dim=1)(att)
        value = self.conv_v(x)
        virt_feats = (att.unsqueeze(2) * value).sum(1)

        virt_feats = self.norm(virt_feats)
        virt_feats = nn.functional.relu(virt_feats)
        virt_feats = self.conv(virt_feats)
        virt_feats = self.dp(virt_feats)

        x = x + virt_feats
        return x


class ACARHead(nn.Module):
    def __init__(self, num_classes=60, dropout=0., bias=False,
                 reduce_dim=1024, hidden_dim=512, downsample='max2x2',
                 depth=2, kernel_size=3, mlp_1x1=False):
        super(ACARHead, self).__init__()

        # actor-context feature encoder
        self.conv1 = nn.Conv2d(reduce_dim * 2, hidden_dim, 1, bias=False)
        self.conv2 = nn.Conv2d(hidden_dim, hidden_dim, 3, bias=False)

        # down-sampling before HR2O
        assert downsample in ['none', 'max2x2']
        if downsample == 'none':
            self.downsample = nn.Identity()
        elif downsample == 'max2x2':
            self.downsample = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # high-order relation reasoning operator (HR2O_NL)
        layers = []
        for _ in range(depth):
            layers.append(HR2O_NL(hidden_dim, kernel_size, mlp_1x1))
        self.hr2o = nn.Sequential(*layers)

        # classification
        self.gap = nn.AdaptiveAvgPool2d(1)
        self.fc1 = nn.Linear(reduce_dim, hidden_dim, bias=False)
        self.fc2 = nn.Linear(hidden_dim * 2, num_classes, bias=bias)

        if dropout > 0:
            self.dp = nn.Dropout(dropout)
        else:
            self.dp = None

    # data: features, rois, num_rois, roi_ids, sizes_before_padding
    # returns: outputs
    def forward(self, roi_feats, roi_ids, img_features):
        """
        roi_feats: [num_rois, emb_dim]
        roi_ids: [num_rois]
        img_features: [bs, emb_dim, t, h, w]
        """
        high_order_feats = []
        cur_roi_id = 0
        # pdb.set_trace()
        for idx in range(img_features.shape[0]):  # iterate over mini-batch
            n_rois = roi_ids[idx]
            if n_rois == 0:
                continue

            # eff_h, eff_w = math.ceil(h * sizes_before_padding[idx][1]), math.ceil(w * sizes_before_padding[idx][0])
            bg_feats = img_features[idx]
            bg_feats = bg_feats.unsqueeze(0).repeat((n_rois, 1, 1, 1))
            actor_feats = roi_feats[cur_roi_id:cur_roi_id + roi_ids[idx]]
            cur_roi_id += n_rois
            tiled_actor_feats = actor_feats.unsqueeze(2).unsqueeze(2).expand_as(bg_feats)
            interact_feats = torch.cat([bg_feats, tiled_actor_feats], dim=1)

            interact_feats = self.conv1(interact_feats)
            interact_feats = nn.functional.relu(interact_feats)
            interact_feats = self.conv2(interact_feats)
            interact_feats = nn.functional.relu(interact_feats)
            interact_feats = self.downsample(interact_feats)
            interact_feats = self.hr2o(interact_feats)
            interact_feats = self.gap(interact_feats)
            high_order_feats.append(interact_feats)

        high_order_feats = torch.cat(high_order_feats, dim=0).view(np.sum(np.array(roi_ids)), -1)

        outputs = self.fc1(roi_feats)
        outputs = nn.functional.relu(outputs)
        outputs = torch.cat([outputs, high_order_feats], dim=1)

        if self.dp is not None:
            outputs = self.dp(outputs)
        outputs = self.fc2(outputs)

        return outputs


class DropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
    """

    def __init__(self, drop_prob=None):
        super(DropPath, self).__init__()
        self.drop_prob = drop_prob

    def forward(self, x):
        return drop_path(x, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return 'p={}'.format(self.drop_prob)


class Mlp(nn.Module):
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        # x = self.drop(x)  # commented out here, following the original BERT implementation
        x = self.fc2(x)
        x = self.drop(x)
        return x


class Attention(nn.Module):
    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.,
                 proj_drop=0., attn_head_dim=None):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        if attn_head_dim is not None:
            head_dim = attn_head_dim
        all_head_dim = head_dim * self.num_heads
        self.scale = qk_scale or head_dim ** -0.5

        self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False)
        if qkv_bias:
            self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
            self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
        else:
            self.q_bias = None
            self.v_bias = None

        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(all_head_dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x):
        B, N, C = x.shape
        qkv_bias = None
        if self.q_bias is not None:
            qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias))
        # qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
        qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)

        q = q * self.scale
        attn = (q @ k.transpose(-2, -1))

        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x


class Block(nn.Module):
    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., init_values=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm,
                 attn_head_dim=None):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
            attn_drop=attn_drop, proj_drop=drop, attn_head_dim=attn_head_dim)
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

        # guard against init_values=None (the signature default) before comparing
        if init_values is not None and init_values > 0:
            self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True)
            self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)), requires_grad=True)
        else:
            self.gamma_1, self.gamma_2 = None, None

    def forward(self, x):
        if self.gamma_1 is None:
            x = x + self.drop_path(self.attn(self.norm1(x)))
            x = x + self.drop_path(self.mlp(self.norm2(x)))
        else:
            x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x)))
            x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
        return x


class PatchEmbed(nn.Module):
    """ Image to Patch Embedding
    """

    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, num_frames=16, tubelet_size=2):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        self.tubelet_size = int(tubelet_size)
        num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) * (
            num_frames // self.tubelet_size)
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = num_patches
        self.proj = nn.Conv3d(in_channels=in_chans, out_channels=embed_dim,
                              kernel_size=(self.tubelet_size, patch_size[0], patch_size[1]),
                              stride=(self.tubelet_size, patch_size[0], patch_size[1]))

    def forward(self, x, **kwargs):
        # B, C, T, H, W = x.shape
        # FIXME look at relaxing size constraints
        # assert H == self.img_size[0] and W == self.img_size[1], \
        #     f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
        # x = self.proj(x).flatten(2).transpose(1, 2)
        x = self.proj(x)
        return x


# sin-cos position encoding
# https://github.com/jadore801120/attention-is-all-you-need-pytorch/blob/master/transformer/Models.py#L31
def get_sinusoid_encoding_table(n_position, d_hid):
    ''' Sinusoid position encoding table '''

    # TODO: make it with torch instead of numpy
    def get_position_angle_vec(position):
        return [position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid)]

    sinusoid_table = np.array([get_position_angle_vec(pos_i) for pos_i in range(n_position)])
    sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2])  # dim 2i
    sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2])  # dim 2i+1

    return torch.FloatTensor(sinusoid_table).unsqueeze(0)


@dataclass
class ROIPoolingCfg:
    POOLER_RESOLUTION: int = 7
    POOLER_SCALE: float = 0.0625
    POOLER_SAMPLING_RATIO: int = 0
    POOLER_TYPE: str = 'align3d'
    MEAN_BEFORE_POOLER: bool = True


class VisionTransformer(nn.Module):
    """ Vision Transformer with support for patch or hybrid CNN input stage
    """

    def __init__(self,
                 img_size=224,
                 patch_size=16,
                 in_chans=3,
                 num_classes=80,
                 embed_dim=768,
                 depth=12,
                 num_heads=12,
                 mlp_ratio=4.,
                 qkv_bias=False,
                 qk_scale=None,
                 drop_rate=0.,
                 attn_drop_rate=0.,
                 drop_path_rate=0.,
                 norm_layer=nn.LayerNorm,
                 init_values=0.,
                 use_learnable_pos_emb=False,
                 init_scale=0.,
                 all_frames=16,
                 tubelet_size=2,
                 head_type='linear',
                 use_mean_pooling=True):
        super().__init__()
        self.num_classes = num_classes
        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
        self.tubelet_size = tubelet_size
        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=in_chans,
            embed_dim=embed_dim, num_frames=all_frames, tubelet_size=self.tubelet_size)
        num_patches = self.patch_embed.num_patches  # 8x14x14
        self.grid_size = [img_size // patch_size, img_size // patch_size]  # [14,14]
        self.head_type = head_type

        if use_learnable_pos_emb:
            self.pos_embed = nn.Parameter(torch.zeros(1, num_patches, embed_dim))
        else:
            # sine-cosine positional embeddings is on the way
            self.pos_embed = get_sinusoid_encoding_table(num_patches, embed_dim)

        self.pos_drop = nn.Dropout(p=drop_rate)

        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias,
                qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i],
                norm_layer=norm_layer, init_values=init_values)
            for i in range(depth)])
        # self.norm = nn.Identity() if use_mean_pooling else norm_layer(embed_dim)
        # self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None
        self.norm = norm_layer(embed_dim)  # this layer is not present in the pre-trained weights
        self.fc_norm = None

        if head_type == 'acar':
            self.head = ACARHead(num_classes=num_classes, hidden_dim=embed_dim,
                                 reduce_dim=embed_dim) if num_classes > 0 else nn.Identity()
            trunc_normal_(self.head.fc2.weight, std=.02)
            self.head.fc2.weight.data.mul_(init_scale)
            # self.head.fc2.bias.data.mul_(init_scale)
        else:
            self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
            trunc_normal_(self.head.weight, std=.02)
            self.head.weight.data.mul_(init_scale)
            self.head.bias.data.mul_(init_scale)

        # rois setting
        self.head_cfg = ROIPoolingCfg()
        self.pooler = make_3d_pooler(self.head_cfg)
        resolution = self.head_cfg.POOLER_RESOLUTION
        self.max_pooler = nn.MaxPool2d((resolution, resolution))
        self.test_ext = (0.1, 0.05)
        self.proposal_per_clip = 100

        if use_learnable_pos_emb:
            trunc_normal_(self.pos_embed, std=.02)

        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def get_num_layers(self):
        return len(self.blocks)

    @torch.jit.ignore
    def no_weight_decay(self):
        return {'pos_embed', 'cls_token'}

    def get_classifier(self):
        return self.head

    def reset_classifier(self, num_classes, global_pool=''):
        self.num_classes = num_classes
        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()

    def forward_features(self, x, proposals):
        x = self.patch_embed(x)
        B, width, t, h, w = x.size()
        x = x.flatten(2).transpose(1, 2)
        # B, _, _ = x.size()

        if self.pos_embed is not None:
            # interpolate the positional embedding at inference time:
            # positional_embedding=[1 8*14*14 768]->[1 8*16*29 768]
            pos_embed = self.pos_embed.reshape(t, -1, width)
            pos_embed = interpolate_pos_embed_online(
                pos_embed, self.grid_size, [h, w], 0).reshape(1, -1, width)
            x = x + pos_embed.expand(B, -1, -1).type_as(x).to(x.device).clone().detach()
        x = self.pos_drop(x)

        for blk in self.blocks:
            x = blk(x)

        x = self.norm(x)  # [b thw=8x14x14 c=768]
        # if self.fc_norm is not None:  # default
        #     return self.fc_norm(x.mean(1))
        # else:  # cls_token
        #     return x[:, 0]

        # rois
        x = x.reshape(B, t, h, w, -1).permute(0, 4, 1, 2, 3)  # [b c t h w]
        x = x.mean(dim=2, keepdim=False)  # [b c h w]
        rois = self.pooler(x, proposals)  # [n c 7 7]
        rois = self.max_pooler(rois).view(rois.size(0), -1)  # [n c]
        return rois, x

    def sample_box(self, boxes):
        proposals = []
        num_proposals = self.proposal_per_clip
        for boxes_per_image in boxes:
            num_boxes = len(boxes_per_image)

            if num_boxes > num_proposals:
                choice_inds = torch.randperm(num_boxes)[:num_proposals]
                proposals_per_image = boxes_per_image[choice_inds]
            else:
                proposals_per_image = boxes_per_image
            proposals_per_image = proposals_per_image.random_aug(0.2, 0.1, 0.1, 0.05)
            proposals.append(proposals_per_image)
        return proposals

    def forward(self, x, boxes):
        if self.training:
            # limiting the number of actions at training time is not handled yet
            proposals = self.sample_box(boxes)
        else:
            proposals = [box.extend(self.test_ext) for box in boxes]
        rois, x = self.forward_features(x, proposals)
        if self.head_type == 'acar':
            roi_ids = [len(i) for i in proposals]
            rois = self.head(rois, roi_ids, x)
        else:
            rois = self.head(rois)
        return rois


@register_model
def vit_small_patch16_224(pretrained=False, **kwargs):
    model = VisionTransformer(
        patch_size=16, embed_dim=384, depth=12, num_heads=6, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    return model


@register_model
def vit_base_patch16_224(pretrained=False, **kwargs):
    model = VisionTransformer(
        patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    return model


@register_model
def vit_base_patch16_384(pretrained=False, **kwargs):
    model = VisionTransformer(
        img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    return model


@register_model
def vit_large_patch16_224(pretrained=False, **kwargs):
    model = VisionTransformer(
        patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    return model


@register_model
def vit_large_patch16_384(pretrained=False, **kwargs):
    model = VisionTransformer(
        img_size=384, patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    return model


@register_model
def vit_large_patch16_512(pretrained=False, **kwargs):
    model = VisionTransformer(
        img_size=512, patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    return model


@register_model
def vit_huge_patch16_224(pretrained=False, **kwargs):
    model = VisionTransformer(
        patch_size=16, embed_dim=1280, depth=32, num_heads=16, mlp_ratio=4, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    return model


@register_model
def vit_giant_patch14_224(pretrained=False, **kwargs):
    model = VisionTransformer(
        patch_size=14, embed_dim=1408, depth=40, num_heads=16, mlp_ratio=48 / 11, qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
    model.default_cfg = _cfg()
    return model


def interpolate_pos_embed_online(
    pos_embed, orig_size: Tuple[int], new_size: Tuple[int], num_extra_tokens: int
):
    extra_tokens = pos_embed[:, :num_extra_tokens]
    pos_tokens = pos_embed[:, num_extra_tokens:]
    embedding_size = pos_tokens.shape[-1]
    pos_tokens = pos_tokens.reshape(
        -1, orig_size[0], orig_size[1], embedding_size
    ).permute(0, 3, 1, 2)
    pos_tokens = torch.nn.functional.interpolate(
        pos_tokens, size=new_size, mode="bicubic", align_corners=False,
    )
    pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
    new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
    return new_pos_embed


if __name__ == '__main__':
    # test forward
    # create proposal
    from alphaction.structures.bounding_box import BoxList

    im_w, im_h = 464, 256
    n = 2
    xy = torch.zeros([n, 2])
    w = torch.rand([n, 1]) * 464
    h = torch.rand([n, 1]) * 256
    boxes = torch.cat([xy, w, h], dim=1)
    boxes_tensor = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)  # guard against no boxes
    boxes = BoxList(boxes_tensor, (im_w, im_h), mode="xywh").convert("xyxy")
    # print(boxes.bbox)
    bs = 2
    t = 16
    proposals = [boxes, boxes]  # bs=2
    x = torch.rand([bs, 3, t, im_h, im_w])
    visual_transformer = vit_base_patch16_224(head_type='acar')
    print(visual_transformer)
    rois = visual_transformer(x, proposals)
    print(rois.shape)  # [4,num_classes]

    # pos_embed = get_sinusoid_encoding_table(8*14*14, 384)
    # print(pos_embed.shape)
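
# --- Illustrative sketch (added; not part of the original file) ---
# How forward_features adapts the fixed sine-cosine table to a new spatial
# grid at inference time: reshape to one row per temporal slice, then
# bicubically interpolate the 14x14 grid. The 16x29 target grid here is just
# an example resolution, mirroring the comment in forward_features.
if __name__ == '__main__':
    t, h, w, dim = 8, 14, 14, 768
    pos_embed = get_sinusoid_encoding_table(t * h * w, dim)  # [1, 8*14*14, 768]
    pos_embed = pos_embed.reshape(t, h * w, dim)             # one row per tubelet frame
    resized = interpolate_pos_embed_online(pos_embed, (h, w), (16, 29), 0)
    # torch.Size([8, 464, 768]); forward_features then flattens this back to [1, -1, dim]
    print(resized.shape)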
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/modeling_finetune.py
import torch
from torch import optim as optim

from timm.optim.adafactor import Adafactor
from timm.optim.adahessian import Adahessian
from timm.optim.adamp import AdamP
from timm.optim.lookahead import Lookahead
from timm.optim.nadam import Nadam
from timm.optim.novograd import NovoGrad
from timm.optim.nvnovograd import NvNovoGrad
from timm.optim.radam import RAdam
from timm.optim.rmsprop_tf import RMSpropTF
from timm.optim.sgdp import SGDP

import json

try:
    from apex.optimizers import FusedNovoGrad, FusedAdam, FusedLAMB, FusedSGD
    has_apex = True
except ImportError:
    has_apex = False


def get_num_layer_for_vit(var_name, num_max_layer):
    if var_name in ("cls_token", "mask_token", "pos_embed"):
        return 0
    elif var_name.startswith("patch_embed") or var_name.startswith("encoder.patch_embed"):
        return 0
    elif var_name.startswith("rel_pos_bias"):
        return num_max_layer - 1
    elif var_name.startswith("blocks") or var_name.startswith("encoder.blocks"):
        if var_name.startswith("encoder.blocks"):
            var_name = var_name[8:]
        layer_id = int(var_name.split('.')[1])
        return layer_id + 1
    else:
        return num_max_layer - 1


class LayerDecayValueAssigner(object):
    def __init__(self, values):
        self.values = values

    def get_scale(self, layer_id):
        return self.values[layer_id]

    def get_layer_id(self, var_name):
        return get_num_layer_for_vit(var_name, len(self.values))


def get_parameter_groups(model, weight_decay=1e-5, skip_list=(), get_num_layer=None, get_layer_scale=None, lr_scale=1.0):
    parameter_group_names = {}
    parameter_group_vars = {}

    # Split the parameter groups here: the encoder gets one learning-rate scale,
    # everything else gets another (lr_scale).
    # layer_decay: pass get_num_layer and get_layer_scale to enable it.
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue  # frozen weights
        if (len(param.shape) == 1 or name.endswith(".bias") or name in skip_list) and name.startswith('encoder.'):
            group_name = "no_decay_encoder"
            this_weight_decay = 0.
            scale = 1.0
        elif len(param.shape) == 1 or name.endswith(".bias") or name in skip_list:
            group_name = "no_decay_others"
            this_weight_decay = 0.
            scale = lr_scale
        elif name.startswith('encoder.'):
            group_name = "decay_encoder"
            this_weight_decay = weight_decay
            scale = 1.0
        else:
            group_name = "decay_others"
            this_weight_decay = weight_decay
            scale = lr_scale

        if get_num_layer is not None:
            layer_id = get_num_layer(name)
            group_name = "layer_%d_%s" % (layer_id, group_name)
        else:
            layer_id = None

        if group_name not in parameter_group_names:
            if get_layer_scale is not None:
                scale = get_layer_scale(layer_id) * scale

            parameter_group_names[group_name] = {
                "weight_decay": this_weight_decay,
                "params": [],
                "lr_scale": scale
            }
            parameter_group_vars[group_name] = {
                "weight_decay": this_weight_decay,
                "params": [],
                "lr_scale": scale
            }

        parameter_group_vars[group_name]["params"].append(param)
        parameter_group_names[group_name]["params"].append(name)
    print("Param groups = %s" % json.dumps(parameter_group_names, indent=2))
    return list(parameter_group_vars.values())


def create_optimizer(args, model, get_num_layer=None, get_layer_scale=None, filter_bias_and_bn=True, skip_list=None, lr_scale=1.0):
    opt_lower = args.opt.lower()
    weight_decay = args.weight_decay
    if weight_decay and filter_bias_and_bn:
        skip = {}
        if skip_list is not None:
            skip = skip_list
        elif hasattr(model, 'no_weight_decay'):
            skip = model.no_weight_decay()
        parameters = get_parameter_groups(model, weight_decay, skip, get_num_layer, get_layer_scale, lr_scale=lr_scale)
        weight_decay = 0.
    else:
        parameters = model.parameters()

    if 'fused' in opt_lower:
        assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers'

    opt_args = dict(lr=args.lr, weight_decay=weight_decay)
    if hasattr(args, 'opt_eps') and args.opt_eps is not None:
        opt_args['eps'] = args.opt_eps
    if hasattr(args, 'opt_betas') and args.opt_betas is not None:
        opt_args['betas'] = args.opt_betas

    print("optimizer settings:", opt_args)

    opt_split = opt_lower.split('_')
    opt_lower = opt_split[-1]
    if opt_lower == 'sgd' or opt_lower == 'nesterov':
        opt_args.pop('eps', None)
        optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)
    elif opt_lower == 'momentum':
        opt_args.pop('eps', None)
        optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)
    elif opt_lower == 'adam':
        optimizer = optim.Adam(parameters, **opt_args)
    elif opt_lower == 'adamw':
        optimizer = optim.AdamW(parameters, **opt_args)
    elif opt_lower == 'nadam':
        optimizer = Nadam(parameters, **opt_args)
    elif opt_lower == 'radam':
        optimizer = RAdam(parameters, **opt_args)
    elif opt_lower == 'adamp':
        optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args)
    elif opt_lower == 'sgdp':
        optimizer = SGDP(parameters, momentum=args.momentum, nesterov=True, **opt_args)
    elif opt_lower == 'adadelta':
        optimizer = optim.Adadelta(parameters, **opt_args)
    elif opt_lower == 'adafactor':
        if not args.lr:
            opt_args['lr'] = None
        optimizer = Adafactor(parameters, **opt_args)
    elif opt_lower == 'adahessian':
        optimizer = Adahessian(parameters, **opt_args)
    elif opt_lower == 'rmsprop':
        optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=args.momentum, **opt_args)
    elif opt_lower == 'rmsproptf':
        optimizer = RMSpropTF(parameters, alpha=0.9, momentum=args.momentum, **opt_args)
    elif opt_lower == 'novograd':
        optimizer = NovoGrad(parameters, **opt_args)
    elif opt_lower == 'nvnovograd':
        optimizer = NvNovoGrad(parameters, **opt_args)
    elif opt_lower == 'fusedsgd':
        opt_args.pop('eps', None)
        optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)
    elif opt_lower == 'fusedmomentum':
        opt_args.pop('eps', None)
        optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)
    elif opt_lower == 'fusedadam':
        optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args)
    elif opt_lower == 'fusedadamw':
        optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args)
    elif opt_lower == 'fusedlamb':
        optimizer = FusedLAMB(parameters, **opt_args)
    elif opt_lower == 'fusednovograd':
        opt_args.setdefault('betas', (0.95, 0.98))
        optimizer = FusedNovoGrad(parameters, **opt_args)
    else:
        raise ValueError("Invalid optimizer: {}".format(opt_lower))

    if len(opt_split) > 1:
        if opt_split[0] == 'lookahead':
            optimizer = Lookahead(optimizer)

    return optimizer
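
# --- Illustrative sketch (added; not part of the original file) ---
# Wiring up layer-wise learning-rate decay with the helpers above, on a toy
# module whose parameter names mimic the ViT layout (patch_embed / blocks.N /
# head). The decay factor 0.75 and the toy module are example values only.
if __name__ == '__main__':
    import torch.nn as nn

    class Toy(nn.Module):
        def __init__(self):
            super().__init__()
            self.patch_embed = nn.Linear(3, 8)
            self.blocks = nn.ModuleList([nn.Linear(8, 8) for _ in range(2)])
            self.head = nn.Linear(8, 4)

    num_layers = 2
    # depth d maps to scale decay^(num_layers + 1 - d): earlier blocks train slower
    values = [0.75 ** (num_layers + 1 - i) for i in range(num_layers + 2)]
    assigner = LayerDecayValueAssigner(values)
    groups = get_parameter_groups(
        Toy(), weight_decay=0.05,
        get_num_layer=assigner.get_layer_id,
        get_layer_scale=assigner.get_scale,
    )
    # each group carries 'params', 'weight_decay' and an 'lr_scale' multiplier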
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/optim_factory.py
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/alphaction/__init__.py
import torch
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable

# _C is the compiled CUDA extension providing the focal-loss kernels; the
# import was left commented out in this copy of the source, but the functions
# below require it at call time.
import alphaction._custom_cuda_ext as _C


class _SoftmaxFocalLoss(Function):
    @staticmethod
    def forward(ctx, logits, targets, gamma, alpha):
        ctx.gamma = gamma
        ctx.alpha = alpha

        losses, P = _C.softmax_focalloss_forward(
            logits, targets, gamma, alpha
        )
        ctx.save_for_backward(logits, targets, P)

        return losses

    @staticmethod
    @once_differentiable
    def backward(ctx, d_loss):
        logits, targets, P = ctx.saved_tensors
        gamma = ctx.gamma
        alpha = ctx.alpha
        d_logits = _C.softmax_focalloss_backward(
            logits, targets, P, d_loss, gamma, alpha
        )
        return d_logits, None, None, None


def softmax_focal_loss(logits, targets, gamma, alpha, reduction='mean'):
    assert reduction in ["none", "mean", "sum"], "Unsupported reduction type \"{}\"".format(reduction)
    logits = logits.float()
    targets = targets.int()

    ret = _SoftmaxFocalLoss.apply(logits, targets, gamma, alpha)
    if reduction != "none":
        ret = torch.mean(ret) if reduction == "mean" else torch.sum(ret)

    return ret


class SoftmaxFocalLoss(nn.Module):
    def __init__(self, gamma, alpha, reduction="mean"):
        super(SoftmaxFocalLoss, self).__init__()
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = reduction

    def forward(self, logits, targets):
        loss = softmax_focal_loss(logits, targets, self.gamma, self.alpha, self.reduction)
        return loss

    def __repr__(self):
        tmpstr = self.__class__.__name__ + "("
        tmpstr += "gamma=" + str(self.gamma)
        tmpstr += ", alpha=" + str(self.alpha)
        tmpstr += ")"
        return tmpstr
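
# --- Illustrative sketch (added; not part of the original file) ---
# The CUDA kernels behind _C are not included in this dump, so here is a
# plain-PyTorch reference of one common form of the same quantity,
# FL = -alpha * (1 - p_t)^gamma * log(p_t), with p_t the softmax probability
# of the target class. It relies on autograd rather than the hand-written
# backward, and is meant only for sanity-checking, not as the library's
# exact kernel.
import torch.nn.functional as F

def softmax_focal_loss_reference(logits, targets, gamma=2.0, alpha=0.25, reduction='mean'):
    log_p = F.log_softmax(logits.float(), dim=-1)                     # [N, C]
    log_pt = log_p.gather(1, targets.long().unsqueeze(1)).squeeze(1)  # log p_t per sample
    pt = log_pt.exp()
    loss = -alpha * (1.0 - pt) ** gamma * log_pt
    if reduction == 'mean':
        return loss.mean()
    if reduction == 'sum':
        return loss.sum()
    return loss

# e.g. softmax_focal_loss_reference(torch.randn(4, 80), torch.randint(0, 80, (4,)))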
InternVideo-main
Downstream/Spatial-Temporal-Action-Localization/alphaction/layers/softmax_focal_loss.py