# model settings
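# Recognizer3D + ResNet3d gives an I3D-style model: the ImageNet-pretrained 2D
# ResNet-50 ('torchvision://resnet50') is inflated into 3D convolutions
# (pretrained2d=True). The `inflate` tuples carry one flag per residual block of
# the four ResNet-50 stages (3, 4, 6, 3 blocks); 1 means the block receives a
# temporal (inflated) kernel, 0 keeps it spatial-only.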
model = dict(
    type='Recognizer3D',
    backbone=dict(
        type='ResNet3d',
        pretrained2d=True,
        pretrained='torchvision://resnet50',
        depth=50,
        conv_cfg=dict(type='Conv3d'),
        norm_eval=False,
        inflate=((1, 1, 1), (1, 0, 1, 0), (1, 0, 1, 0, 1, 0), (0, 1, 0)),
        zero_init_residual=False),
    cls_head=dict(
        type='I3DHead',
        num_classes=400,
        in_channels=2048,
        spatial_type='avg',
        dropout_ratio=0.5,
        init_std=0.01))
# model training and testing settings
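# average_clips='prob' averages the softmax scores of all sampled clips/crops
# of a video to produce its final prediction at test time.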
train_cfg = None
test_cfg = dict(average_clips='prob')
# dataset settings
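# VideoDataset reads raw videos; each line of the annotation file gives a video
# path (relative to data_prefix) and its label, separated by whitespace.
# img_norm_cfg below is the standard ImageNet mean/std in RGB order
# (to_bgr=False keeps the channel order as RGB).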
dataset_type = 'VideoDataset'
data_root = 'data/kinetics400/videos_train'
data_root_val = 'data/kinetics400/videos_val'
ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt'
ann_file_val = 'data/kinetics400/kinetics400_val_list_videos.txt'
ann_file_test = 'data/kinetics400/kinetics400_val_list_videos.txt'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
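# Training: decode one 32-frame clip sampled with a frame interval of 2
# (covering roughly 64 consecutive frames), resize the short side to 256,
# apply MultiScaleCrop and resize to 224x224, then run the Imgaug wrapper
# (horizontal flip with p=0.5, rotation in [-20, 20] degrees, pixel dropout of
# up to 5%). The commented-out line inside the list swaps the explicit
# transform list for the wrapper's built-in 'default' augmentation set.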
train_pipeline = [
    dict(type='DecordInit'),
    dict(type='SampleFrames', clip_len=32, frame_interval=2, num_clips=1),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(
        type='MultiScaleCrop',
        input_size=224,
        scales=(1, 0.8),
        random_crop=False,
        max_wh_scale_gap=0),
    dict(type='Resize', scale=(224, 224), keep_ratio=False),
    dict(
        type='Imgaug',
        transforms=[
            dict(type='Fliplr', p=0.5),
            dict(type='Rotate', rotate=(-20, 20)),
            dict(type='Dropout', p=(0, 0.05))
        ]),
    # dict(type='Imgaug', transforms='default'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs', 'label'])
]
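# Validation: a single clip per video, resized and center-cropped to 224,
# with no flipping or photometric augmentation.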
val_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=1,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='CenterCrop', crop_size=224),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
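# Testing: 10 clips per video, each evaluated with ThreeCrop at 256x256,
# i.e. 30 views per video whose scores are averaged (see test_cfg above).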
test_pipeline = [
    dict(type='DecordInit'),
    dict(
        type='SampleFrames',
        clip_len=32,
        frame_interval=2,
        num_clips=10,
        test_mode=True),
    dict(type='DecordDecode'),
    dict(type='Resize', scale=(-1, 256)),
    dict(type='ThreeCrop', crop_size=256),
    dict(type='Flip', flip_ratio=0),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='FormatShape', input_format='NCTHW'),
    dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
    dict(type='ToTensor', keys=['imgs'])
]
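# videos_per_gpu is the per-GPU batch size: 8 videos x 8 GPUs = 64 videos per
# iteration, matching the learning-rate note in the optimizer section below.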
data = dict(
    videos_per_gpu=8,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        ann_file=ann_file_train,
        data_prefix=data_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=val_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=ann_file_val,
        data_prefix=data_root_val,
        pipeline=test_pipeline))
# optimizer
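# lr=0.01 assumes 8 GPUs x 8 videos per GPU (total batch size 64); following
# the linear scaling rule, scale the learning rate proportionally if the total
# batch size changes. Gradients are clipped to an L2 norm of 40.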
optimizer = dict(
    type='SGD', lr=0.01, momentum=0.9,
    weight_decay=0.0001)  # this lr is used for 8 gpus
optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
# learning policy
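# Step decay: the learning rate drops by the default factor of 0.1 at epochs
# 40 and 80 over a 100-epoch schedule; checkpoints are saved and accuracy
# metrics evaluated every 5 epochs.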
lr_config = dict(policy='step', step=[40, 80])
total_epochs = 100
checkpoint_config = dict(interval=5)
evaluation = dict(
    interval=5, metrics=['top_k_accuracy', 'mean_class_accuracy'])
log_config = dict(
    interval=20,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook'),
    ])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/i3d_r50_video_3d_32x2x1_100e_kinetics400_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
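# Assuming the standard MMAction2 repo layout, distributed training with this
# config on 8 GPUs would look roughly like the following (the config path is
# illustrative and may differ in your checkout):
#   bash tools/dist_train.sh configs/recognition/i3d/i3d_r50_video_32x2x1_100e_kinetics400_rgb.py 8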