diff --git a/configs/datasets/high-quality-fall_runner_k400-hyperparams.py b/configs/datasets/high-quality-fall_runner_k400-hyperparams.py
new file mode 100644
index 0000000..29bddfd
--- /dev/null
+++ b/configs/datasets/high-quality-fall_runner_k400-hyperparams.py
@@ -0,0 +1,113 @@
+"""Base `Runner` config for high-quality-fall dataset."""
+
+dataset_type = "HighQualityFallDataset"
+
+label_strategy = dict(
+ type="PriorityLabel",
+ label_description=dict(
+ names=["fall", "lying", "other"],
+ start_timestamp_names=["fall_start", "lying_start"],
+ end_timestamp_names=["fall_end", "lying_end"],
+ visible_names=["fall_visible", "lying_visible"],
+ other_class=2,
+ ),
+)
+
+sampling_strategy = dict(type="UniformSampling", clip_len=10)
+
+
+# TRAIN
+ann_file_train = "data/Fall_Simulation_Data/annotations_train.csv"
+
+# TODO: Add shape comments
+# TODO: Think about augmentation steps
+train_pipeline = [
+ dict(type="DecordInit"),
+ dict(type="ClipVideo"),
+ dict(type="SampleFrames", clip_len=16, frame_interval=4, num_clips=1),
+ dict(type="DecordDecode"),
+ dict(type="Resize", scale=(-1, 224)),
+ dict(type="RandomCrop", size=224),
+ dict(type="Resize", scale=(224, 224), keep_ratio=False),
+ dict(type="Flip", flip_ratio=0.5),
+ dict(type="FormatShape", input_format="NCTHW"),
+ dict(type="PackActionInputs"),
+]
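+# Rough shape sketch (assuming the settings above): SampleFrames yields 16 frames,
+# the crops give 224x224, and FormatShape("NCTHW") packs each sample as
+# (num_clips=1, C=3, T=16, H=224, W=224).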
+
+train_dataloader = dict(
+ batch_size=12, # From VideoMAEv2 repo
+ num_workers=8,
+ persistent_workers=False,
+ sampler=dict(type="DefaultSampler", shuffle=True),
+ dataset=dict(
+ type=dataset_type,
+ sampling_strategy=sampling_strategy,
+ label_strategy=label_strategy,
+ ann_file=ann_file_train,
+ pipeline=train_pipeline,
+ num_classes=3,
+ # indices=100,
+ ),
+)
+
+# VALIDATION
+ann_file_val = "data/Fall_Simulation_Data/annotations_val.csv"
+
+val_pipeline = [
+ dict(type="DecordInit"),
+ dict(type="ClipVideo"),
+ dict(
+ type="SampleFrames", clip_len=16, frame_interval=4, num_clips=1, test_mode=True
+ ),
+ dict(type="DecordDecode"),
+ dict(type="Resize", scale=(-1, 224)),
+ dict(type="CenterCrop", crop_size=224), # From VideoMAEv2 repo
+ dict(type="FormatShape", input_format="NCTHW"),
+ dict(type="PackActionInputs"),
+]
+
+# val_dataloader = train_dataloader
+val_dataloader = dict(
+ batch_size=12, # From VideoMAEv2 repo
+ num_workers=8,
+ persistent_workers=False,
+ sampler=dict(type="DefaultSampler", shuffle=False),
+ dataset=dict(
+ type=dataset_type,
+ sampling_strategy=sampling_strategy,
+ label_strategy=label_strategy,
+ ann_file=ann_file_val,
+ pipeline=val_pipeline,
+ num_classes=3,
+ ),
+)
+
+# TEST
+ann_file_test = "data/Fall_Simulation_Data/annotations_test.csv"
+
+test_pipeline = [
+ dict(type="DecordInit"),
+ dict(
+ type="SampleFrames", clip_len=16, frame_interval=4, num_clips=5, test_mode=True
+ ), # From VideoMAEv2 repo
+ dict(type="DecordDecode"),
+ dict(type="Resize", scale=(-1, 224)),
+ dict(type="ThreeCrop", crop_size=224), # From VideoMAEv2 repo
+ dict(type="FormatShape", input_format="NCTHW"),
+ dict(type="PackActionInputs"),
+]
+
+test_dataloader = dict(
+ batch_size=1, # From VideoMAEv2 repo
+ num_workers=8,
+ persistent_workers=False,
+ sampler=dict(type="DefaultSampler", shuffle=False),
+ dataset=dict(
+ type=dataset_type,
+ sampling_strategy=sampling_strategy,
+ label_strategy=label_strategy,
+ ann_file=ann_file_test,
+ pipeline=test_pipeline,
+ num_classes=3,
+ ),
+)
diff --git a/configs/experiments/overfitting_run.py b/configs/experiments/overfitting_run.py
index 06c3afc..06800f4 100644
--- a/configs/experiments/overfitting_run.py
+++ b/configs/experiments/overfitting_run.py
@@ -1,5 +1,5 @@
_base_ = [
- "../models/vit-s-p16_videomaev2-vit-g-dist-k710-pre_16x4x1_kinetics-400_base.py"
+ "../models/vit-s-p16_videomaev2-vit-g-dist-k710-pre_16x4x1_kinetics-400_k400-hyperparams.py"
]
EXPERIMENT_NAME = "overfitting_run"
@@ -10,6 +10,7 @@
# Overrides
train_dataloader = dict(
+ batch_size=1,
sampler=dict(type="DefaultSampler", shuffle=False),
dataset=dict(
indices=100,
@@ -19,6 +20,9 @@
ann_file_val = "data/Fall_Simulation_Data/annotations_train.csv"
val_dataloader = dict(
+ num_workers=0,
+ persistent_workers=False,
+ batch_size=1,
dataset=dict(
ann_file=ann_file_val,
indices=100,
@@ -26,4 +30,4 @@
)
default_hooks = dict(checkpoint=dict(interval=0))
-custom_hooks = [dict(type="CustomVisualizationHook", enable=True, interval=10)]
+custom_hooks = [dict(type="CustomVisualizationHook", enable=True, interval=1)]
diff --git a/configs/experiments/vit-b_frame-int-8_gaussian-sampling-5s-clips-30-drop_priority-labeling_k400-hyperparams.py b/configs/experiments/vit-b_frame-int-8_gaussian-sampling-5s-clips-30-drop_priority-labeling_k400-hyperparams.py
new file mode 100644
index 0000000..a1bf91a
--- /dev/null
+++ b/configs/experiments/vit-b_frame-int-8_gaussian-sampling-5s-clips-30-drop_priority-labeling_k400-hyperparams.py
@@ -0,0 +1,78 @@
+_base_ = [
+ "../models/vit-s-p16_videomaev2-vit-g-dist-k710-pre_16x4x1_kinetics-400_k400-hyperparams.py"
+]
+
+EXPERIMENT_NAME = "vit-b_frame-int-8_gaussian-sampling-5s-clips-30-drop_priority-labeling_k400-hyperparams"
+visualizer = dict(
+ vis_backends=dict(save_dir=f"experiments/tensorboard/{EXPERIMENT_NAME}/")
+)
+work_dir = f"experiments/{EXPERIMENT_NAME}"
+
+# Overrides
+default_hooks = dict(checkpoint=dict(interval=1))
+
+# 1487 samples in val -> 92 batches per node -> We want around 10 images
+custom_hooks = [dict(type="CustomVisualizationHook", enable=True, interval=300)]
+
+# Use ViT-B/16
+model = dict(
+ backbone=dict(embed_dims=768, depth=12, num_heads=12),
+ cls_head=dict(in_channels=768),
+)
+load_from = "weights/vit-base-p16_videomaev2-vit-g-dist-k710-pre_16x4x1_kinetics-400_20230510-3e7f93b2.pth"
+
+# Use frame_interval 8
+train_pipeline = [
+ dict(type="DecordInit"),
+ dict(type="ClipVideo"),
+ dict(
+ type="SampleFrames", clip_len=16, frame_interval=8, num_clips=1
+ ), # This has changed
+ dict(type="DecordDecode"),
+ dict(type="Resize", scale=(-1, 224)),
+ dict(type="RandomCrop", size=224),
+ dict(type="Resize", scale=(224, 224), keep_ratio=False),
+ dict(type="Flip", flip_ratio=0.5),
+ dict(type="FormatShape", input_format="NCTHW"),
+ dict(type="PackActionInputs"),
+]
+
+
+# Use Gaussian sampling
+train_dataloader = dict(
+ dataset=dict(
+ sampling_strategy=dict(
+ type="GaussianSampling",
+ clip_len=5,
+ fallback_sampler=dict(
+ type="UniformSampling", clip_len=5, stride=5, overlap=False
+ ),
+ ),
+ drop_ratios=[0.0, 0.0, 0.30],
+ pipeline=train_pipeline,
+ )
+)
+# We do not apply Gaussian sampling to the val/test dataloaders since it requires labels,
+# and using labels in preprocessing would invalidate the validation
+
+val_pipeline = [
+ dict(type="DecordInit"),
+ dict(type="ClipVideo"),
+ dict(
+ type="SampleFrames", clip_len=16, frame_interval=8, num_clips=1, test_mode=True
+ ),
+ dict(type="DecordDecode"),
+ dict(type="Resize", scale=(-1, 224)),
+ dict(type="CenterCrop", crop_size=224), # From VideoMAEv2 repo
+ dict(type="FormatShape", input_format="NCTHW"),
+ dict(type="PackActionInputs"),
+]
+
+val_dataloader = dict(
+ dataset=dict(
+ sampling_strategy=dict(
+ type="UniformSampling", clip_len=5, stride=0, overlap=False
+ ),
+ pipeline=val_pipeline,
+ ),
+)
diff --git a/configs/experiments/vit-b_gaussian-sampling-5s-clips-30-drop_priority-labeling_k400-hyperparams.py b/configs/experiments/vit-b_gaussian-sampling-5s-clips-30-drop_priority-labeling_k400-hyperparams.py
new file mode 100644
index 0000000..b1d81d7
--- /dev/null
+++ b/configs/experiments/vit-b_gaussian-sampling-5s-clips-30-drop_priority-labeling_k400-hyperparams.py
@@ -0,0 +1,47 @@
+_base_ = [
+ "../models/vit-s-p16_videomaev2-vit-g-dist-k710-pre_16x4x1_kinetics-400_k400-hyperparams.py"
+]
+
+EXPERIMENT_NAME = (
+ "vit-b_gaussian-sampling-5s-clips-30-drop_priority-labeling_k400-hyperparams"
+)
+visualizer = dict(
+ vis_backends=dict(save_dir=f"experiments/tensorboard/{EXPERIMENT_NAME}/")
+)
+work_dir = f"experiments/{EXPERIMENT_NAME}"
+
+# Overrides
+default_hooks = dict(checkpoint=dict(interval=1))
+
+# 1487 samples in val -> 92 batches per node -> We want around 10 images
+custom_hooks = [dict(type="CustomVisualizationHook", enable=True, interval=300)]
+
+# Use ViT-B/16
+model = dict(
+ backbone=dict(embed_dims=768, depth=12, num_heads=12),
+ cls_head=dict(in_channels=768),
+)
+load_from = "weights/vit-base-p16_videomaev2-vit-g-dist-k710-pre_16x4x1_kinetics-400_20230510-3e7f93b2.pth"
+
+# Use Gaussian sampling
+train_dataloader = dict(
+ dataset=dict(
+ sampling_strategy=dict(
+ type="GaussianSampling",
+ clip_len=5,
+ fallback_sampler=dict(
+ type="UniformSampling", clip_len=5, stride=5, overlap=False
+ ),
+ ),
+ drop_ratios=[0.0, 0.0, 0.30],
+ )
+)
+# We do not apply Gaussian sampling to the val/test dataloaders since it requires labels,
+# and using labels in preprocessing would invalidate the validation
+val_dataloader = dict(
+ dataset=dict(
+ sampling_strategy=dict(
+ type="UniformSampling", clip_len=5, stride=0, overlap=False
+ ),
+ ),
+)
diff --git a/configs/experiments/vit-b_gaussian-sampling_priority-labeling_k400-hyperparams.py b/configs/experiments/vit-b_gaussian-sampling_priority-labeling_k400-hyperparams.py
new file mode 100644
index 0000000..3cc96a5
--- /dev/null
+++ b/configs/experiments/vit-b_gaussian-sampling_priority-labeling_k400-hyperparams.py
@@ -0,0 +1,29 @@
+_base_ = [
+ "../models/vit-s-p16_videomaev2-vit-g-dist-k710-pre_16x4x1_kinetics-400_k400-hyperparams.py"
+]
+
+EXPERIMENT_NAME = "vit-b_gaussian-sampling_priority-labeling_k400-hyperparams"
+visualizer = dict(
+ vis_backends=dict(save_dir=f"experiments/tensorboard/{EXPERIMENT_NAME}/")
+)
+work_dir = f"experiments/{EXPERIMENT_NAME}"
+
+# Overrides
+default_hooks = dict(checkpoint=dict(interval=1))
+
+# 1487 samples in val -> 92 batches per node -> We want around 10 images
+custom_hooks = [dict(type="CustomVisualizationHook", enable=True, interval=150)]
+
+# Use ViT-B/16
+model = dict(
+ backbone=dict(embed_dims=768, depth=12, num_heads=12),
+ cls_head=dict(in_channels=768),
+)
+load_from = "weights/vit-base-p16_videomaev2-vit-g-dist-k710-pre_16x4x1_kinetics-400_20230510-3e7f93b2.pth"
+
+# Use Gaussian sampling
+train_dataloader = dict(
+ dataset=dict(sampling_strategy=dict(type="GaussianSampling", clip_len=10))
+)
+# We are not changing the val/test dataloaders since Gaussian sampling requires labels,
+# and using labels in preprocessing would invalidate the validation
diff --git a/configs/experiments/vit-b_gaussian-sampling_priority-labeling_paper-hyperparams.py b/configs/experiments/vit-b_gaussian-sampling_priority-labeling_paper-hyperparams.py
new file mode 100644
index 0000000..03352ac
--- /dev/null
+++ b/configs/experiments/vit-b_gaussian-sampling_priority-labeling_paper-hyperparams.py
@@ -0,0 +1,30 @@
+_base_ = [
+ "../models/vit-s-p16_videomaev2-vit-g-dist-k710-pre_16x4x1_kinetics-400_base.py"
+]
+
+EXPERIMENT_NAME = "vit-b_gaussian-sampling_priority-labeling_paper-hyperparams"
+visualizer = dict(
+ vis_backends=dict(save_dir=f"experiments/tensorboard/{EXPERIMENT_NAME}/")
+)
+work_dir = f"experiments/{EXPERIMENT_NAME}"
+
+# Overrides
+default_hooks = dict(checkpoint=dict(interval=3))
+
+# 1487 samples in val -> 372 per node -> 124 batches per node -> We want around 10 images
+# -> Interval = 124 / 10 = 12
+custom_hooks = [dict(type="CustomVisualizationHook", enable=True, interval=10)]
+
+# Use ViT-B/16
+model = dict(
+ backbone=dict(embed_dims=768, depth=12, num_heads=12),
+ cls_head=dict(in_channels=768),
+)
+load_from = "weights/vit-base-p16_videomaev2-vit-g-dist-k710-pre_16x4x1_kinetics-400_20230510-3e7f93b2.pth"
+
+# Use Gaussian sampling
+train_dataloader = dict(
+ dataset=dict(sampling_strategy=dict(type="GaussianSampling", clip_len=10))
+)
+# We are not changing the val/test dataloaders since Gaussian sampling requires labels,
+# and using labels in preprocessing would invalidate the validation
diff --git a/configs/experiments/vit-b_gaussian-sampling_priority-labeling_paper-hyperparams_weighted-ce-loss.py b/configs/experiments/vit-b_gaussian-sampling_priority-labeling_paper-hyperparams_weighted-ce-loss.py
new file mode 100644
index 0000000..e892cf2
--- /dev/null
+++ b/configs/experiments/vit-b_gaussian-sampling_priority-labeling_paper-hyperparams_weighted-ce-loss.py
@@ -0,0 +1,40 @@
+_base_ = [
+ "../models/vit-s-p16_videomaev2-vit-g-dist-k710-pre_16x4x1_kinetics-400_base.py"
+]
+
+EXPERIMENT_NAME = (
+ "vit-b_gaussian-sampling_priority-labeling_paper-hyperparams_weighted-ce-loss"
+)
+visualizer = dict(
+ vis_backends=dict(save_dir=f"experiments/tensorboard/{EXPERIMENT_NAME}/")
+)
+work_dir = f"experiments/{EXPERIMENT_NAME}"
+
+# Overrides
+default_hooks = dict(checkpoint=dict(interval=3))
+
+# 1487 samples in val -> 372 per node -> 124 batches per node -> We want around 10 images
+# -> Interval = 124 / 10 = 12
+custom_hooks = [dict(type="CustomVisualizationHook", enable=True, interval=10)]
+
+# Use ViT-B/16
+# Add weighted CE loss
+# weight_for_class_i = total_samples / (num_samples_in_class_i * num_classes)
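+# Hypothetical example of the formula (not the actual dataset counts): with 900 samples
+# split as [100, 200, 600], weights = [900/(100*3), 900/(200*3), 900/(600*3)] = [3.0, 1.5, 0.5]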
+model = dict(
+ backbone=dict(embed_dims=768, depth=12, num_heads=12),
+ cls_head=dict(
+ in_channels=768,
+ loss_cls=dict(
+ type="CrossEntropyLoss",
+ class_weight=[26.38235294117647, 37.901408450704224, 3.7168508287292816],
+ ),
+ ),
+)
+load_from = "weights/vit-base-p16_videomaev2-vit-g-dist-k710-pre_16x4x1_kinetics-400_20230510-3e7f93b2.pth"
+
+# Use Gaussian sampling
+train_dataloader = dict(
+ dataset=dict(sampling_strategy=dict(type="GaussianSampling", clip_len=10))
+)
+# We are not changing the val/test dataloaders since Gaussian sampling requires labels,
+# and using labels in preprocessing would invalidate the validation
diff --git a/configs/models/vit-s-p16_videomaev2-vit-g-dist-k710-pre_16x4x1_kinetics-400_k400-hyperparams.py b/configs/models/vit-s-p16_videomaev2-vit-g-dist-k710-pre_16x4x1_kinetics-400_k400-hyperparams.py
new file mode 100644
index 0000000..cbcf715
--- /dev/null
+++ b/configs/models/vit-s-p16_videomaev2-vit-g-dist-k710-pre_16x4x1_kinetics-400_k400-hyperparams.py
@@ -0,0 +1,416 @@
+_base_ = [
+ "../default_runtime.py",
+ "../datasets/high-quality-fall_runner_k400-hyperparams.py",
+]
+
+# Finetuning parameters are from VideoMAEv2 repo
+# https://github.com/OpenGVLab/VideoMAEv2/blob/master/scripts/finetune/vit_b_k400_ft.sh
+
+
+# ViT-S-P16
+model = dict(
+ type="Recognizer3D",
+ backbone=dict(
+ type="VisionTransformer",
+ img_size=224,
+ patch_size=16,
+ embed_dims=384,
+ depth=12,
+ num_heads=6,
+ mlp_ratio=4,
+ qkv_bias=True,
+ num_frames=16,
+ norm_cfg=dict(type="LN", eps=1e-6),
+ drop_path_rate=0.3, # From VideoMAEv2 repo
+ ),
+ cls_head=dict(
+ type="TimeSformerHead",
+ num_classes=3,
+ in_channels=384,
+ average_clips="prob",
+ topk=(1,),
+ ),
+ data_preprocessor=dict(
+ type="ActionDataPreprocessor",
+ mean=[102.17311096191406, 98.78225708007812, 92.68714141845703],
+ std=[58.04566192626953, 57.004024505615234, 57.3704948425293],
+ format_shape="NCTHW",
+ ),
+)
+
+# Loading weights
+load_from = "weights/vit-small-p16_videomaev2-vit-g-dist-k710-pre_16x4x1_kinetics-400_20230510-25c748fd.pth"
+
+# TRAINING CONFIG
+train_cfg = dict(type="EpochBasedTrainLoop", max_epochs=90, val_interval=1)
+
+# TODO: Think about fine-tuning param scheduler
+param_scheduler = [
+ dict(
+ type="LinearLR",
+ by_epoch=True,
+ convert_to_iter_based=True,
+ start_factor=1e-3,
+ end_factor=1,
+ begin=0,
+ end=5,
+ ), # From VideoMAEv2 repo - Warmup
+ dict(
+ type="CosineAnnealingLR",
+ by_epoch=True,
+ convert_to_iter_based=True,
+ eta_min=1e-6,
+ begin=5,
+ end=35,
+ ),
+]
+
+auto_scale_lr = dict(enable=True, base_batch_size=256)
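+# When enabled, MMEngine should scale the lr linearly by (actual total batch size / 256),
+# e.g. a single GPU with batch_size=12 would give lr * 12 / 256.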
+
+# Layer Decay and Weight Decay module configs
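+# The lr_mult values below follow layer-wise lr decay with factor 0.75 (derived in
+# notebooks/custom_keys_optimizer.ipynb): lr_mult = 0.75 ** (depth + 1 - layer_id),
+# e.g. 0.75 ** 13 ≈ 0.0238 for patch_embed and 0.75 ** 1 = 0.75 for the last block.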
+vit_b_layer_decay_75_custom_keys = {
+ "backbone.patch_embed.projection.weight": {
+ "lr_mult": 0.023757264018058777,
+ "decay_mult": 1,
+ },
+ "backbone.patch_embed.projection.bias": {
+ "lr_mult": 0.023757264018058777,
+ "decay_mult": 0,
+ },
+ "backbone.blocks.0.norm1.weight": {"lr_mult": 0.03167635202407837, "decay_mult": 0},
+ "backbone.blocks.0.norm1.bias": {"lr_mult": 0.03167635202407837, "decay_mult": 0},
+ "backbone.blocks.0.attn.q_bias": {"lr_mult": 0.03167635202407837, "decay_mult": 0},
+ "backbone.blocks.0.attn.v_bias": {"lr_mult": 0.03167635202407837, "decay_mult": 0},
+ "backbone.blocks.0.attn.proj.bias": {
+ "lr_mult": 0.03167635202407837,
+ "decay_mult": 0,
+ },
+ "backbone.blocks.0.norm2.weight": {"lr_mult": 0.03167635202407837, "decay_mult": 0},
+ "backbone.blocks.0.norm2.bias": {"lr_mult": 0.03167635202407837, "decay_mult": 0},
+ "backbone.blocks.0.mlp.layers.0.0.bias": {
+ "lr_mult": 0.03167635202407837,
+ "decay_mult": 0,
+ },
+ "backbone.blocks.0.mlp.layers.1.bias": {
+ "lr_mult": 0.03167635202407837,
+ "decay_mult": 0,
+ },
+ "backbone.blocks.0.attn.qkv.weight": {
+ "lr_mult": 0.03167635202407837,
+ "decay_mult": 1,
+ },
+ "backbone.blocks.0.attn.proj.weight": {
+ "lr_mult": 0.03167635202407837,
+ "decay_mult": 1,
+ },
+ "backbone.blocks.0.mlp.layers.0.0.weight": {
+ "lr_mult": 0.03167635202407837,
+ "decay_mult": 1,
+ },
+ "backbone.blocks.0.mlp.layers.1.weight": {
+ "lr_mult": 0.03167635202407837,
+ "decay_mult": 1,
+ },
+ "backbone.blocks.1.norm1.weight": {"lr_mult": 0.04223513603210449, "decay_mult": 0},
+ "backbone.blocks.1.norm1.bias": {"lr_mult": 0.04223513603210449, "decay_mult": 0},
+ "backbone.blocks.1.attn.q_bias": {"lr_mult": 0.04223513603210449, "decay_mult": 0},
+ "backbone.blocks.1.attn.v_bias": {"lr_mult": 0.04223513603210449, "decay_mult": 0},
+ "backbone.blocks.1.attn.proj.bias": {
+ "lr_mult": 0.04223513603210449,
+ "decay_mult": 0,
+ },
+ "backbone.blocks.1.norm2.weight": {"lr_mult": 0.04223513603210449, "decay_mult": 0},
+ "backbone.blocks.1.norm2.bias": {"lr_mult": 0.04223513603210449, "decay_mult": 0},
+ "backbone.blocks.1.mlp.layers.0.0.bias": {
+ "lr_mult": 0.04223513603210449,
+ "decay_mult": 0,
+ },
+ "backbone.blocks.1.mlp.layers.1.bias": {
+ "lr_mult": 0.04223513603210449,
+ "decay_mult": 0,
+ },
+ "backbone.blocks.1.attn.qkv.weight": {
+ "lr_mult": 0.04223513603210449,
+ "decay_mult": 1,
+ },
+ "backbone.blocks.1.attn.proj.weight": {
+ "lr_mult": 0.04223513603210449,
+ "decay_mult": 1,
+ },
+ "backbone.blocks.1.mlp.layers.0.0.weight": {
+ "lr_mult": 0.04223513603210449,
+ "decay_mult": 1,
+ },
+ "backbone.blocks.1.mlp.layers.1.weight": {
+ "lr_mult": 0.04223513603210449,
+ "decay_mult": 1,
+ },
+ "backbone.blocks.2.norm1.weight": {
+ "lr_mult": 0.056313514709472656,
+ "decay_mult": 0,
+ },
+ "backbone.blocks.2.norm1.bias": {"lr_mult": 0.056313514709472656, "decay_mult": 0},
+ "backbone.blocks.2.attn.q_bias": {"lr_mult": 0.056313514709472656, "decay_mult": 0},
+ "backbone.blocks.2.attn.v_bias": {"lr_mult": 0.056313514709472656, "decay_mult": 0},
+ "backbone.blocks.2.attn.proj.bias": {
+ "lr_mult": 0.056313514709472656,
+ "decay_mult": 0,
+ },
+ "backbone.blocks.2.norm2.weight": {
+ "lr_mult": 0.056313514709472656,
+ "decay_mult": 0,
+ },
+ "backbone.blocks.2.norm2.bias": {"lr_mult": 0.056313514709472656, "decay_mult": 0},
+ "backbone.blocks.2.mlp.layers.0.0.bias": {
+ "lr_mult": 0.056313514709472656,
+ "decay_mult": 0,
+ },
+ "backbone.blocks.2.mlp.layers.1.bias": {
+ "lr_mult": 0.056313514709472656,
+ "decay_mult": 0,
+ },
+ "backbone.blocks.2.attn.qkv.weight": {
+ "lr_mult": 0.056313514709472656,
+ "decay_mult": 1,
+ },
+ "backbone.blocks.2.attn.proj.weight": {
+ "lr_mult": 0.056313514709472656,
+ "decay_mult": 1,
+ },
+ "backbone.blocks.2.mlp.layers.0.0.weight": {
+ "lr_mult": 0.056313514709472656,
+ "decay_mult": 1,
+ },
+ "backbone.blocks.2.mlp.layers.1.weight": {
+ "lr_mult": 0.056313514709472656,
+ "decay_mult": 1,
+ },
+ "backbone.blocks.3.norm1.weight": {"lr_mult": 0.07508468627929688, "decay_mult": 0},
+ "backbone.blocks.3.norm1.bias": {"lr_mult": 0.07508468627929688, "decay_mult": 0},
+ "backbone.blocks.3.attn.q_bias": {"lr_mult": 0.07508468627929688, "decay_mult": 0},
+ "backbone.blocks.3.attn.v_bias": {"lr_mult": 0.07508468627929688, "decay_mult": 0},
+ "backbone.blocks.3.attn.proj.bias": {
+ "lr_mult": 0.07508468627929688,
+ "decay_mult": 0,
+ },
+ "backbone.blocks.3.norm2.weight": {"lr_mult": 0.07508468627929688, "decay_mult": 0},
+ "backbone.blocks.3.norm2.bias": {"lr_mult": 0.07508468627929688, "decay_mult": 0},
+ "backbone.blocks.3.mlp.layers.0.0.bias": {
+ "lr_mult": 0.07508468627929688,
+ "decay_mult": 0,
+ },
+ "backbone.blocks.3.mlp.layers.1.bias": {
+ "lr_mult": 0.07508468627929688,
+ "decay_mult": 0,
+ },
+ "backbone.blocks.3.attn.qkv.weight": {
+ "lr_mult": 0.07508468627929688,
+ "decay_mult": 1,
+ },
+ "backbone.blocks.3.attn.proj.weight": {
+ "lr_mult": 0.07508468627929688,
+ "decay_mult": 1,
+ },
+ "backbone.blocks.3.mlp.layers.0.0.weight": {
+ "lr_mult": 0.07508468627929688,
+ "decay_mult": 1,
+ },
+ "backbone.blocks.3.mlp.layers.1.weight": {
+ "lr_mult": 0.07508468627929688,
+ "decay_mult": 1,
+ },
+ "backbone.blocks.4.norm1.weight": {"lr_mult": 0.1001129150390625, "decay_mult": 0},
+ "backbone.blocks.4.norm1.bias": {"lr_mult": 0.1001129150390625, "decay_mult": 0},
+ "backbone.blocks.4.attn.q_bias": {"lr_mult": 0.1001129150390625, "decay_mult": 0},
+ "backbone.blocks.4.attn.v_bias": {"lr_mult": 0.1001129150390625, "decay_mult": 0},
+ "backbone.blocks.4.attn.proj.bias": {
+ "lr_mult": 0.1001129150390625,
+ "decay_mult": 0,
+ },
+ "backbone.blocks.4.norm2.weight": {"lr_mult": 0.1001129150390625, "decay_mult": 0},
+ "backbone.blocks.4.norm2.bias": {"lr_mult": 0.1001129150390625, "decay_mult": 0},
+ "backbone.blocks.4.mlp.layers.0.0.bias": {
+ "lr_mult": 0.1001129150390625,
+ "decay_mult": 0,
+ },
+ "backbone.blocks.4.mlp.layers.1.bias": {
+ "lr_mult": 0.1001129150390625,
+ "decay_mult": 0,
+ },
+ "backbone.blocks.4.attn.qkv.weight": {
+ "lr_mult": 0.1001129150390625,
+ "decay_mult": 1,
+ },
+ "backbone.blocks.4.attn.proj.weight": {
+ "lr_mult": 0.1001129150390625,
+ "decay_mult": 1,
+ },
+ "backbone.blocks.4.mlp.layers.0.0.weight": {
+ "lr_mult": 0.1001129150390625,
+ "decay_mult": 1,
+ },
+ "backbone.blocks.4.mlp.layers.1.weight": {
+ "lr_mult": 0.1001129150390625,
+ "decay_mult": 1,
+ },
+ "backbone.blocks.5.norm1.weight": {"lr_mult": 0.13348388671875, "decay_mult": 0},
+ "backbone.blocks.5.norm1.bias": {"lr_mult": 0.13348388671875, "decay_mult": 0},
+ "backbone.blocks.5.attn.q_bias": {"lr_mult": 0.13348388671875, "decay_mult": 0},
+ "backbone.blocks.5.attn.v_bias": {"lr_mult": 0.13348388671875, "decay_mult": 0},
+ "backbone.blocks.5.attn.proj.bias": {"lr_mult": 0.13348388671875, "decay_mult": 0},
+ "backbone.blocks.5.norm2.weight": {"lr_mult": 0.13348388671875, "decay_mult": 0},
+ "backbone.blocks.5.norm2.bias": {"lr_mult": 0.13348388671875, "decay_mult": 0},
+ "backbone.blocks.5.mlp.layers.0.0.bias": {
+ "lr_mult": 0.13348388671875,
+ "decay_mult": 0,
+ },
+ "backbone.blocks.5.mlp.layers.1.bias": {
+ "lr_mult": 0.13348388671875,
+ "decay_mult": 0,
+ },
+ "backbone.blocks.5.attn.qkv.weight": {"lr_mult": 0.13348388671875, "decay_mult": 1},
+ "backbone.blocks.5.attn.proj.weight": {
+ "lr_mult": 0.13348388671875,
+ "decay_mult": 1,
+ },
+ "backbone.blocks.5.mlp.layers.0.0.weight": {
+ "lr_mult": 0.13348388671875,
+ "decay_mult": 1,
+ },
+ "backbone.blocks.5.mlp.layers.1.weight": {
+ "lr_mult": 0.13348388671875,
+ "decay_mult": 1,
+ },
+ "backbone.blocks.6.norm1.weight": {"lr_mult": 0.177978515625, "decay_mult": 0},
+ "backbone.blocks.6.norm1.bias": {"lr_mult": 0.177978515625, "decay_mult": 0},
+ "backbone.blocks.6.attn.q_bias": {"lr_mult": 0.177978515625, "decay_mult": 0},
+ "backbone.blocks.6.attn.v_bias": {"lr_mult": 0.177978515625, "decay_mult": 0},
+ "backbone.blocks.6.attn.proj.bias": {"lr_mult": 0.177978515625, "decay_mult": 0},
+ "backbone.blocks.6.norm2.weight": {"lr_mult": 0.177978515625, "decay_mult": 0},
+ "backbone.blocks.6.norm2.bias": {"lr_mult": 0.177978515625, "decay_mult": 0},
+ "backbone.blocks.6.mlp.layers.0.0.bias": {
+ "lr_mult": 0.177978515625,
+ "decay_mult": 0,
+ },
+ "backbone.blocks.6.mlp.layers.1.bias": {"lr_mult": 0.177978515625, "decay_mult": 0},
+ "backbone.blocks.6.attn.qkv.weight": {"lr_mult": 0.177978515625, "decay_mult": 1},
+ "backbone.blocks.6.attn.proj.weight": {"lr_mult": 0.177978515625, "decay_mult": 1},
+ "backbone.blocks.6.mlp.layers.0.0.weight": {
+ "lr_mult": 0.177978515625,
+ "decay_mult": 1,
+ },
+ "backbone.blocks.6.mlp.layers.1.weight": {
+ "lr_mult": 0.177978515625,
+ "decay_mult": 1,
+ },
+ "backbone.blocks.7.norm1.weight": {"lr_mult": 0.2373046875, "decay_mult": 0},
+ "backbone.blocks.7.norm1.bias": {"lr_mult": 0.2373046875, "decay_mult": 0},
+ "backbone.blocks.7.attn.q_bias": {"lr_mult": 0.2373046875, "decay_mult": 0},
+ "backbone.blocks.7.attn.v_bias": {"lr_mult": 0.2373046875, "decay_mult": 0},
+ "backbone.blocks.7.attn.proj.bias": {"lr_mult": 0.2373046875, "decay_mult": 0},
+ "backbone.blocks.7.norm2.weight": {"lr_mult": 0.2373046875, "decay_mult": 0},
+ "backbone.blocks.7.norm2.bias": {"lr_mult": 0.2373046875, "decay_mult": 0},
+ "backbone.blocks.7.mlp.layers.0.0.bias": {"lr_mult": 0.2373046875, "decay_mult": 0},
+ "backbone.blocks.7.mlp.layers.1.bias": {"lr_mult": 0.2373046875, "decay_mult": 0},
+ "backbone.blocks.7.attn.qkv.weight": {"lr_mult": 0.2373046875, "decay_mult": 1},
+ "backbone.blocks.7.attn.proj.weight": {"lr_mult": 0.2373046875, "decay_mult": 1},
+ "backbone.blocks.7.mlp.layers.0.0.weight": {
+ "lr_mult": 0.2373046875,
+ "decay_mult": 1,
+ },
+ "backbone.blocks.7.mlp.layers.1.weight": {"lr_mult": 0.2373046875, "decay_mult": 1},
+ "backbone.blocks.8.norm1.weight": {"lr_mult": 0.31640625, "decay_mult": 0},
+ "backbone.blocks.8.norm1.bias": {"lr_mult": 0.31640625, "decay_mult": 0},
+ "backbone.blocks.8.attn.q_bias": {"lr_mult": 0.31640625, "decay_mult": 0},
+ "backbone.blocks.8.attn.v_bias": {"lr_mult": 0.31640625, "decay_mult": 0},
+ "backbone.blocks.8.attn.proj.bias": {"lr_mult": 0.31640625, "decay_mult": 0},
+ "backbone.blocks.8.norm2.weight": {"lr_mult": 0.31640625, "decay_mult": 0},
+ "backbone.blocks.8.norm2.bias": {"lr_mult": 0.31640625, "decay_mult": 0},
+ "backbone.blocks.8.mlp.layers.0.0.bias": {"lr_mult": 0.31640625, "decay_mult": 0},
+ "backbone.blocks.8.mlp.layers.1.bias": {"lr_mult": 0.31640625, "decay_mult": 0},
+ "backbone.blocks.8.attn.qkv.weight": {"lr_mult": 0.31640625, "decay_mult": 1},
+ "backbone.blocks.8.attn.proj.weight": {"lr_mult": 0.31640625, "decay_mult": 1},
+ "backbone.blocks.8.mlp.layers.0.0.weight": {"lr_mult": 0.31640625, "decay_mult": 1},
+ "backbone.blocks.8.mlp.layers.1.weight": {"lr_mult": 0.31640625, "decay_mult": 1},
+ "backbone.blocks.9.norm1.weight": {"lr_mult": 0.421875, "decay_mult": 0},
+ "backbone.blocks.9.norm1.bias": {"lr_mult": 0.421875, "decay_mult": 0},
+ "backbone.blocks.9.attn.q_bias": {"lr_mult": 0.421875, "decay_mult": 0},
+ "backbone.blocks.9.attn.v_bias": {"lr_mult": 0.421875, "decay_mult": 0},
+ "backbone.blocks.9.attn.proj.bias": {"lr_mult": 0.421875, "decay_mult": 0},
+ "backbone.blocks.9.norm2.weight": {"lr_mult": 0.421875, "decay_mult": 0},
+ "backbone.blocks.9.norm2.bias": {"lr_mult": 0.421875, "decay_mult": 0},
+ "backbone.blocks.9.mlp.layers.0.0.bias": {"lr_mult": 0.421875, "decay_mult": 0},
+ "backbone.blocks.9.mlp.layers.1.bias": {"lr_mult": 0.421875, "decay_mult": 0},
+ "backbone.blocks.9.attn.qkv.weight": {"lr_mult": 0.421875, "decay_mult": 1},
+ "backbone.blocks.9.attn.proj.weight": {"lr_mult": 0.421875, "decay_mult": 1},
+ "backbone.blocks.9.mlp.layers.0.0.weight": {"lr_mult": 0.421875, "decay_mult": 1},
+ "backbone.blocks.9.mlp.layers.1.weight": {"lr_mult": 0.421875, "decay_mult": 1},
+ "backbone.blocks.10.norm1.weight": {"lr_mult": 0.5625, "decay_mult": 0},
+ "backbone.blocks.10.norm1.bias": {"lr_mult": 0.5625, "decay_mult": 0},
+ "backbone.blocks.10.attn.q_bias": {"lr_mult": 0.5625, "decay_mult": 0},
+ "backbone.blocks.10.attn.v_bias": {"lr_mult": 0.5625, "decay_mult": 0},
+ "backbone.blocks.10.attn.proj.bias": {"lr_mult": 0.5625, "decay_mult": 0},
+ "backbone.blocks.10.norm2.weight": {"lr_mult": 0.5625, "decay_mult": 0},
+ "backbone.blocks.10.norm2.bias": {"lr_mult": 0.5625, "decay_mult": 0},
+ "backbone.blocks.10.mlp.layers.0.0.bias": {"lr_mult": 0.5625, "decay_mult": 0},
+ "backbone.blocks.10.mlp.layers.1.bias": {"lr_mult": 0.5625, "decay_mult": 0},
+ "backbone.blocks.10.attn.qkv.weight": {"lr_mult": 0.5625, "decay_mult": 1},
+ "backbone.blocks.10.attn.proj.weight": {"lr_mult": 0.5625, "decay_mult": 1},
+ "backbone.blocks.10.mlp.layers.0.0.weight": {"lr_mult": 0.5625, "decay_mult": 1},
+ "backbone.blocks.10.mlp.layers.1.weight": {"lr_mult": 0.5625, "decay_mult": 1},
+ "backbone.blocks.11.norm1.weight": {"lr_mult": 0.75, "decay_mult": 0},
+ "backbone.blocks.11.norm1.bias": {"lr_mult": 0.75, "decay_mult": 0},
+ "backbone.blocks.11.attn.q_bias": {"lr_mult": 0.75, "decay_mult": 0},
+ "backbone.blocks.11.attn.v_bias": {"lr_mult": 0.75, "decay_mult": 0},
+ "backbone.blocks.11.attn.proj.bias": {"lr_mult": 0.75, "decay_mult": 0},
+ "backbone.blocks.11.norm2.weight": {"lr_mult": 0.75, "decay_mult": 0},
+ "backbone.blocks.11.norm2.bias": {"lr_mult": 0.75, "decay_mult": 0},
+ "backbone.blocks.11.mlp.layers.0.0.bias": {"lr_mult": 0.75, "decay_mult": 0},
+ "backbone.blocks.11.mlp.layers.1.bias": {"lr_mult": 0.75, "decay_mult": 0},
+ "backbone.blocks.11.attn.qkv.weight": {"lr_mult": 0.75, "decay_mult": 1},
+ "backbone.blocks.11.attn.proj.weight": {"lr_mult": 0.75, "decay_mult": 1},
+ "backbone.blocks.11.mlp.layers.0.0.weight": {"lr_mult": 0.75, "decay_mult": 1},
+ "backbone.blocks.11.mlp.layers.1.weight": {"lr_mult": 0.75, "decay_mult": 1},
+ "backbone.fc_norm.weight": {"lr_mult": 1.0, "decay_mult": 0},
+ "backbone.fc_norm.bias": {"lr_mult": 1.0, "decay_mult": 0},
+ "cls_head.fc_cls.bias": {"lr_mult": 1.0, "decay_mult": 0},
+ "cls_head.fc_cls.weight": {"lr_mult": 1.0, "decay_mult": 1},
+}
+
+
+optim_wrapper = dict(
+ type="AmpOptimWrapper", # Automatic Mixed Precision may speed up trainig
+ optimizer=dict(
+ type="AdamW", # From VideoMAEv2 repo
+ lr=7e-4, # From VideoMAEv2 repo
+ weight_decay=0.05, # From VideoMAEv2 repo
+ betas=(0.9, 0.999), # From VideoMAEv2 repo
+ ),
+ paramwise_cfg=dict(custom_keys=vit_b_layer_decay_75_custom_keys),
+ # clip_grad=dict(max_norm=5, norm_type=2), # From VideoMAEv2 repo
+)
+
+# VALIDATION CONFIG
+val_evaluator = dict(
+ type="AddAccMetric",
+ metric_list=(
+ "unweighted_average_f1",
+ "per_class_f1",
+ "per_class_precision",
+ "per_class_recall",
+ ),
+)
+val_cfg = dict(type="ValLoop")
+
+
+# TEST CONFIG
+test_evaluator = dict(
+ type="AddAccMetric",
+ metric_list=(
+ "unweighted_average_f1",
+ "per_class_f1",
+ "per_class_precision",
+ "per_class_recall",
+ ),
+)
+test_cfg = dict(type="TestLoop")
diff --git a/experiments.dvc b/experiments.dvc
index 98f6b2a..ff1b824 100644
--- a/experiments.dvc
+++ b/experiments.dvc
@@ -1,6 +1,6 @@
outs:
-- md5: 004d25dfcdbec8b9a95e429079227b93.dir
- size: 1022832432
- nfiles: 9
+- md5: 55075530cd6a7d51a35547b6eebafda0.dir
+ size: 21652483819
+ nfiles: 97
hash: md5
path: experiments
diff --git a/job_scripts/vit-b_frame-int-8_gaussian-sampling-5s-clips-30-drop_priority-labeling_k400-hyperparams.sh b/job_scripts/vit-b_frame-int-8_gaussian-sampling-5s-clips-30-drop_priority-labeling_k400-hyperparams.sh
new file mode 100644
index 0000000..07c096e
--- /dev/null
+++ b/job_scripts/vit-b_frame-int-8_gaussian-sampling-5s-clips-30-drop_priority-labeling_k400-hyperparams.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+#SBATCH -A NAISS2023-22-1160 -p alvis
+#SBATCH -N 1 --gpus-per-node=A40:1
+#SBATCH --time=48:00:00
+
+apptainer exec \
+ --env PYTHONPATH=$(pwd) \
+ containers/c3se_job_container.sif \
+ python mmaction2/tools/train.py \
+ configs/experiments/vit-b_frame-int-8_gaussian-sampling-5s-clips-30-drop_priority-labeling_k400-hyperparams.py
\ No newline at end of file
diff --git a/job_scripts/vit-b_gaussian-sampling-5s-clips-30-drop_priority-labeling_k400-hyperparams.sh b/job_scripts/vit-b_gaussian-sampling-5s-clips-30-drop_priority-labeling_k400-hyperparams.sh
new file mode 100644
index 0000000..0fafc47
--- /dev/null
+++ b/job_scripts/vit-b_gaussian-sampling-5s-clips-30-drop_priority-labeling_k400-hyperparams.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+#SBATCH -A NAISS2023-22-1160 -p alvis
+#SBATCH -N 1 --gpus-per-node=A40:1
+#SBATCH --time=48:00:00
+
+apptainer exec \
+ --env PYTHONPATH=$(pwd) \
+ containers/c3se_job_container.sif \
+ python mmaction2/tools/train.py \
+ configs/experiments/vit-b_gaussian-sampling-5s-clips-30-drop_priority-labeling_k400-hyperparams.py
\ No newline at end of file
diff --git a/job_scripts/vit-b_gaussian-sampling_priority-labeling_k400-hyperparams.sh b/job_scripts/vit-b_gaussian-sampling_priority-labeling_k400-hyperparams.sh
new file mode 100644
index 0000000..fc47fdd
--- /dev/null
+++ b/job_scripts/vit-b_gaussian-sampling_priority-labeling_k400-hyperparams.sh
@@ -0,0 +1,11 @@
+#!/usr/bin/env bash
+#SBATCH -A NAISS2023-22-1160 -p alvis
+#SBATCH -N 1 --gpus-per-node=A40:1
+#SBATCH --time=24:00:00
+
+apptainer exec \
+ --env PYTHONPATH=$(pwd) \
+ containers/c3se_job_container.sif \
+ python mmaction2/tools/train.py \
+ configs/experiments/vit-b_gaussian-sampling_priority-labeling_k400-hyperparams.py \
+ --resume auto
\ No newline at end of file
diff --git a/job_scripts/vit-b_gaussian-sampling_priority-labeling_paper-hyperparams.sh b/job_scripts/vit-b_gaussian-sampling_priority-labeling_paper-hyperparams.sh
new file mode 100644
index 0000000..ce03cdb
--- /dev/null
+++ b/job_scripts/vit-b_gaussian-sampling_priority-labeling_paper-hyperparams.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+#SBATCH -A NAISS2023-22-1160 -p alvis
+#SBATCH -N 1 --gpus-per-node=A40:4
+#SBATCH --time=24:00:00
+
+apptainer exec \
+ --env PYTHONPATH=$(pwd) \
+ containers/c3se_job_container.sif \
+ python -m torch.distributed.launch --nproc_per_node=4 \
+ mmaction2/tools/train.py \
+ configs/experiments/vit-b_gaussian-sampling_priority-labeling_paper-hyperparams.py \
+ --launcher pytorch --resume auto
\ No newline at end of file
diff --git a/job_scripts/vit-b_gaussian-sampling_priority-labeling_paper-hyperparams_weighted-ce-loss.sh b/job_scripts/vit-b_gaussian-sampling_priority-labeling_paper-hyperparams_weighted-ce-loss.sh
new file mode 100644
index 0000000..0603a02
--- /dev/null
+++ b/job_scripts/vit-b_gaussian-sampling_priority-labeling_paper-hyperparams_weighted-ce-loss.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+#SBATCH -A NAISS2023-22-1160 -p alvis
+#SBATCH -N 1 --gpus-per-node=A40:4
+#SBATCH --time=24:00:00
+
+apptainer exec \
+ --env PYTHONPATH=$(pwd) \
+ containers/c3se_job_container.sif \
+ python -m torch.distributed.launch --nproc_per_node=4 \
+ mmaction2/tools/train.py \
+ configs/experiments/vit-b_gaussian-sampling_priority-labeling_paper-hyperparams_weighted-ce-loss.py \
+ --launcher pytorch
\ No newline at end of file
diff --git a/notebooks/custom_keys_optimizer.ipynb b/notebooks/custom_keys_optimizer.ipynb
new file mode 100644
index 0000000..31557cb
--- /dev/null
+++ b/notebooks/custom_keys_optimizer.ipynb
@@ -0,0 +1,938 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Custom Keys Optimizer\n",
+ "\n",
+ "Here we create the custom keys dictionary for the runner config.\n",
+ "It is necessary to get layer decay."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "LAYER_DECAY = 0.75\n",
+ "MODEL_DEPTH = 12\n",
+ "BASE_WEIGHT_DECAY = 0.05"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "12/04 22:32:34 - mmengine - \u001b[4m\u001b[97mINFO\u001b[0m - \n",
+ "------------------------------------------------------------\n",
+ "System environment:\n",
+ " sys.platform: darwin\n",
+ " Python: 3.10.13 | packaged by conda-forge | (main, Oct 26 2023, 18:09:17) [Clang 16.0.6 ]\n",
+ " CUDA available: False\n",
+ " numpy_random_seed: 104644062\n",
+ " GCC: Apple clang version 15.0.0 (clang-1500.0.40.1)\n",
+ " PyTorch: 2.1.1\n",
+ " PyTorch compiling details: PyTorch built with:\n",
+ " - GCC 4.2\n",
+ " - C++ Version: 201703\n",
+ " - clang 13.1.6\n",
+ " - LAPACK is enabled (usually provided by MKL)\n",
+ " - NNPACK is enabled\n",
+ " - CPU capability usage: NO AVX\n",
+ " - Build settings: BLAS_INFO=accelerate, BUILD_TYPE=Release, CXX_COMPILER=/Applications/Xcode_13.3.1.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/bin/clang++, CXX_FLAGS= -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -DNDEBUG -DUSE_KINETO -DLIBKINETO_NOCUPTI -DLIBKINETO_NOROCTRACER -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -DUSE_PYTORCH_METAL_EXPORT -DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DUSE_COREML_DELEGATE -O2 -fPIC -Wall -Wextra -Werror=return-type -Werror=non-virtual-dtor -Werror=braced-scalar-init -Werror=range-loop-construct -Werror=bool-operation -Wnarrowing -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-unused-parameter -Wno-unused-function -Wno-unused-result -Wno-strict-overflow -Wno-strict-aliasing -Wvla-extension -Wnewline-eof -Winconsistent-missing-override -Winconsistent-missing-destructor-override -Wno-range-loop-analysis -Wno-pass-failed -Wsuggest-override -Wno-error=pedantic -Wno-error=old-style-cast -Wno-error=inconsistent-missing-override -Wno-error=inconsistent-missing-destructor-override -Wconstant-conversion -Wno-invalid-partial-specialization -Wno-unused-private-field -Wno-missing-braces -Wunused-lambda-capture -Qunused-arguments -fcolor-diagnostics -faligned-new -Wno-unused-but-set-variable -fno-math-errno -fno-trapping-math -Werror=format -Werror=cast-function-type -DUSE_MPS -Wno-unused-private-field -Wno-missing-braces, LAPACK_INFO=accelerate, TORCH_DISABLE_GPU_ASSERTS=OFF, TORCH_VERSION=2.1.1, USE_CUDA=0, USE_CUDNN=OFF, USE_EIGEN_FOR_BLAS=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=OFF, USE_MKLDNN=OFF, USE_MPI=OFF, USE_NCCL=OFF, USE_NNPACK=ON, USE_OPENMP=OFF, USE_ROCM=OFF, \n",
+ "\n",
+ " TorchVision: 0.16.1\n",
+ " OpenCV: 4.8.1\n",
+ " MMEngine: 0.10.1\n",
+ "\n",
+ "Runtime environment:\n",
+ " cudnn_benchmark: False\n",
+ " mp_cfg: {'mp_start_method': 'fork', 'opencv_num_threads': 0}\n",
+ " dist_cfg: {'backend': 'nccl'}\n",
+ " seed: 104644062\n",
+ " Distributed launcher: none\n",
+ " Distributed training: False\n",
+ " GPU number: 1\n",
+ "------------------------------------------------------------\n",
+ "\n",
+ "12/04 22:32:34 - mmengine - \u001b[4m\u001b[97mINFO\u001b[0m - Config:\n",
+ "ann_file_test = 'data/Fall_Simulation_Data/annotations_test.csv'\n",
+ "ann_file_train = 'data/Fall_Simulation_Data/annotations_train.csv'\n",
+ "ann_file_val = 'data/Fall_Simulation_Data/annotations_val.csv'\n",
+ "custom_hooks = [\n",
+ " dict(enable=True, type='CustomVisualizationHook'),\n",
+ "]\n",
+ "custom_imports = dict(\n",
+ " allow_failed_imports=False,\n",
+ " imports=[\n",
+ " 'datasets',\n",
+ " 'evaluation',\n",
+ " 'visualization',\n",
+ " ])\n",
+ "dataset_type = 'HighQualityFallDataset'\n",
+ "default_hooks = dict(\n",
+ " checkpoint=dict(\n",
+ " by_epoch=True,\n",
+ " interval=3,\n",
+ " max_keep_ckpts=3,\n",
+ " save_best='auto',\n",
+ " type='CheckpointHook'),\n",
+ " logger=dict(type='LoggerHook'),\n",
+ " param_scheduler=dict(type='ParamSchedulerHook'),\n",
+ " runtime_info=dict(type='RuntimeInfoHook'),\n",
+ " sampler_seed=dict(type='DistSamplerSeedHook'),\n",
+ " sync_buffers=dict(type='SyncBuffersHook'),\n",
+ " timer=dict(type='IterTimerHook'))\n",
+ "default_scope = 'mmaction'\n",
+ "env_cfg = dict(\n",
+ " cudnn_benchmark=False,\n",
+ " dist_cfg=dict(backend='nccl'),\n",
+ " mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0))\n",
+ "label_strategy = dict(\n",
+ " label_description=dict(\n",
+ " end_timestamp_names=[\n",
+ " 'fall_end',\n",
+ " 'lying_end',\n",
+ " ],\n",
+ " names=[\n",
+ " 'fall',\n",
+ " 'lying',\n",
+ " 'other',\n",
+ " ],\n",
+ " other_class=2,\n",
+ " start_timestamp_names=[\n",
+ " 'fall_start',\n",
+ " 'lying_start',\n",
+ " ],\n",
+ " visible_names=[\n",
+ " 'fall_visible',\n",
+ " 'lying_visible',\n",
+ " ]),\n",
+ " type='PriorityLabel')\n",
+ "launcher = 'none'\n",
+ "load_from = 'weights/vit-small-p16_videomaev2-vit-g-dist-k710-pre_16x4x1_kinetics-400_20230510-25c748fd.pth'\n",
+ "log_level = 'INFO'\n",
+ "log_processor = dict(by_epoch=True, type='LogProcessor', window_size=10)\n",
+ "model = dict(\n",
+ " backbone=dict(\n",
+ " depth=12,\n",
+ " drop_path_rate=0.3,\n",
+ " embed_dims=384,\n",
+ " img_size=224,\n",
+ " mlp_ratio=4,\n",
+ " norm_cfg=dict(eps=1e-06, type='LN'),\n",
+ " num_frames=16,\n",
+ " num_heads=6,\n",
+ " patch_size=16,\n",
+ " qkv_bias=True,\n",
+ " type='VisionTransformer'),\n",
+ " cls_head=dict(\n",
+ " average_clips='prob',\n",
+ " in_channels=384,\n",
+ " num_classes=3,\n",
+ " topk=(1, ),\n",
+ " type='TimeSformerHead'),\n",
+ " data_preprocessor=dict(\n",
+ " format_shape='NCTHW',\n",
+ " mean=[\n",
+ " 102.17311096191406,\n",
+ " 98.78225708007812,\n",
+ " 92.68714141845703,\n",
+ " ],\n",
+ " std=[\n",
+ " 58.04566192626953,\n",
+ " 57.004024505615234,\n",
+ " 57.3704948425293,\n",
+ " ],\n",
+ " type='ActionDataPreprocessor'),\n",
+ " type='Recognizer3D')\n",
+ "optim_wrapper = dict(\n",
+ " clip_grad=dict(max_norm=5, norm_type=2),\n",
+ " optimizer=dict(\n",
+ " betas=(\n",
+ " 0.9,\n",
+ " 0.999,\n",
+ " ), lr=0.001, type='AdamW', weight_decay=0.1),\n",
+ " type='AmpOptimWrapper')\n",
+ "param_scheduler = [\n",
+ " dict(\n",
+ " begin=0,\n",
+ " by_epoch=True,\n",
+ " convert_to_iter_based=True,\n",
+ " end=5,\n",
+ " end_factor=1,\n",
+ " start_factor=0.001,\n",
+ " type='LinearLR'),\n",
+ " dict(\n",
+ " begin=5,\n",
+ " by_epoch=True,\n",
+ " convert_to_iter_based=True,\n",
+ " end=35,\n",
+ " eta_min=1e-06,\n",
+ " type='CosineAnnealingLR'),\n",
+ "]\n",
+ "resume = False\n",
+ "sampling_strategy = dict(clip_len=10, type='UniformSampling')\n",
+ "test_cfg = dict(type='TestLoop')\n",
+ "test_dataloader = dict(\n",
+ " batch_size=3,\n",
+ " dataset=dict(\n",
+ " ann_file='data/Fall_Simulation_Data/annotations_test.csv',\n",
+ " label_strategy=dict(\n",
+ " label_description=dict(\n",
+ " end_timestamp_names=[\n",
+ " 'fall_end',\n",
+ " 'lying_end',\n",
+ " ],\n",
+ " names=[\n",
+ " 'fall',\n",
+ " 'lying',\n",
+ " 'other',\n",
+ " ],\n",
+ " other_class=2,\n",
+ " start_timestamp_names=[\n",
+ " 'fall_start',\n",
+ " 'lying_start',\n",
+ " ],\n",
+ " visible_names=[\n",
+ " 'fall_visible',\n",
+ " 'lying_visible',\n",
+ " ]),\n",
+ " type='PriorityLabel'),\n",
+ " num_classes=3,\n",
+ " pipeline=[\n",
+ " dict(type='DecordInit'),\n",
+ " dict(\n",
+ " clip_len=16,\n",
+ " frame_interval=4,\n",
+ " num_clips=5,\n",
+ " test_mode=True,\n",
+ " type='SampleFrames'),\n",
+ " dict(type='DecordDecode'),\n",
+ " dict(scale=(\n",
+ " -1,\n",
+ " 224,\n",
+ " ), type='Resize'),\n",
+ " dict(crop_size=224, type='ThreeCrop'),\n",
+ " dict(input_format='NCTHW', type='FormatShape'),\n",
+ " dict(type='PackActionInputs'),\n",
+ " ],\n",
+ " sampling_strategy=dict(clip_len=10, type='UniformSampling'),\n",
+ " type='HighQualityFallDataset'),\n",
+ " num_workers=8,\n",
+ " persistent_workers=True,\n",
+ " sampler=dict(shuffle=False, type='DefaultSampler'))\n",
+ "test_evaluator = dict(\n",
+ " metric_list=(\n",
+ " 'unweighted_average_f1',\n",
+ " 'per_class_f1',\n",
+ " 'per_class_precision',\n",
+ " 'per_class_recall',\n",
+ " ),\n",
+ " type='AddAccMetric')\n",
+ "test_pipeline = [\n",
+ " dict(type='DecordInit'),\n",
+ " dict(\n",
+ " clip_len=16,\n",
+ " frame_interval=4,\n",
+ " num_clips=5,\n",
+ " test_mode=True,\n",
+ " type='SampleFrames'),\n",
+ " dict(type='DecordDecode'),\n",
+ " dict(scale=(\n",
+ " -1,\n",
+ " 224,\n",
+ " ), type='Resize'),\n",
+ " dict(crop_size=224, type='ThreeCrop'),\n",
+ " dict(input_format='NCTHW', type='FormatShape'),\n",
+ " dict(type='PackActionInputs'),\n",
+ "]\n",
+ "train_cfg = dict(max_epochs=35, type='EpochBasedTrainLoop', val_interval=1)\n",
+ "train_dataloader = dict(\n",
+ " batch_size=3,\n",
+ " dataset=dict(\n",
+ " ann_file='data/Fall_Simulation_Data/annotations_train.csv',\n",
+ " label_strategy=dict(\n",
+ " label_description=dict(\n",
+ " end_timestamp_names=[\n",
+ " 'fall_end',\n",
+ " 'lying_end',\n",
+ " ],\n",
+ " names=[\n",
+ " 'fall',\n",
+ " 'lying',\n",
+ " 'other',\n",
+ " ],\n",
+ " other_class=2,\n",
+ " start_timestamp_names=[\n",
+ " 'fall_start',\n",
+ " 'lying_start',\n",
+ " ],\n",
+ " visible_names=[\n",
+ " 'fall_visible',\n",
+ " 'lying_visible',\n",
+ " ]),\n",
+ " type='PriorityLabel'),\n",
+ " num_classes=3,\n",
+ " pipeline=[\n",
+ " dict(type='DecordInit'),\n",
+ " dict(type='ClipVideo'),\n",
+ " dict(\n",
+ " clip_len=16,\n",
+ " frame_interval=4,\n",
+ " num_clips=1,\n",
+ " type='SampleFrames'),\n",
+ " dict(type='DecordDecode'),\n",
+ " dict(scale=(\n",
+ " -1,\n",
+ " 224,\n",
+ " ), type='Resize'),\n",
+ " dict(size=224, type='RandomCrop'),\n",
+ " dict(keep_ratio=False, scale=(\n",
+ " 224,\n",
+ " 224,\n",
+ " ), type='Resize'),\n",
+ " dict(flip_ratio=0.5, type='Flip'),\n",
+ " dict(input_format='NCTHW', type='FormatShape'),\n",
+ " dict(type='PackActionInputs'),\n",
+ " ],\n",
+ " sampling_strategy=dict(clip_len=10, type='UniformSampling'),\n",
+ " type='HighQualityFallDataset'),\n",
+ " num_workers=8,\n",
+ " persistent_workers=True,\n",
+ " sampler=dict(shuffle=True, type='DefaultSampler'))\n",
+ "train_pipeline = [\n",
+ " dict(type='DecordInit'),\n",
+ " dict(type='ClipVideo'),\n",
+ " dict(clip_len=16, frame_interval=4, num_clips=1, type='SampleFrames'),\n",
+ " dict(type='DecordDecode'),\n",
+ " dict(scale=(\n",
+ " -1,\n",
+ " 224,\n",
+ " ), type='Resize'),\n",
+ " dict(size=224, type='RandomCrop'),\n",
+ " dict(keep_ratio=False, scale=(\n",
+ " 224,\n",
+ " 224,\n",
+ " ), type='Resize'),\n",
+ " dict(flip_ratio=0.5, type='Flip'),\n",
+ " dict(input_format='NCTHW', type='FormatShape'),\n",
+ " dict(type='PackActionInputs'),\n",
+ "]\n",
+ "val_cfg = dict(type='ValLoop')\n",
+ "val_dataloader = dict(\n",
+ " batch_size=3,\n",
+ " dataset=dict(\n",
+ " ann_file='data/Fall_Simulation_Data/annotations_val.csv',\n",
+ " label_strategy=dict(\n",
+ " label_description=dict(\n",
+ " end_timestamp_names=[\n",
+ " 'fall_end',\n",
+ " 'lying_end',\n",
+ " ],\n",
+ " names=[\n",
+ " 'fall',\n",
+ " 'lying',\n",
+ " 'other',\n",
+ " ],\n",
+ " other_class=2,\n",
+ " start_timestamp_names=[\n",
+ " 'fall_start',\n",
+ " 'lying_start',\n",
+ " ],\n",
+ " visible_names=[\n",
+ " 'fall_visible',\n",
+ " 'lying_visible',\n",
+ " ]),\n",
+ " type='PriorityLabel'),\n",
+ " num_classes=3,\n",
+ " pipeline=[\n",
+ " dict(type='DecordInit'),\n",
+ " dict(type='ClipVideo'),\n",
+ " dict(\n",
+ " clip_len=16,\n",
+ " frame_interval=4,\n",
+ " num_clips=1,\n",
+ " test_mode=True,\n",
+ " type='SampleFrames'),\n",
+ " dict(type='DecordDecode'),\n",
+ " dict(scale=(\n",
+ " -1,\n",
+ " 224,\n",
+ " ), type='Resize'),\n",
+ " dict(crop_size=224, type='CenterCrop'),\n",
+ " dict(input_format='NCTHW', type='FormatShape'),\n",
+ " dict(type='PackActionInputs'),\n",
+ " ],\n",
+ " sampling_strategy=dict(clip_len=10, type='UniformSampling'),\n",
+ " type='HighQualityFallDataset'),\n",
+ " num_workers=8,\n",
+ " persistent_workers=True,\n",
+ " sampler=dict(shuffle=False, type='DefaultSampler'))\n",
+ "val_evaluator = dict(\n",
+ " metric_list=(\n",
+ " 'unweighted_average_f1',\n",
+ " 'per_class_f1',\n",
+ " 'per_class_precision',\n",
+ " 'per_class_recall',\n",
+ " ),\n",
+ " type='AddAccMetric')\n",
+ "val_pipeline = [\n",
+ " dict(type='DecordInit'),\n",
+ " dict(type='ClipVideo'),\n",
+ " dict(\n",
+ " clip_len=16,\n",
+ " frame_interval=4,\n",
+ " num_clips=1,\n",
+ " test_mode=True,\n",
+ " type='SampleFrames'),\n",
+ " dict(type='DecordDecode'),\n",
+ " dict(scale=(\n",
+ " -1,\n",
+ " 224,\n",
+ " ), type='Resize'),\n",
+ " dict(crop_size=224, type='CenterCrop'),\n",
+ " dict(input_format='NCTHW', type='FormatShape'),\n",
+ " dict(type='PackActionInputs'),\n",
+ "]\n",
+ "vis_backends = dict(\n",
+ " save_dir='experiments/tensorboard', type='TensorboardVisBackend')\n",
+ "visualizer = dict(\n",
+ " type='ActionVisualizer',\n",
+ " vis_backends=dict(\n",
+ " save_dir='experiments/tensorboard', type='TensorboardVisBackend'))\n",
+ "work_dir = 'experiments'\n",
+ "\n",
+ "12/04 22:32:34 - mmengine - \u001b[4m\u001b[97mINFO\u001b[0m - Distributed training is not used, all SyncBatchNorm (SyncBN) layers in the model will be automatically reverted to BatchNormXd layers if they are used.\n",
+ "12/04 22:32:34 - mmengine - \u001b[4m\u001b[97mINFO\u001b[0m - Hooks will be executed in the following order:\n",
+ "before_run:\n",
+ "(VERY_HIGH ) RuntimeInfoHook \n",
+ "(BELOW_NORMAL) LoggerHook \n",
+ " -------------------- \n",
+ "before_train:\n",
+ "(VERY_HIGH ) RuntimeInfoHook \n",
+ "(NORMAL ) IterTimerHook \n",
+ "(VERY_LOW ) CheckpointHook \n",
+ " -------------------- \n",
+ "before_train_epoch:\n",
+ "(VERY_HIGH ) RuntimeInfoHook \n",
+ "(NORMAL ) IterTimerHook \n",
+ "(NORMAL ) DistSamplerSeedHook \n",
+ " -------------------- \n",
+ "before_train_iter:\n",
+ "(VERY_HIGH ) RuntimeInfoHook \n",
+ "(NORMAL ) IterTimerHook \n",
+ " -------------------- \n",
+ "after_train_iter:\n",
+ "(VERY_HIGH ) RuntimeInfoHook \n",
+ "(NORMAL ) IterTimerHook \n",
+ "(BELOW_NORMAL) LoggerHook \n",
+ "(LOW ) ParamSchedulerHook \n",
+ "(VERY_LOW ) CheckpointHook \n",
+ " -------------------- \n",
+ "after_train_epoch:\n",
+ "(NORMAL ) IterTimerHook \n",
+ "(NORMAL ) SyncBuffersHook \n",
+ "(LOW ) ParamSchedulerHook \n",
+ "(VERY_LOW ) CheckpointHook \n",
+ " -------------------- \n",
+ "before_val:\n",
+ "(VERY_HIGH ) RuntimeInfoHook \n",
+ " -------------------- \n",
+ "before_val_epoch:\n",
+ "(NORMAL ) IterTimerHook \n",
+ "(NORMAL ) SyncBuffersHook \n",
+ " -------------------- \n",
+ "before_val_iter:\n",
+ "(NORMAL ) IterTimerHook \n",
+ " -------------------- \n",
+ "after_val_iter:\n",
+ "(NORMAL ) IterTimerHook \n",
+ "(NORMAL ) CustomVisualizationHook \n",
+ "(BELOW_NORMAL) LoggerHook \n",
+ " -------------------- \n",
+ "after_val_epoch:\n",
+ "(VERY_HIGH ) RuntimeInfoHook \n",
+ "(NORMAL ) IterTimerHook \n",
+ "(BELOW_NORMAL) LoggerHook \n",
+ "(LOW ) ParamSchedulerHook \n",
+ "(VERY_LOW ) CheckpointHook \n",
+ " -------------------- \n",
+ "after_val:\n",
+ "(VERY_HIGH ) RuntimeInfoHook \n",
+ " -------------------- \n",
+ "after_train:\n",
+ "(VERY_HIGH ) RuntimeInfoHook \n",
+ "(VERY_LOW ) CheckpointHook \n",
+ " -------------------- \n",
+ "before_test:\n",
+ "(VERY_HIGH ) RuntimeInfoHook \n",
+ " -------------------- \n",
+ "before_test_epoch:\n",
+ "(NORMAL ) IterTimerHook \n",
+ " -------------------- \n",
+ "before_test_iter:\n",
+ "(NORMAL ) IterTimerHook \n",
+ " -------------------- \n",
+ "after_test_iter:\n",
+ "(NORMAL ) IterTimerHook \n",
+ "(NORMAL ) CustomVisualizationHook \n",
+ "(BELOW_NORMAL) LoggerHook \n",
+ " -------------------- \n",
+ "after_test_epoch:\n",
+ "(VERY_HIGH ) RuntimeInfoHook \n",
+ "(NORMAL ) IterTimerHook \n",
+ "(BELOW_NORMAL) LoggerHook \n",
+ " -------------------- \n",
+ "after_test:\n",
+ "(VERY_HIGH ) RuntimeInfoHook \n",
+ " -------------------- \n",
+ "after_run:\n",
+ "(BELOW_NORMAL) LoggerHook \n",
+ " -------------------- \n"
+ ]
+ }
+ ],
+ "source": [
+ "from mmengine.runner import Runner\n",
+ "from mmengine.config import Config\n",
+ "\n",
+ "runner_cfg = Config.fromfile(\n",
+ " \"configs/models/vit-s-p16_videomaev2-vit-g-dist-k710-pre_16x4x1_kinetics-400_base.py\"\n",
+ ")\n",
+ "runner = Runner.from_cfg(runner_cfg)\n",
+ "model = runner.model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Functions from the VideoMAE repo\n",
+ "\n",
+ "\n",
+ "def get_num_layer_for_vit(var_name, num_max_layer):\n",
+ " if var_name in (\"backbone.cls_token\", \"backbone.mask_token\", \"backbone.pos_embed\"):\n",
+ " return 0\n",
+ " elif var_name.startswith(\"backbone.patch_embed\"):\n",
+ " return 0\n",
+ " elif var_name.startswith(\"backbone.rel_pos_bias\"):\n",
+ " return num_max_layer - 1\n",
+ " elif var_name.startswith(\"backbone.blocks\"):\n",
+ " layer_id = int(var_name.split(\".\")[2])\n",
+ " return layer_id + 1\n",
+ " else:\n",
+ " return num_max_layer - 1\n",
+ "\n",
+ "\n",
+ "class LayerDecayValueAssigner(object):\n",
+ " def __init__(self, values):\n",
+ " self.values = values\n",
+ "\n",
+ " def get_scale(self, layer_id):\n",
+ " return self.values[layer_id]\n",
+ "\n",
+ " def get_layer_id(self, var_name):\n",
+ " return get_num_layer_for_vit(var_name, len(self.values))\n",
+ "\n",
+ "\n",
+ "def get_parameter_groups(\n",
+ " model, weight_decay=1e-5, skip_list=(), get_num_layer=None, get_layer_scale=None\n",
+ "):\n",
+ " parameter_group_names = {}\n",
+ " parameter_group_vars = {}\n",
+ "\n",
+ " for name, param in model.named_parameters():\n",
+ " if not param.requires_grad:\n",
+ " continue # frozen weights\n",
+ " if (\n",
+ " len(param.shape) == 1\n",
+ " or name.endswith(\".bias\")\n",
+ " or name.endswith(\".scale\")\n",
+ " or name in skip_list\n",
+ " ):\n",
+ " group_name = \"no_decay\"\n",
+ " this_weight_decay = 0.0\n",
+ " else:\n",
+ " group_name = \"decay\"\n",
+ " this_weight_decay = weight_decay\n",
+ " if get_num_layer is not None:\n",
+ " layer_id = get_num_layer(name)\n",
+ " group_name = \"layer_%d_%s\" % (layer_id, group_name)\n",
+ " else:\n",
+ " layer_id = None\n",
+ "\n",
+ " if group_name not in parameter_group_names:\n",
+ " if get_layer_scale is not None:\n",
+ " scale = get_layer_scale(layer_id)\n",
+ " else:\n",
+ " scale = 1.0\n",
+ "\n",
+ " parameter_group_names[group_name] = {\n",
+ " \"weight_decay\": this_weight_decay,\n",
+ " \"params\": [],\n",
+ " \"lr_scale\": scale,\n",
+ " }\n",
+ " parameter_group_vars[group_name] = {\n",
+ " \"weight_decay\": this_weight_decay,\n",
+ " \"params\": [],\n",
+ " \"lr_scale\": scale,\n",
+ " }\n",
+ "\n",
+ " parameter_group_vars[group_name][\"params\"].append(param)\n",
+ " parameter_group_names[group_name][\"params\"].append(name)\n",
+ "\n",
+ " return parameter_group_names"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Get the parameter groups from VideoMAE\n",
+ "\n",
+ "assigner = LayerDecayValueAssigner(\n",
+ " list(LAYER_DECAY ** (MODEL_DEPTH + 1 - i) for i in range(MODEL_DEPTH + 2))\n",
+ ")\n",
+ "\n",
+ "groups = get_parameter_groups(\n",
+ " model,\n",
+ " BASE_WEIGHT_DECAY,\n",
+ " get_num_layer=assigner.get_layer_id,\n",
+ " get_layer_scale=assigner.get_scale,\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "{'backbone.patch_embed.projection.weight': {'lr_mult': 0.023757264018058777,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.patch_embed.projection.bias': {'lr_mult': 0.023757264018058777,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.0.norm1.weight': {'lr_mult': 0.03167635202407837,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.0.norm1.bias': {'lr_mult': 0.03167635202407837,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.0.attn.q_bias': {'lr_mult': 0.03167635202407837,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.0.attn.v_bias': {'lr_mult': 0.03167635202407837,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.0.attn.proj.bias': {'lr_mult': 0.03167635202407837,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.0.norm2.weight': {'lr_mult': 0.03167635202407837,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.0.norm2.bias': {'lr_mult': 0.03167635202407837,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.0.mlp.layers.0.0.bias': {'lr_mult': 0.03167635202407837,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.0.mlp.layers.1.bias': {'lr_mult': 0.03167635202407837,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.0.attn.qkv.weight': {'lr_mult': 0.03167635202407837,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.0.attn.proj.weight': {'lr_mult': 0.03167635202407837,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.0.mlp.layers.0.0.weight': {'lr_mult': 0.03167635202407837,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.0.mlp.layers.1.weight': {'lr_mult': 0.03167635202407837,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.1.norm1.weight': {'lr_mult': 0.04223513603210449,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.1.norm1.bias': {'lr_mult': 0.04223513603210449,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.1.attn.q_bias': {'lr_mult': 0.04223513603210449,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.1.attn.v_bias': {'lr_mult': 0.04223513603210449,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.1.attn.proj.bias': {'lr_mult': 0.04223513603210449,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.1.norm2.weight': {'lr_mult': 0.04223513603210449,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.1.norm2.bias': {'lr_mult': 0.04223513603210449,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.1.mlp.layers.0.0.bias': {'lr_mult': 0.04223513603210449,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.1.mlp.layers.1.bias': {'lr_mult': 0.04223513603210449,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.1.attn.qkv.weight': {'lr_mult': 0.04223513603210449,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.1.attn.proj.weight': {'lr_mult': 0.04223513603210449,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.1.mlp.layers.0.0.weight': {'lr_mult': 0.04223513603210449,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.1.mlp.layers.1.weight': {'lr_mult': 0.04223513603210449,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.2.norm1.weight': {'lr_mult': 0.056313514709472656,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.2.norm1.bias': {'lr_mult': 0.056313514709472656,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.2.attn.q_bias': {'lr_mult': 0.056313514709472656,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.2.attn.v_bias': {'lr_mult': 0.056313514709472656,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.2.attn.proj.bias': {'lr_mult': 0.056313514709472656,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.2.norm2.weight': {'lr_mult': 0.056313514709472656,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.2.norm2.bias': {'lr_mult': 0.056313514709472656,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.2.mlp.layers.0.0.bias': {'lr_mult': 0.056313514709472656,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.2.mlp.layers.1.bias': {'lr_mult': 0.056313514709472656,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.2.attn.qkv.weight': {'lr_mult': 0.056313514709472656,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.2.attn.proj.weight': {'lr_mult': 0.056313514709472656,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.2.mlp.layers.0.0.weight': {'lr_mult': 0.056313514709472656,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.2.mlp.layers.1.weight': {'lr_mult': 0.056313514709472656,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.3.norm1.weight': {'lr_mult': 0.07508468627929688,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.3.norm1.bias': {'lr_mult': 0.07508468627929688,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.3.attn.q_bias': {'lr_mult': 0.07508468627929688,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.3.attn.v_bias': {'lr_mult': 0.07508468627929688,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.3.attn.proj.bias': {'lr_mult': 0.07508468627929688,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.3.norm2.weight': {'lr_mult': 0.07508468627929688,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.3.norm2.bias': {'lr_mult': 0.07508468627929688,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.3.mlp.layers.0.0.bias': {'lr_mult': 0.07508468627929688,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.3.mlp.layers.1.bias': {'lr_mult': 0.07508468627929688,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.3.attn.qkv.weight': {'lr_mult': 0.07508468627929688,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.3.attn.proj.weight': {'lr_mult': 0.07508468627929688,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.3.mlp.layers.0.0.weight': {'lr_mult': 0.07508468627929688,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.3.mlp.layers.1.weight': {'lr_mult': 0.07508468627929688,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.4.norm1.weight': {'lr_mult': 0.1001129150390625,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.4.norm1.bias': {'lr_mult': 0.1001129150390625,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.4.attn.q_bias': {'lr_mult': 0.1001129150390625,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.4.attn.v_bias': {'lr_mult': 0.1001129150390625,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.4.attn.proj.bias': {'lr_mult': 0.1001129150390625,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.4.norm2.weight': {'lr_mult': 0.1001129150390625,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.4.norm2.bias': {'lr_mult': 0.1001129150390625,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.4.mlp.layers.0.0.bias': {'lr_mult': 0.1001129150390625,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.4.mlp.layers.1.bias': {'lr_mult': 0.1001129150390625,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.4.attn.qkv.weight': {'lr_mult': 0.1001129150390625,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.4.attn.proj.weight': {'lr_mult': 0.1001129150390625,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.4.mlp.layers.0.0.weight': {'lr_mult': 0.1001129150390625,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.4.mlp.layers.1.weight': {'lr_mult': 0.1001129150390625,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.5.norm1.weight': {'lr_mult': 0.13348388671875,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.5.norm1.bias': {'lr_mult': 0.13348388671875,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.5.attn.q_bias': {'lr_mult': 0.13348388671875,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.5.attn.v_bias': {'lr_mult': 0.13348388671875,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.5.attn.proj.bias': {'lr_mult': 0.13348388671875,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.5.norm2.weight': {'lr_mult': 0.13348388671875,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.5.norm2.bias': {'lr_mult': 0.13348388671875,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.5.mlp.layers.0.0.bias': {'lr_mult': 0.13348388671875,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.5.mlp.layers.1.bias': {'lr_mult': 0.13348388671875,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.5.attn.qkv.weight': {'lr_mult': 0.13348388671875,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.5.attn.proj.weight': {'lr_mult': 0.13348388671875,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.5.mlp.layers.0.0.weight': {'lr_mult': 0.13348388671875,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.5.mlp.layers.1.weight': {'lr_mult': 0.13348388671875,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.6.norm1.weight': {'lr_mult': 0.177978515625,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.6.norm1.bias': {'lr_mult': 0.177978515625, 'decay_mult': 0},\n",
+ " 'backbone.blocks.6.attn.q_bias': {'lr_mult': 0.177978515625, 'decay_mult': 0},\n",
+ " 'backbone.blocks.6.attn.v_bias': {'lr_mult': 0.177978515625, 'decay_mult': 0},\n",
+ " 'backbone.blocks.6.attn.proj.bias': {'lr_mult': 0.177978515625,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.6.norm2.weight': {'lr_mult': 0.177978515625,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.6.norm2.bias': {'lr_mult': 0.177978515625, 'decay_mult': 0},\n",
+ " 'backbone.blocks.6.mlp.layers.0.0.bias': {'lr_mult': 0.177978515625,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.6.mlp.layers.1.bias': {'lr_mult': 0.177978515625,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.6.attn.qkv.weight': {'lr_mult': 0.177978515625,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.6.attn.proj.weight': {'lr_mult': 0.177978515625,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.6.mlp.layers.0.0.weight': {'lr_mult': 0.177978515625,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.6.mlp.layers.1.weight': {'lr_mult': 0.177978515625,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.7.norm1.weight': {'lr_mult': 0.2373046875, 'decay_mult': 0},\n",
+ " 'backbone.blocks.7.norm1.bias': {'lr_mult': 0.2373046875, 'decay_mult': 0},\n",
+ " 'backbone.blocks.7.attn.q_bias': {'lr_mult': 0.2373046875, 'decay_mult': 0},\n",
+ " 'backbone.blocks.7.attn.v_bias': {'lr_mult': 0.2373046875, 'decay_mult': 0},\n",
+ " 'backbone.blocks.7.attn.proj.bias': {'lr_mult': 0.2373046875,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.7.norm2.weight': {'lr_mult': 0.2373046875, 'decay_mult': 0},\n",
+ " 'backbone.blocks.7.norm2.bias': {'lr_mult': 0.2373046875, 'decay_mult': 0},\n",
+ " 'backbone.blocks.7.mlp.layers.0.0.bias': {'lr_mult': 0.2373046875,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.7.mlp.layers.1.bias': {'lr_mult': 0.2373046875,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.7.attn.qkv.weight': {'lr_mult': 0.2373046875,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.7.attn.proj.weight': {'lr_mult': 0.2373046875,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.7.mlp.layers.0.0.weight': {'lr_mult': 0.2373046875,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.7.mlp.layers.1.weight': {'lr_mult': 0.2373046875,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.8.norm1.weight': {'lr_mult': 0.31640625, 'decay_mult': 0},\n",
+ " 'backbone.blocks.8.norm1.bias': {'lr_mult': 0.31640625, 'decay_mult': 0},\n",
+ " 'backbone.blocks.8.attn.q_bias': {'lr_mult': 0.31640625, 'decay_mult': 0},\n",
+ " 'backbone.blocks.8.attn.v_bias': {'lr_mult': 0.31640625, 'decay_mult': 0},\n",
+ " 'backbone.blocks.8.attn.proj.bias': {'lr_mult': 0.31640625, 'decay_mult': 0},\n",
+ " 'backbone.blocks.8.norm2.weight': {'lr_mult': 0.31640625, 'decay_mult': 0},\n",
+ " 'backbone.blocks.8.norm2.bias': {'lr_mult': 0.31640625, 'decay_mult': 0},\n",
+ " 'backbone.blocks.8.mlp.layers.0.0.bias': {'lr_mult': 0.31640625,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.8.mlp.layers.1.bias': {'lr_mult': 0.31640625,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.8.attn.qkv.weight': {'lr_mult': 0.31640625, 'decay_mult': 1},\n",
+ " 'backbone.blocks.8.attn.proj.weight': {'lr_mult': 0.31640625,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.8.mlp.layers.0.0.weight': {'lr_mult': 0.31640625,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.8.mlp.layers.1.weight': {'lr_mult': 0.31640625,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.9.norm1.weight': {'lr_mult': 0.421875, 'decay_mult': 0},\n",
+ " 'backbone.blocks.9.norm1.bias': {'lr_mult': 0.421875, 'decay_mult': 0},\n",
+ " 'backbone.blocks.9.attn.q_bias': {'lr_mult': 0.421875, 'decay_mult': 0},\n",
+ " 'backbone.blocks.9.attn.v_bias': {'lr_mult': 0.421875, 'decay_mult': 0},\n",
+ " 'backbone.blocks.9.attn.proj.bias': {'lr_mult': 0.421875, 'decay_mult': 0},\n",
+ " 'backbone.blocks.9.norm2.weight': {'lr_mult': 0.421875, 'decay_mult': 0},\n",
+ " 'backbone.blocks.9.norm2.bias': {'lr_mult': 0.421875, 'decay_mult': 0},\n",
+ " 'backbone.blocks.9.mlp.layers.0.0.bias': {'lr_mult': 0.421875,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.9.mlp.layers.1.bias': {'lr_mult': 0.421875, 'decay_mult': 0},\n",
+ " 'backbone.blocks.9.attn.qkv.weight': {'lr_mult': 0.421875, 'decay_mult': 1},\n",
+ " 'backbone.blocks.9.attn.proj.weight': {'lr_mult': 0.421875, 'decay_mult': 1},\n",
+ " 'backbone.blocks.9.mlp.layers.0.0.weight': {'lr_mult': 0.421875,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.9.mlp.layers.1.weight': {'lr_mult': 0.421875,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.10.norm1.weight': {'lr_mult': 0.5625, 'decay_mult': 0},\n",
+ " 'backbone.blocks.10.norm1.bias': {'lr_mult': 0.5625, 'decay_mult': 0},\n",
+ " 'backbone.blocks.10.attn.q_bias': {'lr_mult': 0.5625, 'decay_mult': 0},\n",
+ " 'backbone.blocks.10.attn.v_bias': {'lr_mult': 0.5625, 'decay_mult': 0},\n",
+ " 'backbone.blocks.10.attn.proj.bias': {'lr_mult': 0.5625, 'decay_mult': 0},\n",
+ " 'backbone.blocks.10.norm2.weight': {'lr_mult': 0.5625, 'decay_mult': 0},\n",
+ " 'backbone.blocks.10.norm2.bias': {'lr_mult': 0.5625, 'decay_mult': 0},\n",
+ " 'backbone.blocks.10.mlp.layers.0.0.bias': {'lr_mult': 0.5625,\n",
+ " 'decay_mult': 0},\n",
+ " 'backbone.blocks.10.mlp.layers.1.bias': {'lr_mult': 0.5625, 'decay_mult': 0},\n",
+ " 'backbone.blocks.10.attn.qkv.weight': {'lr_mult': 0.5625, 'decay_mult': 1},\n",
+ " 'backbone.blocks.10.attn.proj.weight': {'lr_mult': 0.5625, 'decay_mult': 1},\n",
+ " 'backbone.blocks.10.mlp.layers.0.0.weight': {'lr_mult': 0.5625,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.10.mlp.layers.1.weight': {'lr_mult': 0.5625,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.11.norm1.weight': {'lr_mult': 0.75, 'decay_mult': 0},\n",
+ " 'backbone.blocks.11.norm1.bias': {'lr_mult': 0.75, 'decay_mult': 0},\n",
+ " 'backbone.blocks.11.attn.q_bias': {'lr_mult': 0.75, 'decay_mult': 0},\n",
+ " 'backbone.blocks.11.attn.v_bias': {'lr_mult': 0.75, 'decay_mult': 0},\n",
+ " 'backbone.blocks.11.attn.proj.bias': {'lr_mult': 0.75, 'decay_mult': 0},\n",
+ " 'backbone.blocks.11.norm2.weight': {'lr_mult': 0.75, 'decay_mult': 0},\n",
+ " 'backbone.blocks.11.norm2.bias': {'lr_mult': 0.75, 'decay_mult': 0},\n",
+ " 'backbone.blocks.11.mlp.layers.0.0.bias': {'lr_mult': 0.75, 'decay_mult': 0},\n",
+ " 'backbone.blocks.11.mlp.layers.1.bias': {'lr_mult': 0.75, 'decay_mult': 0},\n",
+ " 'backbone.blocks.11.attn.qkv.weight': {'lr_mult': 0.75, 'decay_mult': 1},\n",
+ " 'backbone.blocks.11.attn.proj.weight': {'lr_mult': 0.75, 'decay_mult': 1},\n",
+ " 'backbone.blocks.11.mlp.layers.0.0.weight': {'lr_mult': 0.75,\n",
+ " 'decay_mult': 1},\n",
+ " 'backbone.blocks.11.mlp.layers.1.weight': {'lr_mult': 0.75, 'decay_mult': 1},\n",
+ " 'backbone.fc_norm.weight': {'lr_mult': 1.0, 'decay_mult': 0},\n",
+ " 'backbone.fc_norm.bias': {'lr_mult': 1.0, 'decay_mult': 0},\n",
+ " 'cls_head.fc_cls.bias': {'lr_mult': 1.0, 'decay_mult': 0},\n",
+ " 'cls_head.fc_cls.weight': {'lr_mult': 1.0, 'decay_mult': 1}}"
+ ]
+ },
+ "execution_count": 11,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "# Convert the parameter groups to the format used by mmaction\n",
+ "\n",
+ "custom_keys = {}\n",
+ "for _, group in groups.items():\n",
+ " decay_mult = 0 if group[\"weight_decay\"] == 0 else 1\n",
+ " params = group[\"params\"]\n",
+ " lr_mult = group[\"lr_scale\"]\n",
+ " for param in params:\n",
+ " custom_keys[param] = {\"lr_mult\": lr_mult, \"decay_mult\": decay_mult}\n",
+ "\n",
+ "custom_keys"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "human-fall-detection",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.13"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
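
Note: the `custom_keys` dict assembled in the final cell above is shaped for mmengine/mmaction's `paramwise_cfg`. The sketch below is an illustration only (not part of the repository) of how it could be consumed; `LAYER_DECAY = 0.75` and `MODEL_DEPTH = 12` are assumptions inferred from the printed multipliers (0.75**13 ≈ 0.02376 for `patch_embed`, 1.0 for the head), and the optimizer hyperparameters are placeholders.

# Minimal sketch (assumed usage, not in the repo): feed the per-parameter
# multipliers into an mmengine-style optimizer config.
# LAYER_DECAY and MODEL_DEPTH are inferred from the lr_mult values printed
# above, not read from the notebook itself.
LAYER_DECAY = 0.75
MODEL_DEPTH = 12

scales = [LAYER_DECAY ** (MODEL_DEPTH + 1 - i) for i in range(MODEL_DEPTH + 2)]
assert abs(scales[0] - 0.023757264018058777) < 1e-12  # matches patch_embed lr_mult above
assert scales[-1] == 1.0                              # matches fc_norm / cls_head lr_mult

optim_wrapper = dict(
    optimizer=dict(type="AdamW", lr=1e-3, weight_decay=0.05),  # placeholder values
    paramwise_cfg=dict(custom_keys=custom_keys),  # dict built in the conversion cell
)

In an actual config file, the same multipliers would simply be written out under `optim_wrapper.paramwise_cfg.custom_keys`, which is exactly what the conversion loop above prepares.
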
diff --git a/notebooks/dataset_label_analysis.ipynb b/notebooks/dataset_label_analysis.ipynb
index dbc73a3..d4d6287 100644
--- a/notebooks/dataset_label_analysis.ipynb
+++ b/notebooks/dataset_label_analysis.ipynb
@@ -1,353 +1,837 @@
{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Label Analysis\n",
- "\n",
- "In this notebook we analyse the datsets and label distributions we get for different settings for sampling and labeling strategy."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 11,
- "metadata": {},
- "outputs": [],
- "source": [
- "%reload_ext autoreload\n",
- "%autoreload 2\n",
- "import re\n",
- "\n",
- "import matplotlib.pyplot as plt\n",
- "import numpy as np\n",
- "import pandas as pd\n",
- "\n",
- "from datasets import HighQualityFallDataset\n",
- "from datasets.transforms.label_strategy import HQFD_LABEL_DESCRIPTION, PriorityLabel\n",
- "from datasets.transforms.sampling_strategy import GaussianSampling, UniformSampling"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 12,
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/html": [
- "
\n",
- "\n",
- "
\n",
- " \n",
- " \n",
- " | \n",
- " filename | \n",
- " label | \n",
- " interval | \n",
- " sample_idx | \n",
- " modality | \n",
- " start_index | \n",
- " label_name | \n",
- " video_category | \n",
- "
\n",
- " \n",
- " \n",
- " \n",
- " 0 | \n",
- " data/Fall_Simulation_Data/videos/ADL17_Cam1.avi | \n",
- " 2 | \n",
- " (30.0, 40.0) | \n",
- " 0 | \n",
- " RGB | \n",
- " 0 | \n",
- " Other | \n",
- " ADL | \n",
- "
\n",
- " \n",
- " 1 | \n",
- " data/Fall_Simulation_Data/videos/ADL17_Cam1.avi | \n",
- " 2 | \n",
- " (50.0, 60.0) | \n",
- " 1 | \n",
- " RGB | \n",
- " 0 | \n",
- " Other | \n",
- " ADL | \n",
- "
\n",
- " \n",
- " 2 | \n",
- " data/Fall_Simulation_Data/videos/ADL17_Cam1.avi | \n",
- " 2 | \n",
- " (60.0, 70.0) | \n",
- " 2 | \n",
- " RGB | \n",
- " 0 | \n",
- " Other | \n",
- " ADL | \n",
- "
\n",
- " \n",
- " 3 | \n",
- " data/Fall_Simulation_Data/videos/ADL17_Cam1.avi | \n",
- " 2 | \n",
- " (90.0, 100.0) | \n",
- " 3 | \n",
- " RGB | \n",
- " 0 | \n",
- " Other | \n",
- " ADL | \n",
- "
\n",
- " \n",
- " 4 | \n",
- " data/Fall_Simulation_Data/videos/ADL17_Cam1.avi | \n",
- " 2 | \n",
- " (160.0, 170.0) | \n",
- " 4 | \n",
- " RGB | \n",
- " 0 | \n",
- " Other | \n",
- " ADL | \n",
- "
\n",
- " \n",
- "
\n",
- "
"
- ],
- "text/plain": [
- " filename label interval \\\n",
- "0 data/Fall_Simulation_Data/videos/ADL17_Cam1.avi 2 (30.0, 40.0) \n",
- "1 data/Fall_Simulation_Data/videos/ADL17_Cam1.avi 2 (50.0, 60.0) \n",
- "2 data/Fall_Simulation_Data/videos/ADL17_Cam1.avi 2 (60.0, 70.0) \n",
- "3 data/Fall_Simulation_Data/videos/ADL17_Cam1.avi 2 (90.0, 100.0) \n",
- "4 data/Fall_Simulation_Data/videos/ADL17_Cam1.avi 2 (160.0, 170.0) \n",
- "\n",
- " sample_idx modality start_index label_name video_category \n",
- "0 0 RGB 0 Other ADL \n",
- "1 1 RGB 0 Other ADL \n",
- "2 2 RGB 0 Other ADL \n",
- "3 3 RGB 0 Other ADL \n",
- "4 4 RGB 0 Other ADL "
- ]
- },
- "execution_count": 12,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "ANN_FILE = \"data/Fall_Simulation_Data/annotations.csv\"\n",
- "\n",
- "uniform_sampling = UniformSampling(clip_len=10, stride=0, overlap=False)\n",
- "gaussian_sampling = GaussianSampling(\n",
- " clip_len=10, n_samples_per_sec=None, fallback_sampler=None, std=None\n",
- ")\n",
- "label_strategy = PriorityLabel(\n",
- " label_description=HQFD_LABEL_DESCRIPTION,\n",
- " threshold=0.0,\n",
- " absolute_threshold=False,\n",
- " priority=[0, 1, 2],\n",
- ")\n",
- "\n",
- "hqfd = HighQualityFallDataset(\n",
- " ann_file=ANN_FILE,\n",
- " sampling_strategy=gaussian_sampling,\n",
- " label_strategy=label_strategy,\n",
- " pipeline=[],\n",
- " num_classes=3,\n",
- " test_mode=False,\n",
- " drop_ratios=[0.0, 0.0, 0.75],\n",
- ")\n",
- "\n",
- "df_hqfd = pd.DataFrame(list(hqfd))\n",
- "class_names = [\"Fall\", \"Lying\", \"Other\"]\n",
- "df_hqfd[\"label_name\"] = df_hqfd[\"label\"].apply(lambda x: class_names[x])\n",
- "\n",
- "\n",
- "def extract_category(filename):\n",
- " match = re.search(r\"(ADL|Fall)\", filename.split(\"/\")[-1])\n",
- " return match.group(1) if match else None\n",
- "\n",
- "\n",
- "df_hqfd[\"video_category\"] = df_hqfd[\"filename\"].apply(extract_category)\n",
- "\n",
- "df_hqfd.head()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 13,
- "metadata": {},
- "outputs": [],
- "source": [
- "def plot_label_dist(df_hqfd):\n",
- " display(\"---- Label Distribution ----\")\n",
- " display(df_hqfd[\"label_name\"].value_counts().sort_index() / len(df_hqfd))\n",
- "\n",
- " display(\"---- Label Counts ----\")\n",
- " display(df_hqfd[\"label_name\"].value_counts().sort_index())\n",
- " df_hqfd[\"label_name\"].value_counts().sort_index().plot(kind=\"bar\")\n",
- " plt.show()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 14,
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "'---- Label Distribution ----'"
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "data": {
- "text/plain": [
- "label_name\n",
- "Fall 0.288679\n",
- "Lying 0.204657\n",
- "Other 0.506664\n",
- "Name: count, dtype: float64"
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "data": {
- "text/plain": [
- "'---- Label Counts ----'"
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "data": {
- "text/plain": [
- "label_name\n",
- "Fall 1711\n",
- "Lying 1213\n",
- "Other 3003\n",
- "Name: count, dtype: int64"
- ]
- },
- "metadata": {},
- "output_type": "display_data"
- },
- {
- "data": {
- "image/png": "iVBORw0KGgoAAAANSUhEUgAAAjAAAAHKCAYAAAAdEHGNAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8g+/7EAAAACXBIWXMAAA9hAAAPYQGoP6dpAAAtQ0lEQVR4nO3df1TUdb7H8RegICgDksLIEYnWUjDxB246la6WiUatXumUK+uPoh8a1CqbGfea+eNuuJaplWWdblGtpnZvtf7YVML8keIvuvizLA3DVgdaTUZJQWDuH3v83mZFEwOHDzwf58w5znw/8533t6bjs5nvzPi43W63AAAADOLr7QEAAABqi4ABAADGIWAAAIBxCBgAAGAcAgYAABiHgAEAAMYhYAAAgHEIGAAAYJxm3h6gvlRXV+vo0aMKDg6Wj4+Pt8cBAACXwe1269SpU4qMjJSv78VfZ2m0AXP06FFFRUV5ewwAAHAFjhw5ovbt2190e6MNmODgYEn//Adgs9m8PA0AALgcLpdLUVFR1t/jF9NoA+b820Y2m42AAQDAMD93+gcn8QIAAOMQMAAAwDgEDAAAMA4BAwAAjEPAAAAA4xAwAADAOAQMAAAwDgEDAACMQ8AAAADjEDAAAMA4tQqYV199VfHx8dbX8zscDn388cfW9rNnzyotLU3XXHONWrVqpeTkZBUXF3vso6ioSElJSQoKClJ4eLgmTZqkyspKjzXr169Xz549FRAQoI4dOyo7O/vKjxAAADQ6tQqY9u3ba9asWcrPz9fOnTt12223aejQodq3b58kaeLEiVqxYoXef/99bdiwQUePHtXw4cOt+1dVVSkpKUkVFRXasmWL3n77bWVnZ2vq1KnWmsLCQiUlJWnAgAEqKCjQhAkT9OCDD2rNmjV1dMgAAMB0Pm632/1LdhAWFqbnnntO99xzj9q2bavFixfrnnvukSR9+eWXio2NVV5envr06aOPP/5Yd911l44ePaqIiAhJ0sKFCzV58mR9//338vf31+TJk7Vq1Srt3bvXeowRI0bo5MmTWr169WXP5XK5FBISotLSUn7MEQAAQ1zu399XfA5MVVWVlixZorKyMjkcDuXn5+vcuXMaOHCgtaZz587q0KGD8vLyJEl5eXnq2rWrFS+SlJiYKJfLZb2Kk5eX57GP82vO7wMAAKBZbe+wZ88eORwOnT17Vq1atdKHH36ouLg4FRQUyN/fX6GhoR7rIyIi5HQ6JUlOp9MjXs5vP7/tUmtcLpfOnDmjwMDAGucqLy9XeXm5dd3lctX20AAADdS1T63y9giNxuFZSd4eoU7U+hWYTp06qaCgQNu2bdP48eM1ZswY7d+/vz5mq5WsrCyFhIRYl6ioKG+PBAAA6kmtA8bf318dO3ZUQkKCsrKy1K1bN82fP192u10VFRU6efKkx/ri4mLZ7XZJkt1uv+BTSeev/9wam8120VdfJCkzM1OlpaXW5ciRI7U9NAAAYIhf/D0w1dXVKi8vV0JCgpo3b67c3Fxr24EDB1RUVCSHwyFJcjgc2rNnj0pKSqw1OTk5stlsiouLs9b8dB/n15zfx8UEBARYH+8+fwEAAI1Trc6ByczM1JAhQ9ShQwedOnVKixcv1vr167VmzRqFhIQoNTVVGRkZCgsLk81m02OPPSaHw6E+ffpIkgYNGqS4uDiNGjVKs2fPltPp1JQpU5SWlqaAgABJ0rhx4/Tyyy/rySef1AMPPKB169Zp2bJlWrWK9z8BAMA/1SpgSkpKNHr0aB07dkwhISGKj4/XmjVrdMcdd0iS5s6dK19fXyUnJ6u8vFyJiYl65ZVXrPv7+flp5cqVGj9+vBwOh1q2bKkxY8ZoxowZ1pqYmBitWrVKEydO1Pz589W+fXu98cYbSkxMrKNDBgAApvvF3wPTUPE9MADQePAppLrT0D+FVO/fAwMAAOAtBAwAADAOAQMAAIxDwAAAAOMQMAAAwDgEDAAAMA4BAwAAjEPAAAAA4xAwAADAOAQMAAAwDgEDAACMQ8AAAADjEDAAAMA4BAwAADAOAQMAAIxDwAAAAOMQMAAAwDgEDAAAMA4BAwAAjEPAAAAA4xAwAADAOAQMAAAwDgEDAACMQ8AAAADjEDAAAMA4BAwAADAOAQMAAIxDwAAAAOMQMAAAwDgEDAAAMA4BAwAAjEPAAAAA4xAwAADAOAQMAAAwDgEDAACMQ8AAAADjEDAAAMA4BAwAADAOAQMAAIxDwAAAAOMQMAAAwDgEDAAAMA4BAwAAjEPAAAAA4xAwAADAOAQMAAAwDgEDAACMQ8AAAADj1CpgsrKy9Otf/1rBwcEKDw/XsGHDdODAAY81/fv3l4+Pj8dl3LhxHmuKioqUlJSkoKAghYeHa9KkSaqsrPRYs379evXs2VMBAQHq2LGjsrOzr+wIAQBAo1OrgNmwYYPS0tK0detW5eTk6Ny5cxo0aJDKyso81j300EM6duyYdZk9e7a1raqqSklJSaqoqNCWLVv09ttvKzs7W1OnTrXWFBYWKikpSQMGDFBBQYEmTJigBx98UGvWrPmFhwsAABqDZrVZvHr1ao/r2dnZCg8PV35+vvr162fdHhQUJLvdXuM+1q5dq/379+uTTz5RRESEunfvrpkzZ2ry5MmaNm2a/P39tXDhQsXExGjOnDmSpNjYWH322WeaO3euEhMTa3uMAACgkflF58CUlpZKksLCwjxuX7Rokdq0aaMbb7xRmZmZ+vHHH61teXl56tq1qyIiIqzbEhMT5XK5tG/fPmvNwIEDPfaZmJiovLy8i85SXl4ul8vlcQEAAI1TrV6B+anq6mpNmDBBt9xyi2688Ubr9pEjRyo6OlqRkZHavXu3Jk+erAMHDuiDDz6QJDmdTo94kWRddzqdl1zjcrl05swZBQYGXjBPVlaWpk+ffqWHAwAADHLFAZOWlqa9e/fqs88+87j94Ycftv7ctWtXtWvXTrfffrsOHTqkX/3qV1c+6c/IzMxURkaGdd3lcikqKqreHg8AAHjPFb2FlJ6erpUrV+rTTz9V+/btL7m2d+/ekqSDBw9Kkux2u4qLiz3WnL9+/ryZi62x2Ww1vvoiSQEBAbLZbB4XAADQONUqYNxut9LT0/Xhhx9q3bp1iomJ+dn7FBQUSJLatWsnSXI4HNqzZ49KSkqsNTk5ObLZbIqLi7PW5ObmeuwnJydHDoejNuMCAIBGqlYBk5aWpr/85S9avHixgoOD5XQ65XQ6debMGUnSoUOHNHPmTOXn5+vw4cNavny5Ro8erX79+ik+Pl6SNGjQIMXFxWnUqFHatWuX1qxZoylTpigtLU0BAQGSpHHjxumbb77Rk08+qS+//FKvvPKKli1bpokTJ9bx4QMAABPVKmBeffVVlZaWqn///mrXrp11Wbp0qSTJ399fn3zyiQYNGqTOnTvrj3/8o5KTk7VixQprH35+flq5cqX8/PzkcDj0+9//XqNHj9aMGTOsNTExMVq1apVycnLUrVs3zZkzR2+88QYfoQYAAJIkH7fb7fb2EPXB5XIpJCREpaW
lnA8DAIa79qlV3h6h0Tg8K8nbI1zS5f79zW8hAQAA4xAwAADAOAQMAAAwDgEDAACMQ8AAAADjEDAAAMA4BAwAADAOAQMAAIxDwAAAAOMQMAAAwDgEDAAAMA4BAwAAjEPAAAAA4xAwAADAOAQMAAAwDgEDAACMQ8AAAADjEDAAAMA4BAwAADAOAQMAAIxDwAAAAOMQMAAAwDgEDAAAMA4BAwAAjEPAAAAA4xAwAADAOAQMAAAwDgEDAACMQ8AAAADjEDAAAMA4BAwAADAOAQMAAIxDwAAAAOMQMAAAwDgEDAAAMA4BAwAAjEPAAAAA4xAwAADAOAQMAAAwDgEDAACMQ8AAAADjEDAAAMA4BAwAADAOAQMAAIxDwAAAAOMQMAAAwDgEDAAAMA4BAwAAjFOrgMnKytKvf/1rBQcHKzw8XMOGDdOBAwc81pw9e1ZpaWm65ppr1KpVKyUnJ6u4uNhjTVFRkZKSkhQUFKTw8HBNmjRJlZWVHmvWr1+vnj17KiAgQB07dlR2dvaVHSEAAGh0ahUwGzZsUFpamrZu3aqcnBydO3dOgwYNUllZmbVm4sSJWrFihd5//31t2LBBR48e1fDhw63tVVVVSkpKUkVFhbZs2aK3335b2dnZmjp1qrWmsLBQSUlJGjBggAoKCjRhwgQ9+OCDWrNmTR0cMgAAMJ2P2+12X+mdv//+e4WHh2vDhg3q16+fSktL1bZtWy1evFj33HOPJOnLL79UbGys8vLy1KdPH3388ce66667dPToUUVEREiSFi5cqMmTJ+v777+Xv7+/Jk+erFWrVmnv3r3WY40YMUInT57U6tWrL2s2l8ulkJAQlZaWymazXekhAgAagGufWuXtERqNw7OSvD3CJV3u39+/6ByY0tJSSVJYWJgkKT8/X+fOndPAgQOtNZ07d1aHDh2Ul5cnScrLy1PXrl2teJGkxMREuVwu7du3z1rz032cX3N+HzUpLy+Xy+XyuAAAgMbpigOmurpaEyZM0C233KIbb7xRkuR0OuXv76/Q0FCPtREREXI6ndaan8bL+e3nt11qjcvl0pkzZ2qcJysrSyEhIdYlKirqSg8NAAA0cFccMGlpadq7d6+WLFlSl/NcsczMTJWWllqXI0eOeHskAABQT5pdyZ3S09O1cuVKbdy4Ue3bt7dut9vtqqio0MmTJz1ehSkuLpbdbrfWbN++3WN/5z+l9NM1//rJpeLiYtlsNgUGBtY4U0BAgAICAq7kcAAAgGFq9QqM2+1Wenq6PvzwQ61bt04xMTEe2xMSEtS8eXPl5uZatx04cEBFRUVyOBySJIfDoT179qikpMRak5OTI5vNpri4OGvNT/dxfs35fQAAgKatVq/ApKWlafHixfrrX/+q4OBg65yVkJAQBQYGKiQkRKmpqcrIyFBYWJhsNpsee+wxORwO9enTR5I0aNAgxcXFadSoUZo9e7acTqemTJmitLQ06xWUcePG6eWXX9aTTz6pBx54QOvWrdOyZcu0ahVnoQMAgFq+AvPqq6+qtLRU/fv3V7t27azL0qVLrTVz587VXXfdpeTkZPXr1092u10ffPCBtd3Pz08rV66Un5+fHA6Hfv/732v06NGaMWOGtSYmJkarVq1STk6OunXrpjlz5uiNN95QYmJiHRwyAAAw3S/6HpiGjO+BAYDGg++BqTt8DwwAAICXEDAAAMA4BAwAADAOAQMAAIxDwAAAAOMQMAAAwDgEDAAAMA4BAwAAjEPAAAAA4xAwAADAOAQMAAAwDgEDAACMQ8AAAADjEDAAAMA4BAwAADAOAQMAAIxDwAAAAOMQMAAAwDgEDAAAMA4BAwAAjEPAAAAA4xAwAADAOAQMAAAwDgEDAACMQ8AAAADjEDAAAMA4BAwAADBOM28P0NRd+9Qqb4/QaByeleTtEQAAVwmvwAAAAOMQMAAAwDgEDAAAMA4BAwAAjEPAAAAA4xAwAADAOAQMAAAwDgEDAACMQ8AAAADjEDAAAMA4BAwAADAOAQMAAIxDwAAAAOMQMAAAwDgEDAAAMA4BAwAAjEPAAAAA4xAwAADAOAQMAAAwTq0DZuPGjbr77rsVGRkpHx8fffTRRx7bx44dKx8fH4/L4MGDPdacOHFCKSkpstlsCg0NVWpqqk6fPu2xZvfu3erbt69atGihqKgozZ49u/ZHBwAAGqVaB0xZWZm6deumBQsWXHTN4MGDdezYMevy3nvveWxPSUnRvn37lJOTo5UrV2rjxo16+OGHre0ul0uDBg1SdHS08vPz9dxzz2natGl6/fXXazsuAABohJrV9g5DhgzRkCFDLrkmICBAdru9xm1ffPGFVq9erR07dqhXr16SpJdeekl33nmnnn/+eUVGRmrRokWqqKjQm2++KX9/f3Xp0kUFBQV64YUXPEIHAAA0TfVyDsz69esVHh6uTp06afz48Tp+/Li1LS8vT6GhoVa8SNLAgQPl6+urbdu2WWv69esnf39/a01iYqIOHDigH374ocbHLC8vl8vl8rgAAIDGqc4DZvDgwXrnnXeUm5urP//5z9qwYYOGDBmiqqoqSZLT6VR4eLjHfZo1a6awsDA5nU5rTUREhMea89fPr/lXWVlZCgkJsS5RUVF1fWgAAKCBqPVbSD9nxIgR1p+7du2q+Ph4/epXv9L69et1++231/XDWTIzM5WRkWFdd7lcRAwAAI1UvX+M+rrrrlObNm108OBBSZLdbldJSYnHmsrKSp04ccI6b8Zut6u4uNhjzfnrFzu3JiAgQDabzeMCAAAap3oPmO+++07Hjx9Xu3btJEkOh0MnT55Ufn6+tWbdunWqrq5W7969rTUbN27UuXPnrDU5OTnq1KmTWrduXd8jAwCABq7WAXP69GkVFBSooKBAklRYWKiCggIVFRXp9OnTmjRpkrZu3arDhw8rNzdXQ4cOVceOHZWYmChJio2N1eDBg/XQQw9p+/bt2rx5s9LT0zVixAhFRkZKkkaOHCl/f3+lpqZq3759Wrp0qebPn+/xFhEAAGi6ah0wO3fuVI8ePdSjRw9JUkZGhnr06KGpU6fKz89Pu3fv1m9/+1vdcMMNSk1NVUJCgjZt2qSAgABrH4sWLVLnzp11++23684779Stt97q8R0vISEhWrt2rQoLC5WQkKA//vGPmjp1Kh+hBgAAkq7gJN7+/fvL7XZfdPuaNWt+dh9hYWFavHjxJdfEx8dr06ZNtR0PAAA0AfwWEgAAMA4BAwAAjEPAAAAA4xAwAADAOAQMAAAwDgEDAACMQ8AAAADjEDAAAMA4BAwAADAOAQMAAIxDwAAAAOMQMAAAwDgEDAAAMA4BAwAAjEPAAAAA4xAwAADAOAQMAAAwDgEDAACMQ8AAAADjEDAAAMA4BAwAADAOAQMAAIxDwAAAAOMQMAAAwDgEDAAAMA4BAwAAjEPAAAAA4zTz9gAAGp5rn1rl7REahcOzkrw9AtBo8QoMAAAwDgEDAACMQ8AAAADjEDAAAMA4BAwAADAOAQMAAIxDwAAAAOMQMAAAwDgEDAAAMA4BAwAAjEPAAAAA4xAwAADAOAQMAA
AwDgEDAACMQ8AAAADjEDAAAMA4BAwAADAOAQMAAIxDwAAAAOMQMAAAwDi1DpiNGzfq7rvvVmRkpHx8fPTRRx95bHe73Zo6daratWunwMBADRw4UF9//bXHmhMnTiglJUU2m02hoaFKTU3V6dOnPdbs3r1bffv2VYsWLRQVFaXZs2fX/ugAAECjVOuAKSsrU7du3bRgwYIat8+ePVsvvviiFi5cqG3btqlly5ZKTEzU2bNnrTUpKSnat2+fcnJytHLlSm3cuFEPP/ywtd3lcmnQoEGKjo5Wfn6+nnvuOU2bNk2vv/76FRwiAABobJrV9g5DhgzRkCFDatzmdrs1b948TZkyRUOHDpUkvfPOO4qIiNBHH32kESNG6IsvvtDq1au1Y8cO9erVS5L00ksv6c4779Tzzz+vyMhILVq0SBUVFXrzzTfl7++vLl26qKCgQC+88IJH6AAAgKapTs+BKSwslNPp1MCBA63bQkJC1Lt3b+Xl5UmS8vLyFBoaasWLJA0cOFC+vr7atm2btaZfv37y9/e31iQmJurAgQP64Ycfanzs8vJyuVwujwsAAGic6jRgnE6nJCkiIsLj9oiICGub0+lUeHi4x/ZmzZopLCzMY01N+/jpY/yrrKwshYSEWJeoqKhffkAAAKBBajSfQsrMzFRpaal1OXLkiLdHAgAA9aROA8Zut0uSiouLPW4vLi62ttntdpWUlHhsr6ys1IkTJzzW1LSPnz7GvwoICJDNZvO4AACAxqlOAyYmJkZ2u125ubnWbS6XS9u2bZPD4ZAkORwOnTx5Uvn5+daadevWqbq6Wr1797bWbNy4UefOnbPW5OTkqFOnTmrdunVdjgwAAAxU64A5ffq0CgoKVFBQIOmfJ+4WFBSoqKhIPj4+mjBhgv7zP/9Ty5cv1549ezR69GhFRkZq2LBhkqTY2FgNHjxYDz30kLZv367NmzcrPT1dI0aMUGRkpCRp5MiR8vf3V2pqqvbt26elS5dq/vz5ysjIqLMDBwAA5qr1x6h37typAQMGWNfPR8WYMWOUnZ2tJ598UmVlZXr44Yd18uRJ3XrrrVq9erVatGhh3WfRokVKT0/X7bffLl9fXyUnJ+vFF1+0toeEhGjt2rVKS0tTQkKC2rRpo6lTp/IRagAAIOkKAqZ///5yu90X3e7j46MZM2ZoxowZF10TFhamxYsXX/Jx4uPjtWnTptqOBwAAmoBG8ykkAADQdBAwAADAOAQMAAAwDgEDAACMQ8AAAADjEDAAAMA4BAwAADAOAQMAAIxDwAAAAOMQMAAAwDgEDAAAMA4BAwAAjEPAAAAA4xAwAADAOAQMAAAwDgEDAACMQ8AAAADjEDAAAMA4BAwAADAOAQMAAIxDwAAAAOMQMAAAwDgEDAAAMA4BAwAAjEPAAAAA4xAwAADAOAQMAAAwDgEDAACMQ8AAAADjEDAAAMA4BAwAADAOAQMAAIxDwAAAAOMQMAAAwDgEDAAAMA4BAwAAjEPAAAAA4xAwAADAOAQMAAAwDgEDAACMQ8AAAADjEDAAAMA4BAwAADAOAQMAAIxDwAAAAOMQMAAAwDgEDAAAME6dB8y0adPk4+PjcencubO1/ezZs0pLS9M111yjVq1aKTk5WcXFxR77KCoqUlJSkoKCghQeHq5JkyapsrKyrkcFAACGalYfO+3SpYs++eST/3+QZv//MBMnTtSqVav0/vvvKyQkROnp6Ro+fLg2b94sSaqqqlJSUpLsdru2bNmiY8eOafTo0WrevLmeffbZ+hgXAAAYpl4CplmzZrLb7RfcXlpaqv/6r//S4sWLddttt0mS3nrrLcXGxmrr1q3q06eP1q5dq/379+uTTz5RRESEunfvrpkzZ2ry5MmaNm2a/P3962NkAABgkHo5B+brr79WZGSkrrvuOqWkpKioqEiSlJ+fr3PnzmngwIHW2s6dO6tDhw7Ky8uTJOXl5alr166KiIiw1iQmJsrlcmnfvn31MS4AADBMnb8C07t3b2VnZ6tTp046duyYpk+frr59+2rv3r1yOp3y9/dXaGiox30iIiLkdDolSU6n0yNezm8/v+1iysvLVV5ebl13uVx1dEQAAKChqfOAGTJkiPXn+Ph49e7dW9HR0Vq2bJkCAwPr+uEsWVlZmj59er3tHwAANBz1/jHq0NBQ3XDDDTp48KDsdrsqKip08uRJjzXFxcXWOTN2u/2CTyWdv17TeTXnZWZmqrS01LocOXKkbg8EAAA0GPUeMKdPn9ahQ4fUrl07JSQkqHnz5srNzbW2HzhwQEVFRXI4HJIkh8OhPXv2qKSkxFqTk5Mjm82muLi4iz5OQECAbDabxwUAADROdf4W0hNPPKG7775b0dHROnr0qJ555hn5+fnpd7/7nUJCQpSamqqMjAyFhYXJZrPpsccek8PhUJ8+fSRJgwYNUlxcnEaNGqXZs2fL6XRqypQpSktLU0BAQF2PCwAADFTnAfPdd9/pd7/7nY4fP662bdvq1ltv1datW9W2bVtJ0ty5c+Xr66vk5GSVl5crMTFRr7zyinV/Pz8/rVy5UuPHj5fD4VDLli01ZswYzZgxo65HBQAAhqrzgFmyZMklt7do0UILFizQggULLromOjpaf/vb3+p6NAAA0EjwW0gAAMA4BAwAADAOAQMAAIxDwAAAAOMQMAAAwDgEDAAAMA4BAwAAjEPAAAAA4xAwAADAOAQMAAAwDgEDAACMQ8AAAADjEDAAAMA4BAwAADAOAQMAAIxDwAAAAOMQMAAAwDgEDAAAMA4BAwAAjEPAAAAA4xAwAADAOAQMAAAwDgEDAACMQ8AAAADjEDAAAMA4BAwAADAOAQMAAIxDwAAAAOMQMAAAwDgEDAAAMA4BAwAAjEPAAAAA4xAwAADAOAQMAAAwDgEDAACMQ8AAAADjEDAAAMA4BAwAADAOAQMAAIxDwAAAAOMQMAAAwDgEDAAAMA4BAwAAjEPAAAAA4xAwAADAOAQMAAAwDgEDAACMQ8AAAADjNOiAWbBgga699lq1aNFCvXv31vbt2709EgAAaAAabMAsXbpUGRkZeuaZZ/T555+rW7duSkxMVElJibdHAwAAXtZgA+aFF17QQw89pPvvv19xcXFauHChgoKC9Oabb3p7NAAA4GXNvD1ATSoqKpSfn6/MzEzrNl9fXw0cOFB5eXk13qe8vFzl5eXW9dLSUkmSy+Wq32F/oeryH709QqPR0P9dm4TnZd3gOVl3eE7WnYb+vDw/n9vtvuS6Bhkw//jHP1RVVaWIiAiP2yMiIvTll1/WeJ+srCxNnz79gtujoqLqZUY0PCHzvD0B4InnJBoiU56Xp06dUkhIyEW3N8iAuRKZmZnKyMiwrldXV+vEiRO65ppr5OPj48XJzOdyuRQVFaUjR47IZrN5exyA5yQaHJ6TdcftduvUqVOKjIy85LoGGTBt2rSRn5+fiouLPW4vLi6W3W6v8T4BAQEKCAjwuC00NLS+RmySbDYb/2GiQeE5iYaG52TduNQrL+c1yJN4/f39lZCQoNzcXOu26upq5ebmyuFweHEyAADQEDTIV
2AkKSMjQ2PGjFGvXr100003ad68eSorK9P999/v7dEAAICXNdiAue+++/T9999r6tSpcjqd6t69u1avXn3Bib2ofwEBAXrmmWcueIsO8Baek2hoeE5efT7un/ucEgAAQAPTIM+BAQAAuBQCBgAAGIeAAQAAxiFgAACAcQgYAABgHAIGAIBaqqys1DvvvHPBN8bj6uFj1AAAXIGgoCB98cUXio6O9vYoTVKD/SI7XH09evS47B++/Pzzz+t5GgBo2G666SYVFBQQMF5CwMAybNgwb48AXFTr1q1rDGwfHx+1aNFCHTt21NixY/m5EVw1jz76qDIyMnTkyBElJCSoZcuWHtvj4+O9NFnTwFtIAIwwd+5c/elPf9KQIUN00003SZK2b9+u1atXa+LEiSosLNS7776rl156SQ899JCXp0VT4Ot74WmkPj4+crvd8vHxUVVVlRemajoIGABGSE5O1h133KFx48Z53P7aa69p7dq1+p//+R+99NJLev3117Vnzx4vTYmm5Ntvv73kdt5aql8EDCwXe4m+JidOnKjnaQBPrVq1UkFBgTp27Ohx+8GDB9W9e3edPn1ahw4dUnx8vMrKyrw0JYCrhXNgYJk3b563RwAuKiwsTCtWrNDEiRM9bl+xYoXCwsIkSWVlZQoODvbGeGii3n33XS1cuFCFhYXKy8tTdHS05s2bp5iYGA0dOtTb4zVqBAwsY8aM8fYIwEU9/fTTGj9+vD799FPrHJgdO3bob3/7mxYuXChJysnJ0W9+8xtvjokm5NVXX9XUqVM1YcIE/elPf7LOeQkNDdW8efMImHrGW0j4WWfPnlVFRYXHbTabzUvToCnbvHmzXn75ZR04cECS1KlTJz322GO6+eabvTwZmqK4uDg9++yzGjZsmIKDg7Vr1y5dd9112rt3r/r3769//OMf3h6xUeMVGNSorKxMkydP1rJly3T8+PELtnN2Pbzhlltu0S233OLtMQBJUmFhoXr06HHB7QEBAZyHdRUQMKjRk08+qU8//VSvvvqqRo0apQULFujvf/+7XnvtNc2aNcvb46GJqq6u1sGDB1VSUqLq6mqPbf369fPSVGiqYmJiavwiu9WrVys2NtZLUzUdBAxqtGLFCr3zzjvq37+/7r//fvXt21cdO3ZUdHS0Fi1apJSUFG+PiCZm69atGjlypL799lv96zvffOcGvCEjI0NpaWk6e/as3G63tm/frvfee09ZWVl64403vD1eo0fAoEYnTpzQddddJ+mf57uc/9j0rbfeqvHjx3tzNDRR48aNU69evbRq1Sq1a9fusj/yD9SXBx98UIGBgZoyZYp+/PFHjRw5UpGRkZo/f75GjBjh7fEaPQIGNbruuutUWFioDh06qHPnzlq2bJluuukmrVixQqGhod4eD03Q119/rf/+7/++4HtgAG9KSUlRSkqKfvzxR50+fVrh4eHeHqnJuPB7kNGkffPNN6qurtb999+vXbt2SZKeeuopLViwQC1atNDEiRM1adIkL0+Jpqh37946ePCgt8cAahQUFES8XGV8jBoe/Pz8dOzYMes/xPvuu08vvviizp49q/z8fHXs2JEfKINXfPjhh5oyZYomTZqkrl27qnnz5h7beV7iaisuLtYTTzyh3NxclZSUXHBuFudl1S8CBh58fX3ldDqtgPnpdxsA3sQP56GhGTJkiIqKipSenl7jeVl8kV394hwYAEYoLCz09giAh88++0ybNm1S9+7dvT1Kk0TAwIOPj88F/xfBpz3QEPDLvmhooqKiLnjbCFcPbyHBg6+vr4YMGaKAgABJ//w+mNtuu00tW7b0WPfBBx94Yzw0McuXL9eQIUPUvHlzLV++/JJrf/vb316lqYB/Wrt2rebMmaPXXntN1157rbfHaXIIGHi4//77L2vdW2+9Vc+TAJ7nZNV0Dsx5nAODq6V169Yer0qXlZWpsrJSQUFBF5xYfv77s1A/CBgAAC7T22+/fdlrx4wZU4+TgIABYIRvvvmGT8MBsPBFdgCM0LFjRw0YMEB/+ctfdPbsWW+PA8jPz08lJSUX3H78+HH5+fl5YaKmhYABYITPP/9c8fHxysjIkN1u1yOPPKLt27d7eyw0YRd7A6O8vFz+/v5XeZqmh7eQABilsrJSy5cvV3Z2tlavXq0bbrhBDzzwgEaNGqW2bdt6ezw0AS+++KIkaeLEiZo5c6ZatWplbauqqtLGjRt1+PBh/e///q+3RmwSCBgARiovL9crr7yizMxMVVRUyN/fX/fee6/+/Oc/q127dt4eD41YTEyMJOnbb79V+/btPd4u8vf317XXXqsZM2aod+/e3hqxSSBgABhl586devPNN7VkyRK1bNlSY8aMUWpqqr777jtNnz5dLpeLt5ZwVQwYMEAffPCBKisr5ePjozZt2nh7pCaFc2AAGOGFF15Q165ddfPNN+vo0aN655139O2332rcuHHKyspS3759lZ2drc8//9zbo6IJOHnypGJjY3X99dfLbrcrIiJCbdq0UXp6uk6ePOnt8ZoEXoEBYITrr79eDzzwgMaOHevxFtGuXbvUs2dPVVVVqaKiQu+99x7fv4F6deLECTkcDv39739XSkqKYmNjJUn79+/X4sWLFRUVpS1btqh169ZenrRxI2AAGO2nAQNcDRMmTFBubq4++eQTRUREeGxzOp0aNGiQbr/9ds2dO9dLEzYNvIUEAEAtfPTRR3r++ecviBdJstvtmj17tj788EMvTNa0EDAAANTCsWPH1KVLl4tuv/HGG+V0Oq/iRE1TM28PAACXMnz48Etu54RJXG1t2rTR4cOH1b59+xq3FxYWKiws7CpP1fQQMAAatJCQkJ/dPnr06Ks0DSAlJibqP/7jP5STk3PBN+6Wl5fr6aef1uDBg700XdPBSbwAANTCd999p169eikgIEBpaWnq3Lmz3G63vvjiC73yyisqLy/Xzp07FRUV5e1RGzUCBgCAWiosLNSjjz6qtWvXWr+J5OPjozvuuEMvv/yyOnbs6OUJGz8CBgCAK/TDDz/o66+/lvTPX0zn3Jerh4ABAADG4WPUAADAOAQMAAAwDgEDAACMQ8AAuKj+/ftrwoQJl7V2/fr18vHx+cVfLHfttddq3rx5v2gfABo/AgYAABiHgAEAAMYhYABclnfffVe9evVScHCw7Ha7Ro4cqZKSkgvWbd68WfHx8WrRooX69OmjvXv3emz/7LPP1LdvXwUGBioqKkqPP/64ysrKrmgmHx8fvfHGG/q3f/s3BQUF6frrr9fy5cut7VVVVUpNTVVMTIwCAwPVqVMnzZ8/32MfY8eO1bBhw/Tss88qIiJCoaGhmjFjhiorKzVp0iSFhYWpffv2euuttzzud+TIEd17770KDQ1VWFiYhg4dqsOHD1/RcQCoPQIGwGU5d+6cZs6cqV27dumjjz7S4cOHNXbs2AvWTZo0SXPmzNGOHTvUtm1b3X333Tp37pwk6dChQxo8eLCSk5O1e/duLV26VJ999pnS09OveK7p06fr3nvv1e7du3XnnXcqJSVFJ06ckCRVV1erffv2
ev/997V//35NnTpV//7v/65ly5Z57GPdunU6evSoNm7cqBdeeEHPPPOM7rrrLrVu3Vrbtm3TuHHj9Mgjj+i7776z/lkkJiYqODhYmzZt0ubNm9WqVSsNHjxYFRUVV3wsAGrBDQAX8Zvf/Mb9hz/8ocZtO3bscEtynzp1yu12u92ffvqpW5J7yZIl1prjx4+7AwMD3UuXLnW73W53amqq++GHH/bYz6ZNm9y+vr7uM2fOuN1utzs6Oto9d+7cy5pPknvKlCnW9dOnT7sluT/++OOL3ictLc2dnJxsXR8zZow7OjraXVVVZd3WqVMnd9++fa3rlZWV7pYtW7rfe+89t9vtdr/77rvuTp06uaurq6015eXl7sDAQPeaNWsua3YAvwy/Rg3gsuTn52vatGnatWuXfvjhB1VXV0uSioqKFBcXZ61zOBzWn8PCwtSpUyd98cUXkqRdu3Zp9+7dWrRokbXG7XarurpahYWFio2NrfVc8fHx1p9btmwpm83m8dbWggUL9Oabb6qoqEhnzpxRRUWFunfv7rGPLl26yNf3/1+QjoiI0I033mhd9/Pz0zXXXGPtd9euXTp48KCCg4M99nP27FkdOnSo1scAoPYIGAA/q6ysTImJiUpMTNSiRYvUtm1bFRUVKTExsVZvmZw+fVqPPPKIHn/88Qu2dejQ4Ypma968ucd1Hx8fK66WLFmiJ554QnPmzJHD4VBwcLCee+45bdu27Wf3can9nj59WgkJCR4hdl7btm2v6DgA1A4BA+Bnffnllzp+/LhmzZqlqKgoSdLOnTtrXLt161YrRn744Qd99dVX1isrPXv21P79+6/aL/Vu3rxZN998sx599FHrtrp4haRnz55aunSpwsPDZbPZfvH+ANQeJ/EC+FkdOnSQv7+/XnrpJX3zzTdavny5Zs6cWePaGTNmKDc3V3v37tXYsWPVpk0bDRs2TJI0efJkbdmyRenp6SooKNDXX3+tv/71r7/oJN5Luf7667Vz506tWbNGX331lZ5++mnt2LHjF+83JSVFbdq00dChQ7Vp0yYVFhZq/fr1evzxx60TfQHULwIGwM9q27atsrOz9f777ysuLk6zZs3S888/X+PaWbNm6Q9/+IMSEhLkdDq1YsUK+fv7S/rn+SobNmzQV199pb59+6pHjx6aOnWqIiMj62XuRx55RMOHD9d9992n3r176/jx4x6vxlypoKAgbdy4UR06dNDw4cMVGxur1NRUnT17lldkgKvEx+12u709BAAAQG3wCgwAADAOAQOgQVq0aJFatWpV46VLly7eHg+Al/EWEoAG6dSpUyouLq5xW/PmzRUdHX2VJwLQkBAwAADAOLyFBAAAjEPAAAAA4xAwAADAOAQMAAAwDgEDAACMQ8AAAADjEDAAAMA4BAwAADDO/wGCpGCtFmTYoAAAAABJRU5ErkJggg==",
- "text/plain": [
- "