bbox_mAP_s in the training results stays at 0.00? #555
2024/12/22 05:27:46 - mmengine - INFO - Epoch(train) [5][7100/7393] base_lr: 2.0000e-05 lr: 1.8515e-05 eta: 23:53:38 time: 0.3311 data_time: 0.0008 memory: 10926 grad_norm: 338.3706 loss: 142.5298 loss_cls: 69.8546 loss_bbox: 42.1329 loss_dfl: 30.5423
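For context, bbox_mAP_s is the COCO average precision computed only over small ground-truth boxes (area < 32*32 pixels), so it is worth confirming that the validation annotations actually contain small boxes and, if possible, re-scoring the saved predictions offline. The following is a minimal sketch with pycocotools, not part of the original report: the prediction-file name is a placeholder (mmdet's CocoMetric only writes such a JSON when outfile_prefix is configured), and the annotation path is the one used in val_evaluator below.

# Sketch: count small GT boxes and recompute per-size AP with pycocotools.
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

ann_file = '/home/wrf/Dara/coco2017label/coco/annotations/instances_val2017.json'
res_file = 'work_dirs/val_results.bbox.json'  # hypothetical dump of predictions

coco_gt = COCO(ann_file)
# COCO "small" means box area < 32**2 pixels.
num_small = sum(1 for a in coco_gt.anns.values() if a['area'] < 32 ** 2)
print(f'small ground-truth boxes in val2017: {num_small}')

coco_dt = coco_gt.loadRes(res_file)
coco_eval = COCOeval(coco_gt, coco_dt, iouType='bbox')
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()  # the "area= small" row corresponds to bbox_mAP_s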
2024/12/22 02:04:23 - mmengine - INFO - Config:
_backend_args = None
_multiscale_resize_transforms = [
dict(
transforms=[
dict(scale=(
640,
640,
), type='YOLOv5KeepRatioResize'),
dict(
allow_scale_up=False,
pad_val=dict(img=114),
scale=(
640,
640,
),
type='LetterResize'),
],
type='Compose'),
dict(
transforms=[
dict(scale=(
320,
320,
), type='YOLOv5KeepRatioResize'),
dict(
allow_scale_up=False,
pad_val=dict(img=114),
scale=(
320,
320,
),
type='LetterResize'),
],
type='Compose'),
dict(
transforms=[
dict(scale=(
960,
960,
), type='YOLOv5KeepRatioResize'),
dict(
allow_scale_up=False,
pad_val=dict(img=114),
scale=(
960,
960,
),
type='LetterResize'),
],
type='Compose'),
]
affine_scale = 0.9
albu_train_transforms = [
dict(p=0.01, type='Blur'),
dict(p=0.01, type='MedianBlur'),
dict(p=0.01, type='ToGray'),
dict(p=0.01, type='CLAHE'),
]
backend_args = None
base_lr = 2e-05
batch_shapes_cfg = None
classnames = [
'person',
'bicycle',
'car',
'motorcycle',
'airplane',
'bus',
'train',
'truck',
'boat',
'traffic light',
'fire hydrant',
'stop sign',
'parking meter',
'bench',
'bird',
'cat',
'dog',
'horse',
'sheep',
'cow',
'elephant',
'bear',
'zebra',
'giraffe',
'backpack',
'umbrella',
'handbag',
'tie',
'suitcase',
'frisbee',
'skis',
'snowboard',
'sports ball',
'kite',
'baseball bat',
'baseball glove',
'skateboard',
'surfboard',
'tennis racket',
'bottle',
'wine glass',
'cup',
'fork',
'knife',
'spoon',
'bowl',
'banana',
'apple',
'sandwich',
'orange',
'broccoli',
'carrot',
'hot dog',
'pizza',
'donut',
'cake',
'chair',
'couch',
'potted plant',
'bed',
'dining table',
'toilet',
'TV',
'laptop',
'mouse',
'remote',
'keyboard',
'cell phone',
'microwave',
'oven',
'toaster',
'sink',
'refrigerator',
'book',
'clock',
'vase',
'scissors',
'teddy bear',
'hair drier',
'toothbrush',
]
close_mosaic_epochs = 30
coco_train_dataset = dict(
delete=True,
class_text_path='data/texts/coco_class_texts.json',
dataset=dict(
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='images/train2017/'),
data_root='/home/wrf/Dara/coco2017label/coco/',
filter_cfg=dict(filter_empty_gt=False, min_size=32),
type='YOLOv5CocoDataset'),
pipeline=[
dict(backend_args=None, type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
img_scale=(
640,
640,
),
pad_val=114.0,
pre_transform=[
dict(backend_args=None, type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
],
type='MultiModalMosaic'),
dict(
border=(
-320,
-320,
),
border_val=(
114,
114,
114,
),
max_aspect_ratio=100.0,
max_rotate_degree=0.0,
max_shear_degree=0.0,
scaling_ratio_range=(
0.09999999999999998,
1.9,
),
type='YOLOv5RandomAffine'),
dict(
pre_transform=[
dict(backend_args=None, type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
img_scale=(
640,
640,
),
pad_val=114.0,
pre_transform=[
dict(backend_args=None, type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
],
type='MultiModalMosaic'),
dict(
border=(
-320,
-320,
),
border_val=(
114,
114,
114,
),
max_aspect_ratio=100.0,
max_rotate_degree=0.0,
max_shear_degree=0.0,
scaling_ratio_range=(
0.09999999999999998,
1.9,
),
type='YOLOv5RandomAffine'),
],
prob=0.15,
type='YOLOv5MultiModalMixUp'),
dict(
bbox_params=dict(
format='pascal_voc',
label_fields=[
'gt_bboxes_labels',
'gt_ignore_flags',
],
type='BboxParams'),
keymap=dict(gt_bboxes='bboxes', img='image'),
transforms=[
dict(p=0.01, type='Blur'),
dict(p=0.01, type='MedianBlur'),
dict(p=0.01, type='ToGray'),
dict(p=0.01, type='CLAHE'),
],
type='mmdet.Albu'),
dict(type='YOLOv5HSVRandomAug'),
dict(prob=0.5, type='mmdet.RandomFlip'),
dict(
max_num_samples=80,
num_neg_samples=(
80,
80,
),
padding_to_max=True,
padding_value='',
type='RandomLoadText'),
dict(
meta_keys=(
'img_id',
'img_path',
'ori_shape',
'img_shape',
'flip',
'flip_direction',
'texts',
),
type='mmdet.PackDetInputs'),
],
type='MultiModalDataset')
coco_val_dataset = dict(
delete=True,
class_text_path='data/texts/coco_class_texts.json',
dataset=dict(
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='images/val2017/'),
data_root='/home/wrf/Dara/coco2017label/coco/',
filter_cfg=dict(filter_empty_gt=False, min_size=32),
type='YOLOv5CocoDataset'),
pipeline=[
dict(backend_args=None, type='LoadImageFromFile'),
dict(scale=(
640,
640,
), type='YOLOv5KeepRatioResize'),
dict(
allow_scale_up=False,
pad_val=dict(img=114),
scale=(
640,
640,
),
type='LetterResize'),
dict(scope='mmdet', type='LoadAnnotations', with_bbox=True),
dict(type='LoadText'),
dict(
meta_keys=(
'img_id',
'img_path',
'ori_shape',
'img_shape',
'scale_factor',
'pad_param',
'texts',
),
type='mmdet.PackDetInputs'),
],
type='MultiModalDataset')
custom_hooks = [
dict(
ema_type='ExpMomentumEMA',
momentum=0.0001,
priority=49,
strict_load=False,
type='EMAHook',
update_buffers=True),
dict(
switch_epoch=10,
switch_pipeline=[
dict(backend_args=None, type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(scale=(
640,
640,
), type='YOLOv5KeepRatioResize'),
dict(
allow_scale_up=True,
pad_val=dict(img=114.0),
scale=(
640,
640,
),
type='LetterResize'),
dict(
border_val=(
114,
114,
114,
),
max_aspect_ratio=100,
max_rotate_degree=0.0,
max_shear_degree=0.0,
scaling_ratio_range=(
0.09999999999999998,
1.9,
),
type='YOLOv5RandomAffine'),
dict(
bbox_params=dict(
format='pascal_voc',
label_fields=[
'gt_bboxes_labels',
'gt_ignore_flags',
],
type='BboxParams'),
keymap=dict(gt_bboxes='bboxes', img='image'),
transforms=[
dict(p=0.01, type='Blur'),
dict(p=0.01, type='MedianBlur'),
dict(p=0.01, type='ToGray'),
dict(p=0.01, type='CLAHE'),
],
type='mmdet.Albu'),
dict(type='YOLOv5HSVRandomAug'),
dict(prob=0.5, type='mmdet.RandomFlip'),
dict(
max_num_samples=80,
num_neg_samples=(
80,
80,
),
padding_to_max=True,
padding_value='',
type='RandomLoadText'),
dict(
meta_keys=(
'img_id',
'img_path',
'ori_shape',
'img_shape',
'flip',
'flip_direction',
'texts',
),
type='mmdet.PackDetInputs'),
],
type='mmdet.PipelineSwitchHook'),
]
custom_imports = dict(
allow_failed_imports=False, imports=[
'yolo_world',
])
data_root = 'data/coco/'
dataset_type = 'YOLOv5CocoDataset'
deepen_factor = 1.0
default_hooks = dict(
checkpoint=dict(
interval=5, max_keep_ckpts=-1, save_best=None, type='CheckpointHook'),
logger=dict(interval=50, type='LoggerHook'),
param_scheduler=dict(
lr_factor=0.01,
max_epochs=40,
scheduler_type='linear',
type='YOLOv5ParamSchedulerHook'),
sampler_seed=dict(type='DistSamplerSeedHook'),
timer=dict(type='IterTimerHook'),
visualization=dict(type='mmdet.DetVisualizationHook'))
default_scope = 'mmyolo'
env_cfg = dict(
cudnn_benchmark=True,
dist_cfg=dict(backend='nccl'),
mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0))
img_scale = (
640,
640,
)
img_scales = [
(
640,
640,
),
(
320,
320,
),
(
960,
960,
),
]
last_stage_out_channels = 512
last_transform = [
dict(
bbox_params=dict(
format='pascal_voc',
label_fields=[
'gt_bboxes_labels',
'gt_ignore_flags',
],
type='BboxParams'),
keymap=dict(gt_bboxes='bboxes', img='image'),
transforms=[
dict(p=0.01, type='Blur'),
dict(p=0.01, type='MedianBlur'),
dict(p=0.01, type='ToGray'),
dict(p=0.01, type='CLAHE'),
],
type='mmdet.Albu'),
dict(type='YOLOv5HSVRandomAug'),
dict(prob=0.5, type='mmdet.RandomFlip'),
dict(
meta_keys=(
'img_id',
'img_path',
'ori_shape',
'img_shape',
'flip',
'flip_direction',
),
type='mmdet.PackDetInputs'),
]
launcher = 'none'
load_from = None
log_level = 'INFO'
log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50)
loss_bbox_weight = 7.5
loss_cls_weight = 0.5
loss_dfl_weight = 0.375
lr_factor = 0.01
max_aspect_ratio = 100
max_epochs = 40
max_keep_ckpts = 2
mixup_prob = 0.15
model = dict(
backbone=dict(
image_model=dict(
act_cfg=dict(inplace=True, type='SiLU'),
arch='P5',
deepen_factor=1.0,
last_stage_out_channels=512,
norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'),
type='YOLOv8CSPDarknet',
widen_factor=1.0),
text_model=dict(
classnames=[
'person',
'bicycle',
'car',
'motorcycle',
'airplane',
'bus',
'train',
'truck',
'boat',
'traffic light',
'fire hydrant',
'stop sign',
'parking meter',
'bench',
'bird',
'cat',
'dog',
'horse',
'sheep',
'cow',
'elephant',
'bear',
'zebra',
'giraffe',
'backpack',
'umbrella',
'handbag',
'tie',
'suitcase',
'frisbee',
'skis',
'snowboard',
'sports ball',
'kite',
'baseball bat',
'baseball glove',
'skateboard',
'surfboard',
'tennis racket',
'bottle',
'wine glass',
'cup',
'fork',
'knife',
'spoon',
'bowl',
'banana',
'apple',
'sandwich',
'orange',
'broccoli',
'carrot',
'hot dog',
'pizza',
'donut',
'cake',
'chair',
'couch',
'potted plant',
'bed',
'dining table',
'toilet',
'TV',
'laptop',
'mouse',
'remote',
'keyboard',
'cell phone',
'microwave',
'oven',
'toaster',
'sink',
'refrigerator',
'book',
'clock',
'vase',
'scissors',
'teddy bear',
'hair drier',
'toothbrush',
],
frozen_modules=[
'all',
],
model_name='pretrained/clip-vit-base-patch32',
type='HuggingCLIPCocoOpLanguageBackbone'),
type='MultiModalYOLOBackbone'),
bbox_head=dict(
bbox_coder=dict(type='DistancePointBBoxCoder'),
head_module=dict(
act_cfg=dict(inplace=True, type='SiLU'),
embed_dims=512,
featmap_strides=[
8,
16,
32,
],
in_channels=[
256,
512,
512,
],
norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'),
num_classes=80,
reg_max=16,
type='YOLOWorldHeadModule',
use_bn_head=True,
widen_factor=1.0),
loss_bbox=dict(
bbox_format='xyxy',
iou_mode='ciou',
loss_weight=7.5,
reduction='sum',
return_iou=False,
type='IoULoss'),
loss_cls=dict(
loss_weight=0.5,
reduction='none',
type='mmdet.CrossEntropyLoss',
use_sigmoid=True),
loss_dfl=dict(
loss_weight=0.375,
reduction='mean',
type='mmdet.DistributionFocalLoss'),
prior_generator=dict(
offset=0.5, strides=[
8,
16,
32,
], type='mmdet.MlvlPointGenerator'),
type='YOLOWorldHead'),
data_preprocessor=dict(
bgr_to_rgb=True,
mean=[
0.0,
0.0,
0.0,
],
std=[
255.0,
255.0,
255.0,
],
type='YOLOWDetDataPreprocessor'),
mm_neck=True,
neck=dict(
act_cfg=dict(inplace=True, type='SiLU'),
block_cfg=dict(type='MaxSigmoidCSPLayerWithTwoConv'),
deepen_factor=1.0,
embed_channels=[
128,
256,
256,
],
guide_channels=512,
in_channels=[
256,
512,
512,
],
norm_cfg=dict(eps=0.001, momentum=0.03, type='BN'),
num_csp_blocks=3,
num_heads=[
4,
8,
8,
],
out_channels=[
256,
512,
512,
],
type='YOLOWorldPAFPN',
widen_factor=1.0),
num_test_classes=80,
num_train_classes=80,
test_cfg=dict(
max_per_img=300,
multi_label=True,
nms=dict(iou_threshold=0.7, type='nms'),
nms_pre=30000,
score_thr=0.001),
train_cfg=dict(
assigner=dict(
alpha=0.5,
beta=6.0,
eps=1e-09,
num_classes=80,
topk=10,
type='BatchTaskAlignedAssigner',
use_ciou=True)),
type='YOLOWorldDetector')
model_test_cfg = dict(
max_per_img=300,
multi_label=True,
nms=dict(iou_threshold=0.7, type='nms'),
nms_pre=30000,
score_thr=0.001)
mosaic_affine_transform = [
dict(
img_scale=(
640,
640,
),
pad_val=114.0,
pre_transform=[
dict(backend_args=None, type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
],
type='MultiModalMosaic'),
dict(
border=(
-320,
-320,
),
border_val=(
114,
114,
114,
),
max_aspect_ratio=100.0,
max_rotate_degree=0.0,
max_shear_degree=0.0,
scaling_ratio_range=(
0.09999999999999998,
1.9,
),
type='YOLOv5RandomAffine'),
]
neck_embed_channels = [
128,
256,
256,
]
neck_num_heads = [
4,
8,
8,
]
norm_cfg = dict(eps=0.001, momentum=0.03, type='BN')
num_classes = 80
num_det_layers = 3
num_training_classes = 80
optim_wrapper = dict(
clip_grad=dict(max_norm=10.0),
constructor='YOLOWv5OptimizerConstructor',
loss_scale='dynamic',
optimizer=dict(
batch_size_per_gpu=16, lr=2e-05, type='AdamW', weight_decay=0.05),
paramwise_cfg=dict(
custom_keys=dict({
'backbone.text_model': dict(lr_mult=0.01),
'logit_scale': dict(weight_decay=0.0)
})),
type='AmpOptimWrapper')
param_scheduler = None
persistent_workers = False
pre_transform = [
dict(backend_args=None, type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
]
resume = True
save_epoch_intervals = 5
strides = [
8,
16,
32,
]
tal_alpha = 0.5
tal_beta = 6.0
tal_topk = 10
test_cfg = dict(type='TestLoop')
test_dataloader = dict(
batch_size=1,
dataset=dict(
class_text_path='data/texts/coco_class_texts.json',
dataset=dict(
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='images/val2017/'),
data_root='/home/wrf/Dara/coco2017label/coco/',
filter_cfg=dict(filter_empty_gt=False, min_size=32),
type='YOLOv5CocoDataset'),
pipeline=[
dict(backend_args=None, type='LoadImageFromFile'),
dict(scale=(
640,
640,
), type='YOLOv5KeepRatioResize'),
dict(
allow_scale_up=False,
pad_val=dict(img=114),
scale=(
640,
640,
),
type='LetterResize'),
dict(scope='mmdet', type='LoadAnnotations', with_bbox=True),
dict(type='LoadText'),
dict(
meta_keys=(
'img_id',
'img_path',
'ori_shape',
'img_shape',
'scale_factor',
'pad_param',
'texts',
),
type='mmdet.PackDetInputs'),
],
type='MultiModalDataset'),
drop_last=False,
num_workers=2,
persistent_workers=True,
pin_memory=True,
sampler=dict(shuffle=False, type='DefaultSampler'))
test_evaluator = dict(
ann_file='data/coco/annotations/instances_val2017.json',
metric='bbox',
proposal_nums=(
100,
1,
10,
),
type='mmdet.CocoMetric')
test_pipeline = [
dict(backend_args=None, type='LoadImageFromFile'),
dict(scale=(
640,
640,
), type='YOLOv5KeepRatioResize'),
dict(
allow_scale_up=False,
pad_val=dict(img=114),
scale=(
640,
640,
),
type='LetterResize'),
dict(scope='mmdet', type='LoadAnnotations', with_bbox=True),
dict(type='LoadText'),
dict(
meta_keys=(
'img_id',
'img_path',
'ori_shape',
'img_shape',
'scale_factor',
'pad_param',
'texts',
),
type='mmdet.PackDetInputs'),
]
text_channels = 512
text_model_name = 'pretrained/clip-vit-base-patch32'
text_transform = [
dict(
max_num_samples=80,
num_neg_samples=(
80,
80,
),
padding_to_max=True,
padding_value='',
type='RandomLoadText'),
dict(
meta_keys=(
'img_id',
'img_path',
'ori_shape',
'img_shape',
'flip',
'flip_direction',
'texts',
),
type='mmdet.PackDetInputs'),
]
train_ann_file = 'annotations/instances_train2017.json'
train_batch_size_per_gpu = 16
train_cfg = dict(
dynamic_intervals=[
(
10,
1,
),
],
max_epochs=40,
type='EpochBasedTrainLoop',
val_interval=5)
train_data_prefix = 'train2017/'
train_dataloader = dict(
batch_size=16,
collate_fn=dict(type='yolow_collate'),
dataset=dict(
class_text_path='data/texts/coco_class_texts.json',
dataset=dict(
ann_file='annotations/instances_train2017.json',
data_prefix=dict(img='images/train2017/'),
data_root='/home/wrf/Dara/coco2017label/coco/',
filter_cfg=dict(filter_empty_gt=False, min_size=32),
type='YOLOv5CocoDataset'),
pipeline=[
dict(backend_args=None, type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
img_scale=(
640,
640,
),
pad_val=114.0,
pre_transform=[
dict(backend_args=None, type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
],
type='MultiModalMosaic'),
dict(
border=(
-320,
-320,
),
border_val=(
114,
114,
114,
),
max_aspect_ratio=100.0,
max_rotate_degree=0.0,
max_shear_degree=0.0,
scaling_ratio_range=(
0.09999999999999998,
1.9,
),
type='YOLOv5RandomAffine'),
dict(
pre_transform=[
dict(backend_args=None, type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
img_scale=(
640,
640,
),
pad_val=114.0,
pre_transform=[
dict(backend_args=None, type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
],
type='MultiModalMosaic'),
dict(
border=(
-320,
-320,
),
border_val=(
114,
114,
114,
),
max_aspect_ratio=100.0,
max_rotate_degree=0.0,
max_shear_degree=0.0,
scaling_ratio_range=(
0.09999999999999998,
1.9,
),
type='YOLOv5RandomAffine'),
],
prob=0.15,
type='YOLOv5MultiModalMixUp'),
dict(
bbox_params=dict(
format='pascal_voc',
label_fields=[
'gt_bboxes_labels',
'gt_ignore_flags',
],
type='BboxParams'),
keymap=dict(gt_bboxes='bboxes', img='image'),
transforms=[
dict(p=0.01, type='Blur'),
dict(p=0.01, type='MedianBlur'),
dict(p=0.01, type='ToGray'),
dict(p=0.01, type='CLAHE'),
],
type='mmdet.Albu'),
dict(type='YOLOv5HSVRandomAug'),
dict(prob=0.5, type='mmdet.RandomFlip'),
dict(
max_num_samples=80,
num_neg_samples=(
80,
80,
),
padding_to_max=True,
padding_value='',
type='RandomLoadText'),
dict(
meta_keys=(
'img_id',
'img_path',
'ori_shape',
'img_shape',
'flip',
'flip_direction',
'texts',
),
type='mmdet.PackDetInputs'),
],
type='MultiModalDataset'),
num_workers=8,
persistent_workers=False,
pin_memory=True,
sampler=dict(shuffle=True, type='DefaultSampler'))
train_num_workers = 8
train_pipeline = [
dict(backend_args=None, type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
img_scale=(
640,
640,
),
pad_val=114.0,
pre_transform=[
dict(backend_args=None, type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
],
type='MultiModalMosaic'),
dict(
border=(
-320,
-320,
),
border_val=(
114,
114,
114,
),
max_aspect_ratio=100.0,
max_rotate_degree=0.0,
max_shear_degree=0.0,
scaling_ratio_range=(
0.09999999999999998,
1.9,
),
type='YOLOv5RandomAffine'),
dict(
pre_transform=[
dict(backend_args=None, type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(
img_scale=(
640,
640,
),
pad_val=114.0,
pre_transform=[
dict(backend_args=None, type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
],
type='MultiModalMosaic'),
dict(
border=(
-320,
-320,
),
border_val=(
114,
114,
114,
),
max_aspect_ratio=100.0,
max_rotate_degree=0.0,
max_shear_degree=0.0,
scaling_ratio_range=(
0.09999999999999998,
1.9,
),
type='YOLOv5RandomAffine'),
],
prob=0.15,
type='YOLOv5MultiModalMixUp'),
dict(
bbox_params=dict(
format='pascal_voc',
label_fields=[
'gt_bboxes_labels',
'gt_ignore_flags',
],
type='BboxParams'),
keymap=dict(gt_bboxes='bboxes', img='image'),
transforms=[
dict(p=0.01, type='Blur'),
dict(p=0.01, type='MedianBlur'),
dict(p=0.01, type='ToGray'),
dict(p=0.01, type='CLAHE'),
],
type='mmdet.Albu'),
dict(type='YOLOv5HSVRandomAug'),
dict(prob=0.5, type='mmdet.RandomFlip'),
dict(
max_num_samples=80,
num_neg_samples=(
80,
80,
),
padding_to_max=True,
padding_value='',
type='RandomLoadText'),
dict(
meta_keys=(
'img_id',
'img_path',
'ori_shape',
'img_shape',
'flip',
'flip_direction',
'texts',
),
type='mmdet.PackDetInputs'),
]
train_pipeline_stage2 = [
dict(backend_args=None, type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(scale=(
640,
640,
), type='YOLOv5KeepRatioResize'),
dict(
allow_scale_up=True,
pad_val=dict(img=114.0),
scale=(
640,
640,
),
type='LetterResize'),
dict(
border_val=(
114,
114,
114,
),
max_aspect_ratio=100,
max_rotate_degree=0.0,
max_shear_degree=0.0,
scaling_ratio_range=(
0.09999999999999998,
1.9,
),
type='YOLOv5RandomAffine'),
dict(
bbox_params=dict(
format='pascal_voc',
label_fields=[
'gt_bboxes_labels',
'gt_ignore_flags',
],
type='BboxParams'),
keymap=dict(gt_bboxes='bboxes', img='image'),
transforms=[
dict(p=0.01, type='Blur'),
dict(p=0.01, type='MedianBlur'),
dict(p=0.01, type='ToGray'),
dict(p=0.01, type='CLAHE'),
],
type='mmdet.Albu'),
dict(type='YOLOv5HSVRandomAug'),
dict(prob=0.5, type='mmdet.RandomFlip'),
dict(
max_num_samples=80,
num_neg_samples=(
80,
80,
),
padding_to_max=True,
padding_value='',
type='RandomLoadText'),
dict(
meta_keys=(
'img_id',
'img_path',
'ori_shape',
'img_shape',
'flip',
'flip_direction',
'texts',
),
type='mmdet.PackDetInputs'),
]
tta_model = dict(
tta_cfg=dict(max_per_img=300, nms=dict(iou_threshold=0.65, type='nms')),
type='mmdet.DetTTAModel')
tta_pipeline = [
dict(backend_args=None, type='LoadImageFromFile'),
dict(
transforms=[
[
dict(
transforms=[
dict(scale=(
640,
640,
), type='YOLOv5KeepRatioResize'),
dict(
allow_scale_up=False,
pad_val=dict(img=114),
scale=(
640,
640,
),
type='LetterResize'),
],
type='Compose'),
dict(
transforms=[
dict(scale=(
320,
320,
), type='YOLOv5KeepRatioResize'),
dict(
allow_scale_up=False,
pad_val=dict(img=114),
scale=(
320,
320,
),
type='LetterResize'),
],
type='Compose'),
dict(
transforms=[
dict(scale=(
960,
960,
), type='YOLOv5KeepRatioResize'),
dict(
allow_scale_up=False,
pad_val=dict(img=114),
scale=(
960,
960,
),
type='LetterResize'),
],
type='Compose'),
],
[
dict(prob=1.0, type='mmdet.RandomFlip'),
dict(prob=0.0, type='mmdet.RandomFlip'),
],
[
dict(type='mmdet.LoadAnnotations', with_bbox=True),
],
[
dict(
meta_keys=(
'img_id',
'img_path',
'ori_shape',
'img_shape',
'scale_factor',
'pad_param',
'flip',
'flip_direction',
),
type='mmdet.PackDetInputs'),
],
],
type='TestTimeAug'),
]
val_ann_file = 'annotations/instances_val2017.json'
val_batch_size_per_gpu = 1
val_cfg = dict(type='ValLoop')
val_data_prefix = 'val2017/'
val_dataloader = dict(
batch_size=1,
dataset=dict(
class_text_path='data/texts/coco_class_texts.json',
dataset=dict(
ann_file='annotations/instances_val2017.json',
data_prefix=dict(img='images/val2017/'),
data_root='/home/wrf/Dara/coco2017label/coco/',
filter_cfg=dict(filter_empty_gt=False, min_size=32),
type='YOLOv5CocoDataset'),
pipeline=[
dict(backend_args=None, type='LoadImageFromFile'),
dict(scale=(
640,
640,
), type='YOLOv5KeepRatioResize'),
dict(
allow_scale_up=False,
pad_val=dict(img=114),
scale=(
640,
640,
),
type='LetterResize'),
dict(scope='mmdet', type='LoadAnnotations', with_bbox=True),
dict(type='LoadText'),
dict(
meta_keys=(
'img_id',
'img_path',
'ori_shape',
'img_shape',
'scale_factor',
'pad_param',
'texts',
),
type='mmdet.PackDetInputs'),
],
type='MultiModalDataset'),
drop_last=False,
num_workers=2,
persistent_workers=True,
pin_memory=True,
sampler=dict(shuffle=False, type='DefaultSampler'))
val_evaluator = dict(
ann_file=
'/home/wrf/Dara/coco2017label/coco/annotations/instances_val2017.json',
metric='bbox',
proposal_nums=(
100,
1,
10,
),
type='mmdet.CocoMetric')
val_interval_stage2 = 1
val_num_workers = 2
vis_backends = [
dict(type='LocalVisBackend'),
]
visualizer = dict(
name='visualizer',
type='mmdet.DetLocalVisualizer',
vis_backends=[
dict(type='LocalVisBackend'),
])
weight_decay = 0.05
widen_factor = 1.0
work_dir = 'log'
2024/12/22 02:04:26 - mmengine - INFO - Distributed training is not used, all SyncBatchNorm (SyncBN) layers in the model will be automatically reverted to BatchNormXd layers if they are used.
2024/12/22 02:04:26 - mmengine - INFO - Hooks will be executed in the following order:
before_run:
(VERY_HIGH ) RuntimeInfoHook
(49 ) EMAHook
(BELOW_NORMAL) LoggerHook
after_load_checkpoint:
(49 ) EMAHook
before_train:
(9 ) YOLOv5ParamSchedulerHook
(VERY_HIGH ) RuntimeInfoHook
(49 ) EMAHook
(NORMAL ) IterTimerHook
(VERY_LOW ) CheckpointHook
before_train_epoch:
(VERY_HIGH ) RuntimeInfoHook
(NORMAL ) IterTimerHook
(NORMAL ) DistSamplerSeedHook
(NORMAL ) PipelineSwitchHook
before_train_iter:
(9 ) YOLOv5ParamSchedulerHook
(VERY_HIGH ) RuntimeInfoHook
(NORMAL ) IterTimerHook
after_train_iter:
(9 ) YOLOv5ParamSchedulerHook
(VERY_HIGH ) RuntimeInfoHook
(49 ) EMAHook
(NORMAL ) IterTimerHook
(BELOW_NORMAL) LoggerHook
(VERY_LOW ) CheckpointHook
after_train_epoch:
(9 ) YOLOv5ParamSchedulerHook
(NORMAL ) IterTimerHook
(VERY_LOW ) CheckpointHook
before_val:
(VERY_HIGH ) RuntimeInfoHook
before_val_epoch:
(49 ) EMAHook
(NORMAL ) IterTimerHook
before_val_iter:
(NORMAL ) IterTimerHook
after_val_iter:
(NORMAL ) IterTimerHook
(NORMAL ) DetVisualizationHook
(BELOW_NORMAL) LoggerHook
after_val_epoch:
(9 ) YOLOv5ParamSchedulerHook
(VERY_HIGH ) RuntimeInfoHook
(49 ) EMAHook
(NORMAL ) IterTimerHook
(BELOW_NORMAL) LoggerHook
(VERY_LOW ) CheckpointHook
after_val:
(VERY_HIGH ) RuntimeInfoHook
before_save_checkpoint:
(49 ) EMAHook
after_train:
(VERY_HIGH ) RuntimeInfoHook
(VERY_LOW ) CheckpointHook
before_test:
(VERY_HIGH ) RuntimeInfoHook
before_test_epoch:
(49 ) EMAHook
(NORMAL ) IterTimerHook
before_test_iter:
(NORMAL ) IterTimerHook
after_test_iter:
(NORMAL ) IterTimerHook
(NORMAL ) DetVisualizationHook
(BELOW_NORMAL) LoggerHook
after_test_epoch:
(VERY_HIGH ) RuntimeInfoHook
(49 ) EMAHook
(NORMAL ) IterTimerHook
(BELOW_NORMAL) LoggerHook
after_test:
(VERY_HIGH ) RuntimeInfoHook
after_run:
(BELOW_NORMAL) LoggerHook
2024/12/22 02:04:42 - mmengine - INFO - paramwise_options -- backbone.text_model.context_to_text_projection.weight:lr=2.0000000000000002e-07
2024/12/22 02:04:42 - mmengine - INFO - paramwise_options -- backbone.text_model.context_to_text_projection.weight:weight_decay=0.05
2024/12/22 02:04:42 - mmengine - INFO - paramwise_options -- backbone.text_model.context_to_text_projection.weight:lr_mult=0.01
2024/12/22 02:04:42 - mmengine - INFO - paramwise_options -- backbone.text_model.context_to_text_projection.bias:lr=2.0000000000000002e-07
2024/12/22 02:04:42 - mmengine - INFO - paramwise_options -- backbone.text_model.context_to_text_projection.bias:weight_decay=0.05
2024/12/22 02:04:42 - mmengine - INFO - paramwise_options -- backbone.text_model.context_to_text_projection.bias:lr_mult=0.01
2024/12/22 02:04:42 - mmengine - INFO - paramwise_options -- backbone.text_model.prompt_learner.ctx:lr=2.0000000000000002e-07
2024/12/22 02:04:42 - mmengine - INFO - paramwise_options -- backbone.text_model.prompt_learner.ctx:weight_decay=0.05
2024/12/22 02:04:42 - mmengine - INFO - paramwise_options -- backbone.text_model.prompt_learner.ctx:lr_mult=0.01
2024/12/22 02:04:42 - mmengine - INFO - paramwise_options -- backbone.text_model.prompt_learner.meta_net.0.weight:lr=2.0000000000000002e-07
2024/12/22 02:04:42 - mmengine - INFO - paramwise_options -- backbone.text_model.prompt_learner.meta_net.0.weight:weight_decay=0.05
2024/12/22 02:04:42 - mmengine - INFO - paramwise_options -- backbone.text_model.prompt_learner.meta_net.0.weight:lr_mult=0.01
2024/12/22 02:04:42 - mmengine - INFO - paramwise_options -- backbone.text_model.prompt_learner.meta_net.0.bias:lr=2.0000000000000002e-07
2024/12/22 02:04:42 - mmengine - INFO - paramwise_options -- backbone.text_model.prompt_learner.meta_net.0.bias:weight_decay=0.05
2024/12/22 02:04:42 - mmengine - INFO - paramwise_options -- backbone.text_model.prompt_learner.meta_net.0.bias:lr_mult=0.01
2024/12/22 02:04:42 - mmengine - INFO - paramwise_options -- backbone.text_model.prompt_learner.meta_net.2.weight:lr=2.0000000000000002e-07
2024/12/22 02:04:42 - mmengine - INFO - paramwise_options -- backbone.text_model.prompt_learner.meta_net.2.weight:weight_decay=0.05
2024/12/22 02:04:42 - mmengine - INFO - paramwise_options -- backbone.text_model.prompt_learner.meta_net.2.weight:lr_mult=0.01
2024/12/22 02:04:42 - mmengine - INFO - paramwise_options -- backbone.text_model.prompt_learner.meta_net.2.bias:lr=2.0000000000000002e-07
2024/12/22 02:04:42 - mmengine - INFO - paramwise_options -- backbone.text_model.prompt_learner.meta_net.2.bias:weight_decay=0.05
2024/12/22 02:04:42 - mmengine - INFO - paramwise_options -- backbone.text_model.prompt_learner.meta_net.2.bias:lr_mult=0.01
2024/12/22 02:04:42 - mmengine - INFO - paramwise_options -- bbox_head.head_module.cls_contrasts.0.logit_scale:lr=2e-05
2024/12/22 02:04:42 - mmengine - INFO - paramwise_options -- bbox_head.head_module.cls_contrasts.0.logit_scale:weight_decay=0.0
2024/12/22 02:04:42 - mmengine - INFO - paramwise_options -- bbox_head.head_module.cls_contrasts.1.logit_scale:lr=2e-05
2024/12/22 02:04:42 - mmengine - INFO - paramwise_options -- bbox_head.head_module.cls_contrasts.1.logit_scale:weight_decay=0.0
2024/12/22 02:04:42 - mmengine - INFO - paramwise_options -- bbox_head.head_module.cls_contrasts.2.logit_scale:lr=2e-05
2024/12/22 02:04:42 - mmengine - INFO - paramwise_options -- bbox_head.head_module.cls_contrasts.2.logit_scale:weight_decay=0.0
Name of parameter - Initialization information

backbone.image_model.stem.conv.weight - torch.Size([64, 3, 3, 3]): Initialized by user-defined init_weights in YOLOv8CSPDarknet
backbone.image_model.stem.bn.weight - torch.Size([64]): The value is the same before and after calling init_weights of YOLOWorldDetector
backbone.image_model.stem.bn.bias - torch.Size([64]): The value is the same before and after calling init_weights of YOLOWorldDetector
backbone.image_model.stage1.0.conv.weight - torch.Size([128, 64, 3, 3]): Initialized by user-defined init_weights in YOLOv8CSPDarknet
backbone.image_model.stage1.0.bn.weight - torch.Size([128]): The value is the same before and after calling init_weights of YOLOWorldDetector
backbone.image_model.stage1.0.bn.bias - torch.Size([128]): The value is the same before and after calling init_weights of YOLOWorldDetector
backbone.image_model.stage1.1.main_conv.conv.weight - torch.Size([128, 128, 1, 1]): Initialized by user-defined init_weights in YOLOv8CSPDarknet
backbone.image_model.stage1.1.main_conv.bn.weight - torch.Size([128]): The value is the same before and after calling init_weights of YOLOWorldDetector
backbone.image_model.stage1.1.main_conv.bn.bias - torch.Size([128]): The value is the same before and after calling init_weights of YOLOWorldDetector
backbone.image_model.stage1.1.final_conv.conv.weight - torch.Size([128, 320, 1, 1]): Initialized by user-defined init_weights in YOLOv8CSPDarknet
backbone.image_model.stage1.1.final_conv.bn.weight - torch.Size([128]): The value is the same before and after calling init_weights of YOLOWorldDetector
backbone.image_model.stage1.1.final_conv.bn.bias - torch.Size([128]): The value is the same before and after calling init_weights of YOLOWorldDetector
backbone.image_model.stage1.1.blocks.0.conv1.conv.weight - torch.Size([64, 64, 3, 3]): Initialized by user-defined init_weights in YOLOv8CSPDarknet
backbone.image_model.stage1.1.blocks.0.conv1.bn.weight - torch.Size([64]): The value is the same before and after calling init_weights of YOLOWorldDetector
backbone.image_model.stage1.1.blocks.0.conv1.bn.bias - torch.Size([64]): The value is the same before and after calling init_weights of YOLOWorldDetector
(The per-parameter initialization report continues in this same pattern for the remaining image-backbone stages, stage1 through stage4, and for the CLIP text model: every conv weight is "Initialized by user-defined init_weights in YOLOv8CSPDarknet", while all BatchNorm parameters and all backbone.text_model.* parameters report "The value is the same before and after calling init_weights of YOLOWorldDetector".)
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.3.self_attn.q_proj.weight - torch.Size([512, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.3.self_attn.q_proj.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.3.self_attn.out_proj.weight - torch.Size([512, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.3.self_attn.out_proj.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.3.layer_norm1.weight - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.3.layer_norm1.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.3.mlp.fc1.weight - torch.Size([2048, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.3.mlp.fc1.bias - torch.Size([2048]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.3.mlp.fc2.weight - torch.Size([512, 2048]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.3.mlp.fc2.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.3.layer_norm2.weight - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.3.layer_norm2.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.4.self_attn.k_proj.weight - torch.Size([512, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.4.self_attn.k_proj.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.4.self_attn.v_proj.weight - torch.Size([512, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.4.self_attn.v_proj.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.4.self_attn.q_proj.weight - torch.Size([512, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.4.self_attn.q_proj.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.4.self_attn.out_proj.weight - torch.Size([512, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.4.self_attn.out_proj.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.4.layer_norm1.weight - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.4.layer_norm1.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.4.mlp.fc1.weight - torch.Size([2048, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.4.mlp.fc1.bias - torch.Size([2048]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.4.mlp.fc2.weight - torch.Size([512, 2048]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.4.mlp.fc2.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.4.layer_norm2.weight - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.4.layer_norm2.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.5.self_attn.k_proj.weight - torch.Size([512, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.5.self_attn.k_proj.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.5.self_attn.v_proj.weight - torch.Size([512, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.5.self_attn.v_proj.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.5.self_attn.q_proj.weight - torch.Size([512, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.5.self_attn.q_proj.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.5.self_attn.out_proj.weight - torch.Size([512, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.5.self_attn.out_proj.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.5.layer_norm1.weight - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.5.layer_norm1.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.5.mlp.fc1.weight - torch.Size([2048, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.5.mlp.fc1.bias - torch.Size([2048]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.5.mlp.fc2.weight - torch.Size([512, 2048]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.5.mlp.fc2.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.5.layer_norm2.weight - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.5.layer_norm2.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.6.self_attn.k_proj.weight - torch.Size([512, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.6.self_attn.k_proj.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.6.self_attn.v_proj.weight - torch.Size([512, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.6.self_attn.v_proj.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.6.self_attn.q_proj.weight - torch.Size([512, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.6.self_attn.q_proj.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.6.self_attn.out_proj.weight - torch.Size([512, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.6.self_attn.out_proj.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.6.layer_norm1.weight - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.6.layer_norm1.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.6.mlp.fc1.weight - torch.Size([2048, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.6.mlp.fc1.bias - torch.Size([2048]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.6.mlp.fc2.weight - torch.Size([512, 2048]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.6.mlp.fc2.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.6.layer_norm2.weight - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.6.layer_norm2.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.7.self_attn.k_proj.weight - torch.Size([512, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.7.self_attn.k_proj.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.7.self_attn.v_proj.weight - torch.Size([512, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.7.self_attn.v_proj.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.7.self_attn.q_proj.weight - torch.Size([512, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.7.self_attn.q_proj.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.7.self_attn.out_proj.weight - torch.Size([512, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.7.self_attn.out_proj.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.7.layer_norm1.weight - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.7.layer_norm1.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.7.mlp.fc1.weight - torch.Size([2048, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.7.mlp.fc1.bias - torch.Size([2048]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.7.mlp.fc2.weight - torch.Size([512, 2048]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.7.mlp.fc2.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.7.layer_norm2.weight - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.7.layer_norm2.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.8.self_attn.k_proj.weight - torch.Size([512, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.8.self_attn.k_proj.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.8.self_attn.v_proj.weight - torch.Size([512, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.8.self_attn.v_proj.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.8.self_attn.q_proj.weight - torch.Size([512, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.8.self_attn.q_proj.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.8.self_attn.out_proj.weight - torch.Size([512, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.8.self_attn.out_proj.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.8.layer_norm1.weight - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.8.layer_norm1.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.8.mlp.fc1.weight - torch.Size([2048, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.8.mlp.fc1.bias - torch.Size([2048]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.8.mlp.fc2.weight - torch.Size([512, 2048]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.8.mlp.fc2.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.8.layer_norm2.weight - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.8.layer_norm2.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.9.self_attn.k_proj.weight - torch.Size([512, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.9.self_attn.k_proj.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.9.self_attn.v_proj.weight - torch.Size([512, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.9.self_attn.v_proj.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.9.self_attn.q_proj.weight - torch.Size([512, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.9.self_attn.q_proj.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.9.self_attn.out_proj.weight - torch.Size([512, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.9.self_attn.out_proj.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.9.layer_norm1.weight - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.9.layer_norm1.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.9.mlp.fc1.weight - torch.Size([2048, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.9.mlp.fc1.bias - torch.Size([2048]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.9.mlp.fc2.weight - torch.Size([512, 2048]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.9.mlp.fc2.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.9.layer_norm2.weight - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.9.layer_norm2.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.10.self_attn.k_proj.weight - torch.Size([512, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.10.self_attn.k_proj.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.10.self_attn.v_proj.weight - torch.Size([512, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.10.self_attn.v_proj.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.10.self_attn.q_proj.weight - torch.Size([512, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.10.self_attn.q_proj.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.10.self_attn.out_proj.weight - torch.Size([512, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.10.self_attn.out_proj.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.10.layer_norm1.weight - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.10.layer_norm1.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.10.mlp.fc1.weight - torch.Size([2048, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.10.mlp.fc1.bias - torch.Size([2048]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.10.mlp.fc2.weight - torch.Size([512, 2048]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.10.mlp.fc2.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.10.layer_norm2.weight - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.10.layer_norm2.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.11.self_attn.k_proj.weight - torch.Size([512, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.11.self_attn.k_proj.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.11.self_attn.v_proj.weight - torch.Size([512, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.11.self_attn.v_proj.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.11.self_attn.q_proj.weight - torch.Size([512, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.11.self_attn.q_proj.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.11.self_attn.out_proj.weight - torch.Size([512, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.11.self_attn.out_proj.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.11.layer_norm1.weight - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.11.layer_norm1.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.11.mlp.fc1.weight - torch.Size([2048, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.11.mlp.fc1.bias - torch.Size([2048]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.11.mlp.fc2.weight - torch.Size([512, 2048]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.11.mlp.fc2.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.11.layer_norm2.weight - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.encoder.layers.11.layer_norm2.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.final_layer_norm.weight - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_model.final_layer_norm.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.model.text_projection.weight - torch.Size([512, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.context_to_text_projection.weight - torch.Size([512, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.context_to_text_projection.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.prompt_learner.ctx - torch.Size([16, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.prompt_learner.meta_net.0.weight - torch.Size([256, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.prompt_learner.meta_net.0.bias - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.prompt_learner.meta_net.2.weight - torch.Size([512, 256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbackbone.text_model.prompt_learner.meta_net.2.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.0.main_conv.conv.weight - torch.Size([512, 1024, 1, 1]):
Initialized by user-defined
init_weights
in YOLOWorldPAFPNneck.top_down_layers.0.main_conv.bn.weight - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.0.main_conv.bn.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.0.final_conv.conv.weight - torch.Size([512, 1536, 1, 1]):
Initialized by user-defined
init_weights
in YOLOWorldPAFPNneck.top_down_layers.0.final_conv.bn.weight - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.0.final_conv.bn.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.0.blocks.0.conv1.conv.weight - torch.Size([256, 256, 3, 3]):
Initialized by user-defined
init_weights
in YOLOWorldPAFPNneck.top_down_layers.0.blocks.0.conv1.bn.weight - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.0.blocks.0.conv1.bn.bias - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.0.blocks.0.conv2.conv.weight - torch.Size([256, 256, 3, 3]):
Initialized by user-defined
init_weights
in YOLOWorldPAFPNneck.top_down_layers.0.blocks.0.conv2.bn.weight - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.0.blocks.0.conv2.bn.bias - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.0.blocks.1.conv1.conv.weight - torch.Size([256, 256, 3, 3]):
Initialized by user-defined
init_weights
in YOLOWorldPAFPNneck.top_down_layers.0.blocks.1.conv1.bn.weight - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.0.blocks.1.conv1.bn.bias - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.0.blocks.1.conv2.conv.weight - torch.Size([256, 256, 3, 3]):
Initialized by user-defined
init_weights
in YOLOWorldPAFPNneck.top_down_layers.0.blocks.1.conv2.bn.weight - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.0.blocks.1.conv2.bn.bias - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.0.blocks.2.conv1.conv.weight - torch.Size([256, 256, 3, 3]):
Initialized by user-defined
init_weights
in YOLOWorldPAFPNneck.top_down_layers.0.blocks.2.conv1.bn.weight - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.0.blocks.2.conv1.bn.bias - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.0.blocks.2.conv2.conv.weight - torch.Size([256, 256, 3, 3]):
Initialized by user-defined
init_weights
in YOLOWorldPAFPNneck.top_down_layers.0.blocks.2.conv2.bn.weight - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.0.blocks.2.conv2.bn.bias - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.0.attn_block.bias - torch.Size([8]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.0.attn_block.guide_fc.weight - torch.Size([256, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.0.attn_block.guide_fc.bias - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.0.attn_block.project_conv.conv.weight - torch.Size([256, 256, 3, 3]):
Initialized by user-defined
init_weights
in YOLOWorldPAFPNneck.top_down_layers.0.attn_block.project_conv.bn.weight - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.0.attn_block.project_conv.bn.bias - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.1.main_conv.conv.weight - torch.Size([256, 768, 1, 1]):
Initialized by user-defined
init_weights
in YOLOWorldPAFPNneck.top_down_layers.1.main_conv.bn.weight - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.1.main_conv.bn.bias - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.1.final_conv.conv.weight - torch.Size([256, 768, 1, 1]):
Initialized by user-defined
init_weights
in YOLOWorldPAFPNneck.top_down_layers.1.final_conv.bn.weight - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.1.final_conv.bn.bias - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.1.blocks.0.conv1.conv.weight - torch.Size([128, 128, 3, 3]):
Initialized by user-defined
init_weights
in YOLOWorldPAFPNneck.top_down_layers.1.blocks.0.conv1.bn.weight - torch.Size([128]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.1.blocks.0.conv1.bn.bias - torch.Size([128]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.1.blocks.0.conv2.conv.weight - torch.Size([128, 128, 3, 3]):
Initialized by user-defined
init_weights
in YOLOWorldPAFPNneck.top_down_layers.1.blocks.0.conv2.bn.weight - torch.Size([128]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.1.blocks.0.conv2.bn.bias - torch.Size([128]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.1.blocks.1.conv1.conv.weight - torch.Size([128, 128, 3, 3]):
Initialized by user-defined
init_weights
in YOLOWorldPAFPNneck.top_down_layers.1.blocks.1.conv1.bn.weight - torch.Size([128]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.1.blocks.1.conv1.bn.bias - torch.Size([128]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.1.blocks.1.conv2.conv.weight - torch.Size([128, 128, 3, 3]):
Initialized by user-defined
init_weights
in YOLOWorldPAFPNneck.top_down_layers.1.blocks.1.conv2.bn.weight - torch.Size([128]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.1.blocks.1.conv2.bn.bias - torch.Size([128]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.1.blocks.2.conv1.conv.weight - torch.Size([128, 128, 3, 3]):
Initialized by user-defined
init_weights
in YOLOWorldPAFPNneck.top_down_layers.1.blocks.2.conv1.bn.weight - torch.Size([128]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.1.blocks.2.conv1.bn.bias - torch.Size([128]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.1.blocks.2.conv2.conv.weight - torch.Size([128, 128, 3, 3]):
Initialized by user-defined
init_weights
in YOLOWorldPAFPNneck.top_down_layers.1.blocks.2.conv2.bn.weight - torch.Size([128]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.1.blocks.2.conv2.bn.bias - torch.Size([128]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.1.attn_block.bias - torch.Size([4]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.1.attn_block.guide_fc.weight - torch.Size([128, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.1.attn_block.guide_fc.bias - torch.Size([128]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.1.attn_block.project_conv.conv.weight - torch.Size([128, 128, 3, 3]):
Initialized by user-defined
init_weights
in YOLOWorldPAFPNneck.top_down_layers.1.attn_block.project_conv.bn.weight - torch.Size([128]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.top_down_layers.1.attn_block.project_conv.bn.bias - torch.Size([128]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.downsample_layers.0.conv.weight - torch.Size([256, 256, 3, 3]):
Initialized by user-defined
init_weights
in YOLOWorldPAFPNneck.downsample_layers.0.bn.weight - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.downsample_layers.0.bn.bias - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.downsample_layers.1.conv.weight - torch.Size([512, 512, 3, 3]):
Initialized by user-defined
init_weights
in YOLOWorldPAFPNneck.downsample_layers.1.bn.weight - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.downsample_layers.1.bn.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.0.main_conv.conv.weight - torch.Size([512, 768, 1, 1]):
Initialized by user-defined
init_weights
in YOLOWorldPAFPNneck.bottom_up_layers.0.main_conv.bn.weight - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.0.main_conv.bn.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.0.final_conv.conv.weight - torch.Size([512, 1536, 1, 1]):
Initialized by user-defined
init_weights
in YOLOWorldPAFPNneck.bottom_up_layers.0.final_conv.bn.weight - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.0.final_conv.bn.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.0.blocks.0.conv1.conv.weight - torch.Size([256, 256, 3, 3]):
Initialized by user-defined
init_weights
in YOLOWorldPAFPNneck.bottom_up_layers.0.blocks.0.conv1.bn.weight - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.0.blocks.0.conv1.bn.bias - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.0.blocks.0.conv2.conv.weight - torch.Size([256, 256, 3, 3]):
Initialized by user-defined
init_weights
in YOLOWorldPAFPNneck.bottom_up_layers.0.blocks.0.conv2.bn.weight - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.0.blocks.0.conv2.bn.bias - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.0.blocks.1.conv1.conv.weight - torch.Size([256, 256, 3, 3]):
Initialized by user-defined
init_weights
in YOLOWorldPAFPNneck.bottom_up_layers.0.blocks.1.conv1.bn.weight - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.0.blocks.1.conv1.bn.bias - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.0.blocks.1.conv2.conv.weight - torch.Size([256, 256, 3, 3]):
Initialized by user-defined
init_weights
in YOLOWorldPAFPNneck.bottom_up_layers.0.blocks.1.conv2.bn.weight - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.0.blocks.1.conv2.bn.bias - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.0.blocks.2.conv1.conv.weight - torch.Size([256, 256, 3, 3]):
Initialized by user-defined
init_weights
in YOLOWorldPAFPNneck.bottom_up_layers.0.blocks.2.conv1.bn.weight - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.0.blocks.2.conv1.bn.bias - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.0.blocks.2.conv2.conv.weight - torch.Size([256, 256, 3, 3]):
Initialized by user-defined
init_weights
in YOLOWorldPAFPNneck.bottom_up_layers.0.blocks.2.conv2.bn.weight - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.0.blocks.2.conv2.bn.bias - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.0.attn_block.bias - torch.Size([8]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.0.attn_block.guide_fc.weight - torch.Size([256, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.0.attn_block.guide_fc.bias - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.0.attn_block.project_conv.conv.weight - torch.Size([256, 256, 3, 3]):
Initialized by user-defined
init_weights
in YOLOWorldPAFPNneck.bottom_up_layers.0.attn_block.project_conv.bn.weight - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.0.attn_block.project_conv.bn.bias - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.1.main_conv.conv.weight - torch.Size([512, 1024, 1, 1]):
Initialized by user-defined
init_weights
in YOLOWorldPAFPNneck.bottom_up_layers.1.main_conv.bn.weight - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.1.main_conv.bn.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.1.final_conv.conv.weight - torch.Size([512, 1536, 1, 1]):
Initialized by user-defined
init_weights
in YOLOWorldPAFPNneck.bottom_up_layers.1.final_conv.bn.weight - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.1.final_conv.bn.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.1.blocks.0.conv1.conv.weight - torch.Size([256, 256, 3, 3]):
Initialized by user-defined
init_weights
in YOLOWorldPAFPNneck.bottom_up_layers.1.blocks.0.conv1.bn.weight - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.1.blocks.0.conv1.bn.bias - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.1.blocks.0.conv2.conv.weight - torch.Size([256, 256, 3, 3]):
Initialized by user-defined
init_weights
in YOLOWorldPAFPNneck.bottom_up_layers.1.blocks.0.conv2.bn.weight - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.1.blocks.0.conv2.bn.bias - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.1.blocks.1.conv1.conv.weight - torch.Size([256, 256, 3, 3]):
Initialized by user-defined
init_weights
in YOLOWorldPAFPNneck.bottom_up_layers.1.blocks.1.conv1.bn.weight - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.1.blocks.1.conv1.bn.bias - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.1.blocks.1.conv2.conv.weight - torch.Size([256, 256, 3, 3]):
Initialized by user-defined
init_weights
in YOLOWorldPAFPNneck.bottom_up_layers.1.blocks.1.conv2.bn.weight - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.1.blocks.1.conv2.bn.bias - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.1.blocks.2.conv1.conv.weight - torch.Size([256, 256, 3, 3]):
Initialized by user-defined
init_weights
in YOLOWorldPAFPNneck.bottom_up_layers.1.blocks.2.conv1.bn.weight - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.1.blocks.2.conv1.bn.bias - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.1.blocks.2.conv2.conv.weight - torch.Size([256, 256, 3, 3]):
Initialized by user-defined
init_weights
in YOLOWorldPAFPNneck.bottom_up_layers.1.blocks.2.conv2.bn.weight - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.1.blocks.2.conv2.bn.bias - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.1.attn_block.bias - torch.Size([8]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.1.attn_block.guide_fc.weight - torch.Size([256, 512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.1.attn_block.guide_fc.bias - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.1.attn_block.project_conv.conv.weight - torch.Size([256, 256, 3, 3]):
Initialized by user-defined
init_weights
in YOLOWorldPAFPNneck.bottom_up_layers.1.attn_block.project_conv.bn.weight - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorneck.bottom_up_layers.1.attn_block.project_conv.bn.bias - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.cls_preds.0.0.conv.weight - torch.Size([256, 256, 3, 3]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.cls_preds.0.0.bn.weight - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.cls_preds.0.0.bn.bias - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.cls_preds.0.1.conv.weight - torch.Size([256, 256, 3, 3]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.cls_preds.0.1.bn.weight - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.cls_preds.0.1.bn.bias - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.cls_preds.0.2.weight - torch.Size([512, 256, 1, 1]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.cls_preds.0.2.bias - torch.Size([512]):
Initialized by user-defined
init_weights
in YOLOWorldHeadModulebbox_head.head_module.cls_preds.1.0.conv.weight - torch.Size([256, 512, 3, 3]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.cls_preds.1.0.bn.weight - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.cls_preds.1.0.bn.bias - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.cls_preds.1.1.conv.weight - torch.Size([256, 256, 3, 3]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.cls_preds.1.1.bn.weight - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.cls_preds.1.1.bn.bias - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.cls_preds.1.2.weight - torch.Size([512, 256, 1, 1]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.cls_preds.1.2.bias - torch.Size([512]):
Initialized by user-defined
init_weights
in YOLOWorldHeadModulebbox_head.head_module.cls_preds.2.0.conv.weight - torch.Size([256, 512, 3, 3]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.cls_preds.2.0.bn.weight - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.cls_preds.2.0.bn.bias - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.cls_preds.2.1.conv.weight - torch.Size([256, 256, 3, 3]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.cls_preds.2.1.bn.weight - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.cls_preds.2.1.bn.bias - torch.Size([256]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.cls_preds.2.2.weight - torch.Size([512, 256, 1, 1]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.cls_preds.2.2.bias - torch.Size([512]):
Initialized by user-defined
init_weights
in YOLOWorldHeadModulebbox_head.head_module.reg_preds.0.0.conv.weight - torch.Size([64, 256, 3, 3]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.reg_preds.0.0.bn.weight - torch.Size([64]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.reg_preds.0.0.bn.bias - torch.Size([64]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.reg_preds.0.1.conv.weight - torch.Size([64, 64, 3, 3]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.reg_preds.0.1.bn.weight - torch.Size([64]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.reg_preds.0.1.bn.bias - torch.Size([64]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.reg_preds.0.2.weight - torch.Size([64, 64, 1, 1]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.reg_preds.0.2.bias - torch.Size([64]):
Initialized by user-defined
init_weights
in YOLOWorldHeadModulebbox_head.head_module.reg_preds.1.0.conv.weight - torch.Size([64, 512, 3, 3]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.reg_preds.1.0.bn.weight - torch.Size([64]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.reg_preds.1.0.bn.bias - torch.Size([64]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.reg_preds.1.1.conv.weight - torch.Size([64, 64, 3, 3]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.reg_preds.1.1.bn.weight - torch.Size([64]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.reg_preds.1.1.bn.bias - torch.Size([64]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.reg_preds.1.2.weight - torch.Size([64, 64, 1, 1]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.reg_preds.1.2.bias - torch.Size([64]):
Initialized by user-defined
init_weights
in YOLOWorldHeadModulebbox_head.head_module.reg_preds.2.0.conv.weight - torch.Size([64, 512, 3, 3]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.reg_preds.2.0.bn.weight - torch.Size([64]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.reg_preds.2.0.bn.bias - torch.Size([64]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.reg_preds.2.1.conv.weight - torch.Size([64, 64, 3, 3]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.reg_preds.2.1.bn.weight - torch.Size([64]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.reg_preds.2.1.bn.bias - torch.Size([64]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.reg_preds.2.2.weight - torch.Size([64, 64, 1, 1]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.reg_preds.2.2.bias - torch.Size([64]):
Initialized by user-defined
init_weights
in YOLOWorldHeadModulebbox_head.head_module.cls_contrasts.0.bias - torch.Size([]):
Initialized by user-defined
init_weights
in YOLOWorldHeadModulebbox_head.head_module.cls_contrasts.0.logit_scale - torch.Size([]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.cls_contrasts.0.norm.weight - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.cls_contrasts.0.norm.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.cls_contrasts.1.bias - torch.Size([]):
Initialized by user-defined
init_weights
in YOLOWorldHeadModulebbox_head.head_module.cls_contrasts.1.logit_scale - torch.Size([]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.cls_contrasts.1.norm.weight - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.cls_contrasts.1.norm.bias - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.cls_contrasts.2.bias - torch.Size([]):
Initialized by user-defined
init_weights
in YOLOWorldHeadModulebbox_head.head_module.cls_contrasts.2.logit_scale - torch.Size([]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.cls_contrasts.2.norm.weight - torch.Size([512]):
The value is the same before and after calling
init_weights
of YOLOWorldDetectorbbox_head.head_module.cls_contrasts.2.norm.bias - torch.Size([512]):
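The log above only reports that init_weights left the CLIP text-encoder parameters untouched; it does not show where their values came from. A minimal sketch (plain PyTorch, hypothetical checkpoint path) of how one could inspect the checkpoint used for training and confirm those parameters are actually present with pretrained values rather than a fresh random init:

```python
# Sketch only: inspect the training checkpoint and look up one of the
# text-encoder parameters the init log reported as unchanged.
import torch

ckpt_path = 'pretrained/yolo_world_l.pth'  # placeholder: point this at your real checkpoint
ckpt = torch.load(ckpt_path, map_location='cpu')
state = ckpt.get('state_dict', ckpt)  # MMEngine checkpoints usually nest under 'state_dict'

name = 'backbone.text_model.model.text_model.encoder.layers.0.mlp.fc1.bias'
if name in state:
    p = state[name].float()
    print(name, tuple(p.shape), 'std=%.4f' % p.std().item())
else:
    print(name, 'not found: the text encoder may not be loaded from this checkpoint')
```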