From 1167efd42da1667a71b43734d201dd83dcea13d0 Mon Sep 17 00:00:00 2001
From: zxy <zxy@B-X0GHLVDL-0414.local>
Date: Mon, 16 May 2022 17:13:03 +0800
Subject: [PATCH 01/69] try pr

---
 configs/detection/yolox/yolox_s_8xb16_300e_coco.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
index 36310498..d1164698 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
@@ -179,4 +179,4 @@
         # dict(type='WandbLoggerHookV2'),
     ])
 
-export = dict(use_jit=False)
+export = dict(use_jit=True)

From 9988716216b0382e8422b575866db65a5ef751b4 Mon Sep 17 00:00:00 2001
From: zxy <zxy@B-X0GHLVDL-0414.local>
Date: Tue, 17 May 2022 14:24:27 +0800
Subject: [PATCH 02/69] add attention layers and more loss functions

---
 .../yolox/yolox_s_8xb16_300e_coco.py          |   9 +-
 easycv/models/detection/yolox/ASFF.py         | 128 ++++++++++++
 easycv/models/detection/yolox/attention.py    |  89 +++++++++
 easycv/models/detection/yolox/yolo_head.py    |  35 +++-
 easycv/models/detection/yolox/yolo_pafpn.py   |  39 +++-
 easycv/models/detection/yolox/yolox.py        |   7 +-
 easycv/models/loss/__init__.py                |   1 +
 easycv/models/loss/focal_loss.py              | 188 ++++++++++++++++++
 easycv/models/loss/iou_loss.py                |  67 ++++++-
 tools/eval.py                                 |   4 +-
 10 files changed, 554 insertions(+), 13 deletions(-)
 create mode 100644 easycv/models/detection/yolox/ASFF.py
 create mode 100644 easycv/models/detection/yolox/attention.py
 create mode 100644 easycv/models/loss/focal_loss.py

diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
index d1164698..5c145e50 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
@@ -6,7 +6,11 @@
     num_classes=80,
     model_type='s',  # s m l x tiny nano
     test_conf=0.01,
-    nms_thre=0.65)
+    nms_thre=0.65,
+    use_att=None,
+    obj_loss_type='focal',
+    reg_loss_type= 'ciou'
+)
 
 # s m l x
 img_scale = (640, 640)
@@ -35,7 +39,8 @@
 ]
 
 # dataset settings
-data_root = 'data/coco/'
+data_root = '/apsarapangu/disk5/zxy/data/coco/'
+# data_root = '/data/coco'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 
diff --git a/easycv/models/detection/yolox/ASFF.py b/easycv/models/detection/yolox/ASFF.py
new file mode 100644
index 00000000..9cbc8e6a
--- /dev/null
+++ b/easycv/models/detection/yolox/ASFF.py
@@ -0,0 +1,128 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from easycv.models.backbones.network_blocks import SiLU
+
+def autopad(k, p=None):  # kernel, padding
+    # Pad to 'same'
+    if p is None:
+        p = k // 2 if isinstance(k, int) else [x // 2 for x in k]  # auto-pad
+    return p
+
+
+class Conv(nn.Module):
+    # Standard convolution
+    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
+        super(Conv, self).__init__()
+        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
+        self.bn = nn.BatchNorm2d(c2)
+        self.act = SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())
+
+    def forward(self, x):
+        return self.act(self.bn(self.conv(x)))
+
+    def forward_fuse(self, x):
+        return self.act(self.conv(x))
+
+
+class ASFF(nn.Module):
+    def __init__(self, level, multiplier=1, rfb=False, vis=False, act_cfg=True):
+        """
+        multiplier should be 1, 0.5
+        which means, the channel of ASFF can be
+        512, 256, 128 -> multiplier=0.5
+        1024, 512, 256 -> multiplier=1
+        For even smaller, you need change code manually.
+        """
+        super(ASFF, self).__init__()
+        self.level = level
+        self.dim = [int(1024 * multiplier), int(512 * multiplier),
+                    int(256 * multiplier)]
+        # print(self.dim)
+
+        self.inter_dim = self.dim[self.level]
+        if level == 0:
+            self.stride_level_1 = Conv(int(512 * multiplier), self.inter_dim, 3, 2)
+
+            self.stride_level_2 = Conv(int(256 * multiplier), self.inter_dim, 3, 2)
+
+            self.expand = Conv(self.inter_dim, int(
+                1024 * multiplier), 3, 1)
+        elif level == 1:
+            self.compress_level_0 = Conv(
+                int(1024 * multiplier), self.inter_dim, 1, 1)
+            self.stride_level_2 = Conv(
+                int(256 * multiplier), self.inter_dim, 3, 2)
+            self.expand = Conv(self.inter_dim, int(512 * multiplier), 3, 1)
+        elif level == 2:
+            self.compress_level_0 = Conv(
+                int(1024 * multiplier), self.inter_dim, 1, 1)
+            self.compress_level_1 = Conv(
+                int(512 * multiplier), self.inter_dim, 1, 1)
+            self.expand = Conv(self.inter_dim, int(
+                256 * multiplier), 3, 1)
+
+        # when adding rfb, we use half the number of channels to save memory
+        compress_c = 8 if rfb else 16
+        self.weight_level_0 = Conv(
+            self.inter_dim, compress_c, 1, 1)
+        self.weight_level_1 = Conv(
+            self.inter_dim, compress_c, 1, 1)
+        self.weight_level_2 = Conv(
+            self.inter_dim, compress_c, 1, 1)
+
+        self.weight_levels = Conv(
+            compress_c * 3, 3, 1, 1)
+        self.vis = vis
+
+    def forward(self, x):  # l,m,s
+        """
+        #
+        256, 512, 1024
+        from small -> large
+        """
+        x_level_0 = x[2]  # largest feature level (most channels)
+        x_level_1 = x[1]  # middle feature level
+        x_level_2 = x[0]  # smallest feature level (fewest channels)
+
+        if self.level == 0:
+            level_0_resized = x_level_0
+            level_1_resized = self.stride_level_1(x_level_1)
+            level_2_downsampled_inter = F.max_pool2d(
+                x_level_2, 3, stride=2, padding=1)
+            level_2_resized = self.stride_level_2(level_2_downsampled_inter)
+        elif self.level == 1:
+            level_0_compressed = self.compress_level_0(x_level_0)
+            level_0_resized = F.interpolate(
+                level_0_compressed, scale_factor=2, mode='nearest')
+            level_1_resized = x_level_1
+            level_2_resized = self.stride_level_2(x_level_2)
+        elif self.level == 2:
+            level_0_compressed = self.compress_level_0(x_level_0)
+            level_0_resized = F.interpolate(
+                level_0_compressed, scale_factor=4, mode='nearest')
+            x_level_1_compressed = self.compress_level_1(x_level_1)
+            level_1_resized = F.interpolate(
+                x_level_1_compressed, scale_factor=2, mode='nearest')
+            level_2_resized = x_level_2
+
+        level_0_weight_v = self.weight_level_0(level_0_resized)
+        level_1_weight_v = self.weight_level_1(level_1_resized)
+        level_2_weight_v = self.weight_level_2(level_2_resized)
+
+        levels_weight_v = torch.cat(
+            (level_0_weight_v, level_1_weight_v, level_2_weight_v), 1)
+        levels_weight = self.weight_levels(levels_weight_v)
+        levels_weight = F.softmax(levels_weight, dim=1)
+
+        fused_out_reduced = level_0_resized * levels_weight[:, 0:1, :, :] + \
+                            level_1_resized * levels_weight[:, 1:2, :, :] + \
+                            level_2_resized * levels_weight[:, 2:, :, :]
+
+        out = self.expand(fused_out_reduced)
+
+        if self.vis:
+            return out, levels_weight, fused_out_reduced.sum(dim=1)
+        else:
+            return out
+
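A minimal shape sketch for the new ASFF block (illustrative only, not part of the diff; channel widths assume multiplier=0.5, i.e. the YOLOX-s setting, and a 640x640 input):

    import torch
    from easycv.models.detection.yolox.ASFF import ASFF

    # PAFPN outputs ordered as the module expects: (stride-8, stride-16, stride-32)
    feats = (torch.randn(1, 128, 80, 80),   # 256 * 0.5 channels
             torch.randn(1, 256, 40, 40),   # 512 * 0.5 channels
             torch.randn(1, 512, 20, 20))   # 1024 * 0.5 channels

    asff_0 = ASFF(level=0, multiplier=0.5)  # fuses all three maps into the stride-32 level
    out0 = asff_0(feats)                    # -> torch.Size([1, 512, 20, 20])
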
diff --git a/easycv/models/detection/yolox/attention.py b/easycv/models/detection/yolox/attention.py
new file mode 100644
index 00000000..ad6889d0
--- /dev/null
+++ b/easycv/models/detection/yolox/attention.py
@@ -0,0 +1,89 @@
+import torch
+import torch.nn as nn
+import math
+
+
+# SE
+class SE(nn.Module):
+    def __init__(self, channel, ratio=16):
+        super(SE, self).__init__()
+        self.avg_pool = nn.AdaptiveAvgPool2d(1)
+        self.fc = nn.Sequential(
+            nn.Linear(channel, channel // ratio, bias=False),
+            nn.ReLU(inplace=True),
+            nn.Linear(channel // ratio, channel, bias=False),
+            nn.Sigmoid()
+        )
+
+    def forward(self, x):
+        b, c, _, _ = x.size()
+        y = self.avg_pool(x).view(b, c)
+        y = self.fc(y).view(b, c, 1, 1)
+        return x * y
+
+
+class ChannelAttention(nn.Module):
+    def __init__(self, in_planes, ratio=8):
+        super(ChannelAttention, self).__init__()
+        self.avg_pool = nn.AdaptiveAvgPool2d(1)
+        self.max_pool = nn.AdaptiveMaxPool2d(1)
+
+        self.fc1 = nn.Conv2d(in_planes, in_planes // ratio, 1, bias=False)
+        self.relu1 = nn.ReLU()
+        self.fc2 = nn.Conv2d(in_planes // ratio, in_planes, 1, bias=False)
+
+        self.sigmoid = nn.Sigmoid()
+
+    def forward(self, x):
+        avg_out = self.fc2(self.relu1(self.fc1(self.avg_pool(x))))
+        max_out = self.fc2(self.relu1(self.fc1(self.max_pool(x))))
+        out = avg_out + max_out
+        return self.sigmoid(out)
+
+
+class SpatialAttention(nn.Module):
+    def __init__(self, kernel_size=7):
+        super(SpatialAttention, self).__init__()
+
+        assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
+        padding = 3 if kernel_size == 7 else 1
+        self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
+        self.sigmoid = nn.Sigmoid()
+
+    def forward(self, x):
+        avg_out = torch.mean(x, dim=1, keepdim=True)
+        max_out, _ = torch.max(x, dim=1, keepdim=True)
+        x = torch.cat([avg_out, max_out], dim=1)
+        x = self.conv1(x)
+        return self.sigmoid(x)
+
+
+# CBAM
+class CBAM(nn.Module):
+    def __init__(self, channel, ratio=8, kernel_size=7):
+        super(CBAM, self).__init__()
+        self.channelattention = ChannelAttention(channel, ratio=ratio)
+        self.spatialattention = SpatialAttention(kernel_size=kernel_size)
+
+    def forward(self, x):
+        x = x * self.channelattention(x)
+        x = x * self.spatialattention(x)
+        return x
+
+
+class ECA(nn.Module):
+    def __init__(self, channel, b=1, gamma=2):
+        super(ECA, self).__init__()
+        kernel_size = int(abs((math.log(channel, 2) + b) / gamma))
+        kernel_size = kernel_size if kernel_size % 2 else kernel_size + 1
+
+        self.avg_pool = nn.AdaptiveAvgPool2d(1)
+        self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=(kernel_size - 1) // 2, bias=False)
+        self.sigmoid = nn.Sigmoid()
+
+    def forward(self, x):
+        y = self.avg_pool(x)
+        y = self.conv(y.squeeze(-1).transpose(-1, -2)).transpose(-1, -2).unsqueeze(-1)
+        y = self.sigmoid(y)
+        return x * y.expand_as(x)
+
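A quick sketch of dropping one of the new attention blocks onto a feature map (illustrative only; SE and ECA take the same (B, C, H, W) input and preserve the shape just like CBAM):

    import torch
    from easycv.models.detection.yolox.attention import CBAM

    feat = torch.randn(2, 256, 40, 40)
    cbam = CBAM(channel=256, ratio=8, kernel_size=7)
    out = cbam(feat)                 # channel attention, then spatial attention
    assert out.shape == feat.shape   # attention reweights features, shape is unchanged
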
diff --git a/easycv/models/detection/yolox/yolo_head.py b/easycv/models/detection/yolox/yolo_head.py
index b5dcfde1..8e0bccb7 100644
--- a/easycv/models/detection/yolox/yolo_head.py
+++ b/easycv/models/detection/yolox/yolo_head.py
@@ -10,6 +10,7 @@
 from easycv.models.backbones.network_blocks import BaseConv, DWConv
 from easycv.models.detection.utils import bboxes_iou
 from easycv.models.loss import IOUloss
+from easycv.models.loss import FocalLoss, VarifocalLoss
 
 
 class YOLOXHead(nn.Module):
@@ -21,7 +22,9 @@ def __init__(self,
                  in_channels=[256, 512, 1024],
                  act='silu',
                  depthwise=False,
-                 stage='CLOUD'):
+                 stage='CLOUD',
+                 obj_loss_type='l1',
+                 reg_loss_type='l1'):
         """
         Args:
             num_classes (int): detection class numbers.
@@ -31,6 +34,8 @@ def __init__(self,
             act (str): activation type of conv. Defalut value: "silu".
             depthwise (bool): whether apply depthwise conv in conv branch. Default value: False.
             stage (str): model stage, distinguish edge head to cloud head. Default value: CLOUD.
+            obj_loss_type (str): loss function for the objectness confidence. Default value: l1.
+            reg_loss_type (str): loss function for box regression. Default value: l1.
         """
         super().__init__()
 
@@ -115,10 +120,26 @@ def __init__(self,
                     padding=0,
                 ))
 
-        self.use_l1 = False
-        self.l1_loss = nn.L1Loss(reduction='none')
+
         self.bcewithlog_loss = nn.BCEWithLogitsLoss(reduction='none')
-        self.iou_loss = IOUloss(reduction='none')
+
+        if reg_loss_type == 'l1':
+            self.use_l1 = True
+            self.l1_loss = nn.L1Loss(reduction='none')
+        else:
+            self.use_l1 = False
+
+        self.iou_loss = IOUloss(reduction='none', loss_type=reg_loss_type)
+
+        if obj_loss_type == 'BCE':
+            self.obj_loss = nn.BCEWithLogitsLoss(reduction='none')
+        elif obj_loss_type == 'focal':
+            self.obj_loss = FocalLoss(reduction='none')
+        elif obj_loss_type == 'v_focal':
+            self.obj_loss = VarifocalLoss(reduction='none')
+        else:
+            raise ValueError('Undefined loss type: {}'.format(obj_loss_type))
+
         self.strides = strides
         self.grids = [torch.zeros(1)] * len(in_channels)
 
@@ -355,6 +376,7 @@ def get_losses(
                     self.num_classes) * pred_ious_this_matching.unsqueeze(-1)
                 obj_target = fg_mask.unsqueeze(-1)
                 reg_target = gt_bboxes_per_image[matched_gt_inds]
+
                 if self.use_l1:
                     l1_target = self.get_l1_target(
                         outputs.new_zeros((num_fg_img, 4)),
@@ -375,17 +397,20 @@ def get_losses(
         reg_targets = torch.cat(reg_targets, 0)
         obj_targets = torch.cat(obj_targets, 0)
         fg_masks = torch.cat(fg_masks, 0)
+
         if self.use_l1:
             l1_targets = torch.cat(l1_targets, 0)
 
         num_fg = max(num_fg, 1)
+
         loss_iou = (self.iou_loss(
             bbox_preds.view(-1, 4)[fg_masks], reg_targets)).sum() / num_fg
-        loss_obj = (self.bcewithlog_loss(obj_preds.view(-1, 1),
+        loss_obj = (self.obj_loss(obj_preds.view(-1, 1),
                                          obj_targets)).sum() / num_fg
         loss_cls = (self.bcewithlog_loss(
             cls_preds.view(-1, self.num_classes)[fg_masks],
             cls_targets)).sum() / num_fg
+
         if self.use_l1:
             loss_l1 = (self.l1_loss(
                 origin_preds.view(-1, 4)[fg_masks], l1_targets)).sum() / num_fg
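
For reference, a sketch of constructing the head with the two new loss switches (illustrative; the positional num_classes/width arguments mirror the call in yolox.py below, here with the YOLOX-s width 0.5):

    from easycv.models.detection.yolox.yolo_head import YOLOXHead

    # 'focal' objectness loss + 'ciou' box regression, as wired up above;
    # obj_loss_type must be one of 'BCE', 'focal' or 'v_focal'
    head = YOLOXHead(80, 0.5, obj_loss_type='focal', reg_loss_type='ciou')
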
diff --git a/easycv/models/detection/yolox/yolo_pafpn.py b/easycv/models/detection/yolox/yolo_pafpn.py
index 57364eb5..ab75123f 100644
--- a/easycv/models/detection/yolox/yolo_pafpn.py
+++ b/easycv/models/detection/yolox/yolo_pafpn.py
@@ -5,7 +5,8 @@
 
 from easycv.models.backbones.darknet import CSPDarknet
 from easycv.models.backbones.network_blocks import BaseConv, CSPLayer, DWConv
-
+from .attention import SE, CBAM, ECA
+from .ASFF import ASFF
 
 class YOLOPAFPN(nn.Module):
     """
@@ -20,6 +21,7 @@ def __init__(
         in_channels=[256, 512, 1024],
         depthwise=False,
         act='silu',
+        use_att=None
     ):
         super().__init__()
         self.backbone = CSPDarknet(depth, width, depthwise=depthwise, act=act)
@@ -86,6 +88,28 @@ def __init__(
             depthwise=depthwise,
             act=act)
 
+        self.use_att = use_att
+
+        if self.use_att is not None and self.use_att != 'ASFF':
+            # add attention layer
+            if self.use_att == 'CBAM':
+                ATT = CBAM
+            elif self.use_att == 'SE':
+                ATT = SE
+            elif self.use_att == 'ECA':
+                ATT = ECA
+            else:
+                raise ValueError('Unknown attention layer: {}'.format(self.use_att))
+
+            self.att_1 = ATT(int(in_channels[2] * width))  # matches the 1024-channel dark5 output
+            self.att_2 = ATT(int(in_channels[1] * width))  # matches the 512-channel dark4 output
+            self.att_3 = ATT(int(in_channels[0] * width))  # matches the 256-channel dark3 output
+
+        if self.use_att == 'ASFF':
+            self.asff_1 = ASFF(level=0, multiplier=width)
+            self.asff_2 = ASFF(level=1, multiplier=width)
+            self.asff_3 = ASFF(level=2, multiplier=width)
+
     def forward(self, input):
         """
         Args:
@@ -100,6 +124,12 @@ def forward(self, input):
         features = [out_features[f] for f in self.in_features]
         [x2, x1, x0] = features
 
+        # add attention
+        if self.use_att is not None and self.use_att != 'ASFF':
+            x0 = self.att_1(x0)
+            x1 = self.att_2(x1)
+            x2 = self.att_3(x2)
+
         fpn_out0 = self.lateral_conv0(x0)  # 1024->512/32
         f_out0 = self.upsample(fpn_out0)  # 512/16
         f_out0 = torch.cat([f_out0, x1], 1)  # 512->1024/16
@@ -119,4 +149,11 @@ def forward(self, input):
         pan_out0 = self.C3_n4(p_out0)  # 1024->1024/32
 
         outputs = (pan_out2, pan_out1, pan_out0)
+
+        if self.use_att == 'ASFF':
+            pan_out0 = self.asff_1(outputs)
+            pan_out1 = self.asff_2(outputs)
+            pan_out2 = self.asff_3(outputs)
+            outputs = (pan_out2, pan_out1, pan_out0)
+
         return outputs
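
A small sketch of the new use_att switch on the neck (illustrative; 0.33/0.5 are the YOLOX-s depth/width pair from the param_map in yolox.py):

    import torch
    from easycv.models.detection.yolox.yolo_pafpn import YOLOPAFPN

    # attention is applied to the three backbone outputs before the FPN fusion;
    # use_att=None keeps the original network, 'ASFF' re-fuses the PAFPN outputs instead
    neck = YOLOPAFPN(0.33, 0.5, in_channels=[256, 512, 1024], use_att='CBAM')
    outs = neck(torch.randn(1, 3, 640, 640))
    print([o.shape for o in outs])   # stride-8, stride-16 and stride-32 feature maps
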
diff --git a/easycv/models/detection/yolox/yolox.py b/easycv/models/detection/yolox/yolox.py
index c637e6d1..e0c1aa31 100644
--- a/easycv/models/detection/yolox/yolox.py
+++ b/easycv/models/detection/yolox/yolox.py
@@ -44,6 +44,9 @@ def __init__(self,
                  test_size: tuple = (640, 640),
                  test_conf: float = 0.01,
                  nms_thre: float = 0.65,
+                 use_att: str = None,
+                 obj_loss_type: str = 'l1',
+                 reg_loss_type: str = 'l1',
                  pretrained: str = None):
         super(YOLOX, self).__init__()
         assert model_type in self.param_map, f'invalid model_type for yolox {model_type}, valid ones are {list(self.param_map.keys())}'
@@ -52,8 +55,8 @@ def __init__(self,
         depth = self.param_map[model_type][0]
         width = self.param_map[model_type][1]
 
-        self.backbone = YOLOPAFPN(depth, width, in_channels=in_channels)
-        self.head = YOLOXHead(num_classes, width, in_channels=in_channels)
+        self.backbone = YOLOPAFPN(depth, width, in_channels=in_channels, use_att=use_att)
+        self.head = YOLOXHead(num_classes, width, in_channels=in_channels, obj_loss_type=obj_loss_type, reg_loss_type=reg_loss_type)
 
         self.apply(init_yolo)  # init_yolo(self)
         self.head.initialize_biases(1e-2)
diff --git a/easycv/models/loss/__init__.py b/easycv/models/loss/__init__.py
index 9685f54b..713eec07 100644
--- a/easycv/models/loss/__init__.py
+++ b/easycv/models/loss/__init__.py
@@ -2,3 +2,4 @@
 from .iou_loss import IOUloss
 from .mse_loss import JointsMSELoss
 from .pytorch_metric_learning import *
+from .focal_loss import FocalLoss, VarifocalLoss
\ No newline at end of file
diff --git a/easycv/models/loss/focal_loss.py b/easycv/models/loss/focal_loss.py
new file mode 100644
index 00000000..bbbf9268
--- /dev/null
+++ b/easycv/models/loss/focal_loss.py
@@ -0,0 +1,188 @@
+import torch.nn as nn
+import torch.nn.functional as F
+import torch
+from ..registry import LOSSES
+
+
+def reduce_loss(loss, reduction):
+    """Reduce loss as specified.
+    Args:
+        loss (Tensor): Elementwise loss tensor.
+        reduction (str): Options are "none", "mean" and "sum".
+    Return:
+        Tensor: Reduced loss tensor.
+    """
+    reduction_enum = F._Reduction.get_enum(reduction)
+    # none: 0, elementwise_mean:1, sum: 2
+    if reduction_enum == 0:
+        return loss
+    elif reduction_enum == 1:
+        return loss.mean()
+    elif reduction_enum == 2:
+        return loss.sum()
+
+
+def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
+    """Apply element-wise weight and reduce loss.
+    Args:
+        loss (Tensor): Element-wise loss.
+        weight (Tensor): Element-wise weights.
+        reduction (str): Same as built-in losses of PyTorch.
+        avg_factor (float): Avarage factor when computing the mean of losses.
+    Returns:
+        Tensor: Processed loss values.
+    """
+    # if weight is specified, apply element-wise weight
+    if weight is not None:
+        loss = loss * weight
+
+    # if avg_factor is not specified, just reduce the loss
+    if avg_factor is None:
+        loss = reduce_loss(loss, reduction)
+    else:
+        # if reduction is mean, then average the loss by avg_factor
+        if reduction == 'mean':
+            loss = loss.sum() / avg_factor
+        # if reduction is 'none', then do nothing, otherwise raise an error
+        elif reduction != 'none':
+            raise ValueError('avg_factor can not be used with reduction="sum"')
+    return loss
+
+
+def varifocal_loss(pred,
+                   target,
+                   weight=None,
+                   alpha=0.75,
+                   gamma=2.0,
+                   iou_weighted=True,
+                   reduction='mean',
+                   avg_factor=None):
+    """`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_
+    Args:
+        pred (torch.Tensor): The prediction with shape (N, C), C is the
+            number of classes
+        target (torch.Tensor): The learning target of the iou-aware
+            classification score with shape (N, C), C is the number of classes.
+        weight (torch.Tensor, optional): The weight of loss for each
+            prediction. Defaults to None.
+        alpha (float, optional): A balance factor for the negative part of
+            Varifocal Loss, which is different from the alpha of Focal Loss.
+            Defaults to 0.75.
+        gamma (float, optional): The gamma for calculating the modulating
+            factor. Defaults to 2.0.
+        iou_weighted (bool, optional): Whether to weight the loss of the
+            positive example with the iou target. Defaults to True.
+        reduction (str, optional): The method used to reduce the loss into
+            a scalar. Defaults to 'mean'. Options are "none", "mean" and
+            "sum".
+        avg_factor (int, optional): Average factor that is used to average
+            the loss. Defaults to None.
+    """
+    # pred and target should be of the same size
+    assert pred.size() == target.size()
+    pred_sigmoid = pred.sigmoid()
+    target = target.type_as(pred)
+    if iou_weighted:
+        focal_weight = target * (target > 0.0).float() + \
+                       alpha * (pred_sigmoid - target).abs().pow(gamma) * \
+                       (target <= 0.0).float()
+    else:
+        focal_weight = (target > 0.0).float() + \
+                       alpha * (pred_sigmoid - target).abs().pow(gamma) * \
+                       (target <= 0.0).float()
+    loss = F.binary_cross_entropy_with_logits(
+        pred, target, reduction='none') * focal_weight
+    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
+    return loss
+
+
+@LOSSES.register_module
+class VarifocalLoss(nn.Module):
+
+    def __init__(self,
+                 use_sigmoid=True,
+                 alpha=0.75,
+                 gamma=2.0,
+                 iou_weighted=True,
+                 reduction='mean',
+                 loss_weight=1.0):
+        """`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_
+        Args:
+            use_sigmoid (bool, optional): Whether the prediction is
+                used for sigmoid or softmax. Defaults to True.
+            alpha (float, optional): A balance factor for the negative part of
+                Varifocal Loss, which is different from the alpha of Focal
+                Loss. Defaults to 0.75.
+            gamma (float, optional): The gamma for calculating the modulating
+                factor. Defaults to 2.0.
+            iou_weighted (bool, optional): Whether to weight the loss of the
+                positive examples with the iou target. Defaults to True.
+            reduction (str, optional): The method used to reduce the loss into
+                a scalar. Defaults to 'mean'. Options are "none", "mean" and
+                "sum".
+            loss_weight (float, optional): Weight of loss. Defaults to 1.0.
+        """
+        super(VarifocalLoss, self).__init__()
+        assert use_sigmoid is True, \
+            'Only sigmoid varifocal loss supported now.'
+        assert alpha >= 0.0
+        self.use_sigmoid = use_sigmoid
+        self.alpha = alpha
+        self.gamma = gamma
+        self.iou_weighted = iou_weighted
+        self.reduction = reduction
+        self.loss_weight = loss_weight
+
+    def forward(self,
+                pred,
+                target,
+                weight=None,
+                avg_factor=None,
+                reduction_override=None):
+        """Forward function.
+        Args:
+            pred (torch.Tensor): The prediction.
+            target (torch.Tensor): The learning target of the prediction.
+            weight (torch.Tensor, optional): The weight of loss for each
+                prediction. Defaults to None.
+            avg_factor (int, optional): Average factor that is used to average
+                the loss. Defaults to None.
+            reduction_override (str, optional): The reduction method used to
+                override the original reduction method of the loss.
+                Options are "none", "mean" and "sum".
+        Returns:
+            torch.Tensor: The calculated loss
+        """
+        assert reduction_override in (None, 'none', 'mean', 'sum')
+        reduction = (
+            reduction_override if reduction_override else self.reduction)
+        if self.use_sigmoid:
+            loss_cls = self.loss_weight * varifocal_loss(
+                pred,
+                target,
+                weight,
+                alpha=self.alpha,
+                gamma=self.gamma,
+                iou_weighted=self.iou_weighted,
+                reduction=reduction,
+                avg_factor=avg_factor)
+        else:
+            raise NotImplementedError
+        return loss_cls
+
+
+@LOSSES.register_module
+class FocalLoss(nn.Module):
+    def __init__(self, reduction='none', alpha=0.75):  # reduction kept for API parity; the loss is returned element-wise
+        super(FocalLoss, self).__init__()
+        self.alpha = alpha
+
+    def forward(self, pred, target):
+        pred = pred.sigmoid()
+        pos_inds = target.eq(1).float()
+        neg_inds = target.eq(0).float()
+        pos_loss = torch.log(pred + 1e-5) * torch.pow(1 - pred, 2) * pos_inds * self.alpha
+        neg_loss = torch.log(1 - pred + 1e-5) * torch.pow(pred, 2) * neg_inds * (1-self.alpha)
+        loss = -(pos_loss + neg_loss)
+        return loss
+
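A small numeric check of the new objectness FocalLoss (illustrative; the head later sums the element-wise result and divides by num_fg):

    import torch
    from easycv.models.loss import FocalLoss

    loss_fn = FocalLoss(reduction='none')
    logits = torch.tensor([[2.0], [-2.0], [2.0]])   # sigmoid scores ~0.88, 0.12, 0.88
    targets = torch.tensor([[1.0], [0.0], [0.0]])   # positive, easy negative, hard negative
    print(loss_fn(logits, targets))                 # ~[[0.0014], [0.0005], [0.41]]: the hard negative dominates
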
diff --git a/easycv/models/loss/iou_loss.py b/easycv/models/loss/iou_loss.py
index dfcad0b6..3bcf0731 100644
--- a/easycv/models/loss/iou_loss.py
+++ b/easycv/models/loss/iou_loss.py
@@ -4,7 +4,7 @@
 import torch.nn as nn
 
 from ..registry import LOSSES
-
+import math
 
 @LOSSES.register_module
 class IOUloss(nn.Module):
@@ -43,9 +43,74 @@ def forward(self, pred, target):
             giou = iou - (area_c - area_i) / area_c.clamp(1e-16)
             loss = 1 - giou.clamp(min=-1.0, max=1.0)
 
+        elif self.loss_type == "diou":
+            c_tl = torch.min(
+                (pred[:, :2] - pred[:, 2:] / 2), (target[:, :2] - target[:, 2:] / 2)  # top-left corner of the enclosing box
+            )
+            c_br = torch.max(
+                (pred[:, :2] + pred[:, 2:] / 2), (target[:, :2] + target[:, 2:] / 2)  # bottom-right corner of the enclosing box
+            )
+            convex_dis = torch.pow(c_br[:, 0] - c_tl[:, 0], 2) + torch.pow(c_br[:, 1] - c_tl[:, 1],
+                                                                           2) + 1e-7  # convex diagonal squared
+
+            center_dis = (torch.pow(pred[:, 0] - target[:, 0], 2) + torch.pow(pred[:, 1] - target[:, 1],
+                                                                              2))  # center diagonal squared
+
+            diou = iou - (center_dis / convex_dis)
+            loss = 1 - diou.clamp(min=-1.0, max=1.0)
+
+        elif self.loss_type == "ciou":
+            c_tl = torch.min(
+                (pred[:, :2] - pred[:, 2:] / 2), (target[:, :2] - target[:, 2:] / 2)
+            )
+            c_br = torch.max(
+                (pred[:, :2] + pred[:, 2:] / 2), (target[:, :2] + target[:, 2:] / 2)
+            )
+            convex_dis = torch.pow(c_br[:, 0] - c_tl[:, 0], 2) + torch.pow(c_br[:, 1] - c_tl[:, 1],
+                                                                           2) + 1e-7  # convex diagonal squared
+
+            center_dis = (torch.pow(pred[:, 0] - target[:, 0], 2) + torch.pow(pred[:, 1] - target[:, 1],
+                                                                              2))  # center diagonal squared
+
+            v = (4 / math.pi ** 2) * torch.pow(torch.atan(target[:, 2] / torch.clamp(target[:, 3], min=1e-7)) -
+                                               torch.atan(pred[:, 2] / torch.clamp(pred[:, 3], min=1e-7)), 2)
+
+            with torch.no_grad():
+                alpha = v / ((1 + 1e-7) - iou + v)
+
+            ciou = iou - (center_dis / convex_dis + alpha * v)
+
+            loss = 1 - ciou.clamp(min=-1.0, max=1.0)
+
+        elif self.loss_type == "eiou":
+
+            c_tl = torch.min(
+                (pred[:, :2] - pred[:, 2:] / 2), (target[:, :2] - target[:, 2:] / 2)
+            )
+            c_br = torch.max(
+                (pred[:, :2] + pred[:, 2:] / 2), (target[:, :2] + target[:, 2:] / 2)
+            )
+            convex_dis = torch.pow(c_br[:, 0] - c_tl[:, 0], 2) + torch.pow(c_br[:, 1] - c_tl[:, 1],
+                                                                           2) + 1e-7  # convex diagonal squared
+
+            center_dis = (torch.pow(pred[:, 0] - target[:, 0], 2) + torch.pow(pred[:, 1] - target[:, 1],
+                                                                              2))  # center diagonal squared
+
+            dis_w = torch.pow(pred[:, 2] - target[:, 2], 2)  # squared width difference of the two boxes
+            dis_h = torch.pow(pred[:, 3] - target[:, 3], 2)  # squared height difference of the two boxes
+
+            C_w = torch.pow(c_br[:, 0] - c_tl[:, 0], 2) + 1e-7  # squared width of the enclosing box
+            C_h = torch.pow(c_br[:, 1] - c_tl[:, 1], 2) + 1e-7  # squared height of the enclosing box
+
+            eiou = iou - (center_dis / convex_dis) - (dis_w / C_w) - (dis_h / C_h)
+
+            loss = 1 - eiou.clamp(min=-1.0, max=1.0)
+
         if self.reduction == 'mean':
             loss = loss.mean()
         elif self.reduction == 'sum':
             loss = loss.sum()
 
         return loss
+
+
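A tiny sketch exercising the new 'ciou' branch (illustrative; boxes are in (cx, cy, w, h) form, which is what the head feeds this loss):

    import torch
    from easycv.models.loss import IOUloss

    loss_fn = IOUloss(reduction='none', loss_type='ciou')
    pred   = torch.tensor([[50.0, 50.0, 20.0, 20.0]])   # cx, cy, w, h
    target = torch.tensor([[55.0, 50.0, 20.0, 10.0]])   # shifted centre, different aspect ratio
    print(loss_fn(pred, target))   # larger than plain 1 - IoU because of the centre and aspect-ratio penalties
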
diff --git a/tools/eval.py b/tools/eval.py
index 9f9b0bd3..4eba2e47 100644
--- a/tools/eval.py
+++ b/tools/eval.py
@@ -206,8 +206,8 @@ def main():
                 imgs_per_gpu=imgs_per_gpu,
                 workers_per_gpu=cfg.data.workers_per_gpu,
                 dist=distributed,
-                shuffle=False,
-                oss_config=cfg.get('oss_io_config', None))
+                shuffle=False)
+                # oss_config=cfg.get('oss_io_config', None))
 
         if not distributed:
             outputs = single_gpu_test(

From 4f55bda925678b92180c8a4c81c5c899b2821ce2 Mon Sep 17 00:00:00 2001
From: zxy <zxy@B-X0GHLVDL-0414.local>
Date: Thu, 19 May 2022 15:58:16 +0800
Subject: [PATCH 03/69] add attention layers and various loss functions

---
 .../yolox/yolox_s_8xb16_300e_coco.py          |   6 +-
 .../yolox/yolox_s_8xb16_300e_coco_base.py     | 194 ++++++++++++++++++
 2 files changed, 197 insertions(+), 3 deletions(-)
 create mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_base.py

diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
index 5c145e50..1dd378bf 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
@@ -8,8 +8,8 @@
     test_conf=0.01,
     nms_thre=0.65,
     use_att=None,
-    obj_loss_type='focal',
-    reg_loss_type= 'ciou'
+    obj_loss_type='BSE',
+    reg_loss_type='iou'
 )
 
 # s m l x
@@ -136,7 +136,7 @@
 
 # evaluation
 eval_config = dict(
-    interval=10,
+    interval=1,
     gpu_collect=False,
     visualization_config=dict(
         vis_num=10,
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_base.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_base.py
new file mode 100644
index 00000000..d61dbabe
--- /dev/null
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_base.py
@@ -0,0 +1,194 @@
+_base_ = '../../base.py'
+
+model = dict(
+    type='YOLOX',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65)
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+data_root = '/apsarapangu/disk5/zxy/data/coco/'
+data_root = '/tmp/coco/'
+dataset_type = 'CocoDataset'
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # s m x l; tiny and nano delete this
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+
+
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='MultiImageMixDataset',
+    data_source=dict(
+        type='CocoSource',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='MultiImageMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='CocoSource',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16,
+    workers_per_gpu=4,
+    train=train_dataset,
+    val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+# evaluation
+eval_config = dict(interval=10, gpu_collect=False)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+# basic_lr_per_img = 0.01 / 64.0
+optimizer = dict(
+    type='SGD',
+    # lr=0.01,
+    lr=0.02,
+    momentum=0.9,
+    weight_decay=5e-4,
+    nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential model average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHook')
+    ])
+# yapf:enable
+# runtime settings
+dist_params = dict(backend='nccl')
+cudnn_benchmark = True
+log_level = 'INFO'
+# load_from = '/apsarapangu/disk3/peizixiang.pzx/workspace/code/codereviews/ev-torch/work_dirs/modify_ckpts/yoloxs_coco_official_export.pt'
+load_from = None
+resume_from = None
+workflow = [('train', 1)]
+
+export = dict(use_jit=False)
\ No newline at end of file

From 42472ff5ba49dd43fef53770dbb3de5e598c4a35 Mon Sep 17 00:00:00 2001
From: zouxinyi0625 <zouxinyi.zxy@alibaba-inc.com>
Date: Tue, 31 May 2022 16:48:36 +0800
Subject: [PATCH 04/69] add siou loss

---
 .../yolox/yolox_s_8xb16_300e_coco.py          |   5 +-
 .../yolox/yolox_s_8xb16_300e_coco_asff.py     | 188 ++++++++++++++++++
 .../yolox/yolox_s_8xb16_300e_coco_cbam.py     | 188 ++++++++++++++++++
 .../yolox/yolox_s_8xb16_300e_coco_ciou.py     | 188 ++++++++++++++++++
 .../yolox/yolox_s_8xb16_300e_coco_diou.py     | 188 ++++++++++++++++++
 .../yolox/yolox_s_8xb16_300e_coco_eca.py      | 188 ++++++++++++++++++
 .../yolox/yolox_s_8xb16_300e_coco_eiou.py     | 188 ++++++++++++++++++
 .../yolox/yolox_s_8xb16_300e_coco_focal.py    | 188 ++++++++++++++++++
 .../yolox/yolox_s_8xb16_300e_coco_giou.py     | 188 ++++++++++++++++++
 .../yolox/yolox_s_8xb16_300e_coco_se.py       | 188 ++++++++++++++++++
 .../yolox/yolox_s_8xb16_300e_coco_siou.py     | 188 ++++++++++++++++++
 .../yolox/yolox_s_8xb16_300e_coco_siou2.py    | 188 ++++++++++++++++++
 .../yolox/yolox_s_8xb16_300e_coco_vfocal.py   | 188 ++++++++++++++++++
 .../yolox/yolox_s_8xb16_300e_wonorm.py        |   1 +
 docs/source/tutorials/export.md               | 141 +++++++++++++
 easycv/datasets/detection/raw.py              |   3 +
 easycv/models/detection/yolox/yolo_head.py    |  33 ++-
 easycv/models/detection/yolox/yolo_pafpn.py   |   2 +-
 easycv/models/loss/iou_loss.py                |  26 +++
 easycv/utils/test_util.py                     |   2 +
 tools/eval.py                                 |   4 +-
 21 files changed, 2459 insertions(+), 14 deletions(-)
 create mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_asff.py
 create mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_cbam.py
 create mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_ciou.py
 create mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_diou.py
 create mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_eca.py
 create mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_eiou.py
 create mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_focal.py
 create mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_giou.py
 create mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_se.py
 create mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_siou.py
 create mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_siou2.py
 create mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_vfocal.py
 create mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_wonorm.py
 create mode 100644 docs/source/tutorials/export.md

diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
index 1dd378bf..e5f2db95 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
@@ -8,7 +8,7 @@
     test_conf=0.01,
     nms_thre=0.65,
     use_att=None,
-    obj_loss_type='BSE',
+    obj_loss_type='BCE',
     reg_loss_type='iou'
 )
 
@@ -40,7 +40,8 @@
 
 # dataset settings
 data_root = '/apsarapangu/disk5/zxy/data/coco/'
-# data_root = '/data/coco'
+# data_root = '/mnt/data/nas/data/detection/coco/'
+
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff.py
new file mode 100644
index 00000000..2b77ea71
--- /dev/null
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff.py
@@ -0,0 +1,188 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att='ASFF',
+    obj_loss_type='BCE',
+    reg_loss_type='iou'
+)
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = '/apsarapangu/disk5/zxy/data/coco/'
+data_root = '/mnt/data/nas/data/detection/coco/'
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # s m x l; tiny and nano delete this
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=1,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential model average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_cbam.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_cbam.py
new file mode 100644
index 00000000..d89cc8eb
--- /dev/null
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_cbam.py
@@ -0,0 +1,188 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att='CBAM',
+    obj_loss_type='BCE',
+    reg_loss_type='iou'
+)
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = '/apsarapangu/disk5/zxy/data/coco/'
+data_root = '/mnt/data/nas/data/detection/coco/'
+# data_root = '/data/coco'
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # s m x l; tiny and nano delete this
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=1,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential model average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_ciou.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_ciou.py
new file mode 100644
index 00000000..0a3e50a2
--- /dev/null
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_ciou.py
@@ -0,0 +1,188 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att=None,
+    obj_loss_type='BCE',
+    reg_loss_type='ciou'
+)
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = '/apsarapangu/disk5/zxy/data/coco/'
+data_root = '/mnt/data/nas/data/detection/coco/'
+# data_root = '/data/coco'
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # s m x l; tiny and nano delete this
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=1,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_diou.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_diou.py
new file mode 100644
index 00000000..7c143e28
--- /dev/null
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_diou.py
@@ -0,0 +1,188 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att=None,
+    obj_loss_type='BCE',
+    reg_loss_type='diou'
+)
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = '/apsarapangu/disk5/zxy/data/coco/'
+data_root = '/mnt/data/nas/data/detection/coco/'
+# data_root = '/data/coco'
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # s m l x; tiny and nano skip mixup
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=1,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_eca.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_eca.py
new file mode 100644
index 00000000..499287e4
--- /dev/null
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_eca.py
@@ -0,0 +1,188 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att='ECA',
+    obj_loss_type='BCE',
+    reg_loss_type='iou'
+)
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = '/apsarapangu/disk5/zxy/data/coco/'
+data_root = '/mnt/data/nas/data/detection/coco/'
+# data_root = '/data/coco'
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # s m l x; tiny and nano skip mixup
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=1,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_eiou.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_eiou.py
new file mode 100644
index 00000000..77939e15
--- /dev/null
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_eiou.py
@@ -0,0 +1,188 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att=None,
+    obj_loss_type='BCE',
+    reg_loss_type='eiou'
+)
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = '/apsarapangu/disk5/zxy/data/coco/'
+data_root = '/mnt/data/nas/data/detection/coco/'
+# data_root = '/data/coco'
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # s m l x; tiny and nano skip mixup
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=1,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_focal.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_focal.py
new file mode 100644
index 00000000..e4651286
--- /dev/null
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_focal.py
@@ -0,0 +1,188 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att=None,
+    obj_loss_type='focal',
+    reg_loss_type='iou'
+)
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = '/apsarapangu/disk5/zxy/data/coco/'
+data_root = '/mnt/data/nas/data/detection/coco/'
+# data_root = '/data/coco'
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # s m l x; tiny and nano skip mixup
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=1,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_giou.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_giou.py
new file mode 100644
index 00000000..e2964f11
--- /dev/null
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_giou.py
@@ -0,0 +1,188 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att=None,
+    obj_loss_type='BCE',
+    reg_loss_type='giou'
+)
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = '/apsarapangu/disk5/zxy/data/coco/'
+data_root = '/mnt/data/nas/data/detection/coco/'
+# data_root = '/data/coco'
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # s m l x; tiny and nano skip mixup
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=1,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_se.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_se.py
new file mode 100644
index 00000000..0fa960ce
--- /dev/null
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_se.py
@@ -0,0 +1,188 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att='SE',
+    obj_loss_type='BCE',
+    reg_loss_type='iou'
+)
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = '/apsarapangu/disk5/zxy/data/coco/'
+data_root = '/mnt/data/nas/data/detection/coco/'
+# data_root = '/data/coco'
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # s m l x; tiny and nano skip mixup
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=1,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_siou.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_siou.py
new file mode 100644
index 00000000..27021d31
--- /dev/null
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_siou.py
@@ -0,0 +1,188 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att=None,
+    obj_loss_type='BCE',
+    reg_loss_type='siou'
+)
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+data_root = '/apsarapangu/disk5/zxy/data/coco/'
+# data_root = '/mnt/data/nas/data/detection/coco/'
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # s m l x; tiny and nano skip mixup
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=1,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_siou2.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_siou2.py
new file mode 100644
index 00000000..ea81125d
--- /dev/null
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_siou2.py
@@ -0,0 +1,188 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att=None,
+    obj_loss_type='BCE',
+    reg_loss_type='siou2'
+)
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+data_root = '/apsarapangu/disk5/zxy/data/coco/'
+# data_root = '/mnt/data/nas/data/detection/coco/'
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # s m l x; tiny and nano skip mixup
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=1,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_vfocal.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_vfocal.py
new file mode 100644
index 00000000..1030a01c
--- /dev/null
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_vfocal.py
@@ -0,0 +1,188 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att=None,
+    obj_loss_type='v_focal',
+    reg_loss_type='iou'
+)
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = '/apsarapangu/disk5/zxy/data/coco/'
+data_root = '/mnt/data/nas/data/detection/coco/'
+# data_root = '/data/coco'
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # s m l x; tiny and nano skip mixup
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=1,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_wonorm.py b/configs/detection/yolox/yolox_s_8xb16_300e_wonorm.py
new file mode 100644
index 00000000..4b1728f6
--- /dev/null
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_wonorm.py
@@ -0,0 +1 @@
+https://quanxi-account.aliyun-inc.com/#/MyAccountManage
\ No newline at end of file
diff --git a/docs/source/tutorials/export.md b/docs/source/tutorials/export.md
new file mode 100644
index 00000000..777bafd3
--- /dev/null
+++ b/docs/source/tutorials/export.md
@@ -0,0 +1,141 @@
+# Export tutorial
+
+We support three kinds of export models: the original model, the script model, and the blade model. Script (JIT) and Blade are used to accelerate inference. We also support an end2end export mode that wraps the preprocess and postprocess steps together with the model.
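+
+The export behaviour is controlled by the `export` field of the config file. As a quick reference (each flag combination is detailed in the sections below), a config may set:
+
+```python
+# The three flags used throughout this tutorial.
+export = dict(
+    use_jit=True,        # save a TorchScript (jit) model
+    export_blade=False,  # additionally optimize the model with Blade
+    end2end=False,       # wrap preprocess and postprocess into the exported model
+)
+```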
+
+### Export model
+
+```shell
+python tools/export.py \
+        ${CONFIG_PATH} \
+        ${CHECKPOINT} \
+        ${EXPORT_PATH}
+```
+
+<details>
+<summary>Arguments</summary>
+
+
+- `CONFIG_PATH`: the config file path of a detection method
+- `CHECKPOINT`: your checkpoint file of a detection method, named `epoch_*.pth`.
+- `EXPORT_PATH`: the path where the exported model will be saved
+
+</details>
+
+**Examples:**
+
+```shell
+python tools/export.py configs/detection/yolox/yolox_s_8xb16_300e_coco.py \
+        work_dirs/detection/yolox/epoch_300.pth \
+        work_dirs/detection/yolox/epoch_300_export.pth
+```
+
+#### Original model
+
+Export the original model by setting the export config as:
+
+```python
+export = dict(use_jit=False, export_blade=False, end2end=False)
+```
+
+#### Script model
+
+Export the script model by setting the export config as:
+
+```python
+export = dict(use_jit=True, export_blade=False, end2end=False)
+```
+
+#### Blade model
+
+Export the blade model by setting the export config as:
+
+```python
+export = dict(use_jit=True, export_blade=True, end2end=False)
+```
+
+You can choose not to save the jit model by setting `use_jit=False`.
+
+The blade environment must be installed successfully before exporting a blade model.
+
+To install Blade, please refer to https://help.aliyun.com/document_detail/205134.html.
+
+#### End2end model
+
+Export the model in the end2end mode by setting `end2end=True` in the export config:
+
+```python
+export = dict(use_jit=True, export_blade=True, end2end=True)
+```
+
+You can define your own preprocess and postprocess classes as below; otherwise, the default test pipeline will be used.
+
+```python
+@torch.jit.script
+class PreProcess:
+    """Process the data input to the model."""
+
+    def __init__(self, args):
+        pass
+
+    def __call__(self, image: torch.Tensor):
+        # return the preprocessed input(s) expected by the model
+        ...
+
+
+@torch.jit.script
+class PostProcess:
+    """Process the output values of detection models."""
+
+    def __init__(self, args):
+        pass
+
+    def __call__(self, args):
+        # return the final detection results
+        ...
+```
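+
+For illustration only (these are not the exact classes shipped with EasyCV), a minimal sketch of such a pair might look like the following. The input layout, resize size, and score threshold are assumptions for the example, and the TorchScript decorator with its typing constraints is omitted for brevity.
+
+```python
+import torch
+import torch.nn.functional as F
+
+
+class SimplePreProcess:
+    """Illustrative preprocess: HWC uint8 image tensor -> NCHW float tensor in [0, 1]."""
+
+    def __init__(self, target_size: int = 640):
+        self.target_size = target_size
+
+    def __call__(self, image: torch.Tensor) -> torch.Tensor:
+        # (H, W, 3) -> (1, 3, target_size, target_size)
+        x = image.permute(2, 0, 1).unsqueeze(0).float() / 255.0
+        return F.interpolate(
+            x, size=(self.target_size, self.target_size),
+            mode='bilinear', align_corners=False)
+
+
+class SimplePostProcess:
+    """Illustrative postprocess: keep detections above a score threshold."""
+
+    def __init__(self, score_thr: float = 0.5):
+        self.score_thr = score_thr
+
+    def __call__(self, outputs: torch.Tensor) -> torch.Tensor:
+        # assumes rows of [x1, y1, x2, y2, score, class]
+        return outputs[outputs[:, 4] > self.score_thr]
+```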
+
+
+
+### Inference with the Exported Model
+
+#### Non-End2end model
+
+```python
+import io
+
+import numpy as np
+import torch
+from PIL import Image
+
+# `img` below stands for the path to a test image and `jit_model_path` for the exported model file;
+# `preprocess` and `postprocess` are user-defined functions matching the test pipeline.
+input_data_list = [np.asarray(Image.open(img))]
+
+with io.open(jit_model_path, 'rb') as infile:
+    device = 'cuda' if torch.cuda.is_available() else 'cpu'
+    model = torch.jit.load(infile, device)
+
+    for idx, img in enumerate(input_data_list):
+        if not isinstance(img, np.ndarray):
+            img = np.asarray(img)
+        img = preprocess(img)
+        output = model(img)
+        output = postprocess(output)
+        print(output)
+```
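+
+The `preprocess` and `postprocess` functions above are user-defined. A rough sketch consistent with the test pipeline used in the configs (keep-ratio resize, pad to a square with value 114, normalize) might look like the following; the output format assumed by `postprocess` and the use of OpenCV for resizing are assumptions for the example, so adapt them to what your exported model actually returns.
+
+```python
+import cv2  # assumed available; any resize routine works
+import numpy as np
+import torch
+
+
+def preprocess(img, img_size=640,
+               mean=(123.675, 116.28, 103.53),
+               std=(58.395, 57.12, 57.375)):
+    """Keep-ratio resize, pad to a 114-valued square, normalize, and convert to NCHW."""
+    scale = min(img_size / img.shape[0], img_size / img.shape[1])
+    new_h, new_w = int(img.shape[0] * scale), int(img.shape[1] * scale)
+    resized = cv2.resize(img, (new_w, new_h)).astype(np.float32)
+    padded = np.full((img_size, img_size, 3), 114.0, dtype=np.float32)
+    padded[:new_h, :new_w] = resized
+    padded = (padded - np.array(mean, dtype=np.float32)) / np.array(std, dtype=np.float32)
+    return torch.from_numpy(padded.transpose(2, 0, 1)).unsqueeze(0)
+
+
+def postprocess(output, score_thr=0.5):
+    """Keep detections above a score threshold (assumes [x1, y1, x2, y2, score, class] rows)."""
+    if isinstance(output, (list, tuple)):
+        output = output[0]
+    return output[output[:, 4] > score_thr]
+```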
+
+#### End2end model
+
+
+```python
+# imports are the same as in the non-end2end example above
+input_data_list = [np.asarray(Image.open(img))]
+
+with io.open(jit_model_path, 'rb') as infile:
+    device = 'cuda' if torch.cuda.is_available() else 'cpu'
+    model = torch.jit.load(infile, device)
+
+    for idx, img in enumerate(input_data_list):
+        if not isinstance(img, np.ndarray):
+            img = np.asarray(img)
+        img = torch.from_numpy(img).to(device)
+        output = model(img)
+        print(output)
+```
+
+
+
+### Inference Time Comparisons
+
+Taking the YOLOX-S model as an example, the inference process can be greatly accelerated by using the script and blade models.
+
+|  Model  |       Mode       |  FPS   |
+| :-----: | :--------------: | :----: |
+| YOLOX-S |     Original     | 54.02  |
+| YOLOX-S |      Script      | 89.33  |
+| YOLOX-S |      Blade       | 174.38 |
+| YOLOX-S | Script (End2End) | 86.62  |
+| YOLOX-S | Blade (End2End)  | 160.86 |
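+
+A minimal timing sketch like the one below can reproduce such a comparison; the warm-up and iteration counts, the input shape, and the already-loaded `model` are assumptions, and the absolute numbers depend on the hardware.
+
+```python
+import time
+
+import torch
+
+
+def measure_fps(model, input_tensor, warmup=10, iters=100):
+    """Rough FPS measurement for a loaded (jit/blade) model."""
+    with torch.no_grad():
+        for _ in range(warmup):
+            model(input_tensor)
+        if torch.cuda.is_available():
+            torch.cuda.synchronize()
+        start = time.time()
+        for _ in range(iters):
+            model(input_tensor)
+        if torch.cuda.is_available():
+            torch.cuda.synchronize()
+    return iters / (time.time() - start)
+```
+
+For example, `measure_fps(model, torch.randn(1, 3, 640, 640).to(device))` gives a rough throughput figure for a single-image forward pass.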
diff --git a/easycv/datasets/detection/raw.py b/easycv/datasets/detection/raw.py
index 03b7b2f8..d36f63c7 100644
--- a/easycv/datasets/detection/raw.py
+++ b/easycv/datasets/detection/raw.py
@@ -101,6 +101,9 @@ def visualize(self, results, vis_num=10, score_thr=0.3, **kwargs):
                     dict of image meta info, containing filename, img_shape,
                     origin_img_shape, scale_factor and so on.
         """
+        import copy
+        results = copy.deepcopy(results)
+
         class_names = None
         if hasattr(self.data_source, 'CLASSES'):
             class_names = self.data_source.CLASSES
diff --git a/easycv/models/detection/yolox/yolo_head.py b/easycv/models/detection/yolox/yolo_head.py
index 8e0bccb7..f71a5147 100644
--- a/easycv/models/detection/yolox/yolo_head.py
+++ b/easycv/models/detection/yolox/yolo_head.py
@@ -123,19 +123,19 @@ def __init__(self,
 
         self.bcewithlog_loss = nn.BCEWithLogitsLoss(reduction='none')
 
-        if reg_loss_type=='l1':
-            self.use_l1 = True
-            self.l1_loss = nn.L1Loss(reduction='none')
-        else:
-            self.use_l1 = False
+        # if reg_loss_type=='l1':
+        self.use_l1 = False
+        self.l1_loss = nn.L1Loss(reduction='none')
+        # else:
+        #     self.use_l1 = False
 
         self.iou_loss = IOUloss(reduction='none',loss_type=reg_loss_type)
 
-        if obj_loss_type=='BCE':
+        if obj_loss_type == 'BCE':
             self.obj_loss = nn.BCEWithLogitsLoss(reduction='none')
-        elif obj_loss_type=='focal':
+        elif obj_loss_type == 'focal':
             self.obj_loss = FocalLoss(reduction='none')
-        elif obj_loss_type=='v_focal':
+        elif obj_loss_type == 'v_focal':
             self.obj_loss = VarifocalLoss(reduction='none')
         else:
             assert "Undefined loss type: {}".format(obj_loss_type)
@@ -209,6 +209,7 @@ def forward(self, xin, labels=None, imgs=None):
             outputs.append(output)
 
         if self.training:
+
             return self.get_losses(
                 imgs,
                 x_shifts,
@@ -219,6 +220,7 @@ def forward(self, xin, labels=None, imgs=None):
                 origin_preds,
                 dtype=xin[0].dtype,
             )
+
         else:
             self.hw = [x.shape[-2:] for x in outputs]
             # [batch, n_anchors_all, 85]
@@ -303,6 +305,7 @@ def get_losses(
 
         for batch_idx in range(outputs.shape[0]):
             num_gt = int(nlabel[batch_idx])
+
             num_gts += num_gt
             if num_gt == 0:
                 cls_target = outputs.new_zeros((0, self.num_classes))
@@ -338,6 +341,7 @@ def get_losses(
                         labels,
                         imgs,
                     )
+
                 except RuntimeError:
                     logging.error(
                         'OOM RuntimeError is raised due to the huge memory cost during label assignment. \
@@ -420,6 +424,8 @@ def get_losses(
         reg_weight = 5.0
         loss = reg_weight * loss_iou + loss_obj + loss_cls + loss_l1
 
+        print(loss_iou)
+
         return (
             loss,
             reg_weight * loss_iou,
@@ -482,6 +488,7 @@ def get_assignments(
         # reference to: https://github.com/Megvii-BaseDetection/YOLOX/pull/811
         # NOTE: Fix `selected index k out of range`
         npa: int = fg_mask.sum().item()  # number of positive anchors
+
         if npa == 0:
             gt_matched_classes = torch.zeros(0, device=fg_mask.device).long()
             pred_ious_this_matching = torch.rand(0, device=fg_mask.device)
@@ -515,6 +522,11 @@ def get_assignments(
         pair_wise_ious = bboxes_iou(gt_bboxes_per_image,
                                     bboxes_preds_per_image, False)
 
+
+        if (torch.isnan(pair_wise_ious.max())):
+            pair_wise_ious = bboxes_iou(gt_bboxes_per_image,
+                                        bboxes_preds_per_image, False)
+
         gt_cls_per_image = (
             F.one_hot(gt_classes.to(torch.int64),
                       self.num_classes).float().unsqueeze(1).repeat(
@@ -556,6 +568,7 @@ def get_assignments(
             matched_gt_inds,
         ) = self.dynamic_k_matching(cost, pair_wise_ious, gt_classes, num_gt,
                                     fg_mask)
+
         del pair_wise_cls_loss, cost, pair_wise_ious, pair_wise_ious_loss
 
         if mode == 'cpu':
@@ -656,15 +669,18 @@ def get_in_boxes_info(
 
     def dynamic_k_matching(self, cost, pair_wise_ious, gt_classes, num_gt,
                            fg_mask):
+
         # Dynamic K
         # ---------------------------------------------------------------
         matching_matrix = torch.zeros_like(cost, dtype=torch.uint8)
 
         ious_in_boxes_matrix = pair_wise_ious
         n_candidate_k = min(10, ious_in_boxes_matrix.size(1))
+
         topk_ious, _ = torch.topk(ious_in_boxes_matrix, n_candidate_k, dim=1)
         dynamic_ks = torch.clamp(topk_ious.sum(1).int(), min=1)
         dynamic_ks = dynamic_ks.tolist()
+
         for gt_idx in range(num_gt):
             _, pos_idx = torch.topk(
                 cost[gt_idx], k=dynamic_ks[gt_idx], largest=False)
@@ -687,4 +703,5 @@ def dynamic_k_matching(self, cost, pair_wise_ious, gt_classes, num_gt,
 
         pred_ious_this_matching = (matching_matrix *
                                    pair_wise_ious).sum(0)[fg_mask_inboxes]
+
         return num_fg, gt_matched_classes, pred_ious_this_matching, matched_gt_inds
diff --git a/easycv/models/detection/yolox/yolo_pafpn.py b/easycv/models/detection/yolox/yolo_pafpn.py
index ab75123f..0d48850a 100644
--- a/easycv/models/detection/yolox/yolo_pafpn.py
+++ b/easycv/models/detection/yolox/yolo_pafpn.py
@@ -150,7 +150,7 @@ def forward(self, input):
 
         outputs = (pan_out2, pan_out1, pan_out0)
 
-        if self.use_att=='ASFF':
+        if self.use_att == 'ASFF':
             pan_out0 = self.asff_1(outputs)
             pan_out1 = self.asff_2(outputs)
             pan_out2 = self.asff_3(outputs)
diff --git a/easycv/models/loss/iou_loss.py b/easycv/models/loss/iou_loss.py
index 3bcf0731..47397457 100644
--- a/easycv/models/loss/iou_loss.py
+++ b/easycv/models/loss/iou_loss.py
@@ -34,6 +34,32 @@ def forward(self, pred, target):
 
         if self.loss_type == 'iou':
             loss = 1 - iou**2
+
+        elif self.loss_type == "siou":
+            # angle cost
+            c_h = torch.max(pred[:, 1], target[:, 1]) - torch.min(pred[:, 1], target[:, 1])
+            c_w = torch.max(pred[:, 0], target[:, 0]) - torch.min(pred[:, 0], target[:, 0])
+            sigma = torch.sqrt(((pred[:, :2] - target[:, :2]) ** 2).sum(dim=1))
+            # angle_cost = 1 - 2 * torch.pow(torch.sin(torch.arctan(c_h / c_w) - torch.tensor(math.pi / 4)),2)
+            angle_cost = 2*(c_h*c_w)/(sigma**2)
+
+            # distance cost
+            gamma = 2 - angle_cost
+            # gamma = 1
+            c_dw = torch.max(pred[:, 0], target[:, 0]) - torch.min(pred[:, 0], target[:, 0]) + (pred[:, 2] + target[:, 2])/2
+            c_dh = torch.max(pred[:, 1], target[:, 1]) - torch.min(pred[:, 1], target[:, 1]) + (pred[:, 3] + target[:, 3])/2
+            p_x = ((target[:, 0] - pred[:, 0]) / c_dw) ** 2
+            p_y = ((target[:, 1] - pred[:, 1]) / c_dh) ** 2
+            dist_cost = 2 - torch.exp(-gamma * p_x) - torch.exp(-gamma * p_y)
+
+            # shape cost
+            theta = 4
+            w_w = torch.abs(pred[:, 2] - target[:, 2]) / torch.max(pred[:, 2], target[:, 2])
+            w_h = torch.abs(pred[:, 3] - target[:, 3]) / torch.max(pred[:, 3], target[:, 3])
+            shape_cost = torch.pow((1 - torch.exp(-w_w)), theta) + torch.pow((1 - torch.exp(-w_h)), theta)
+
+            loss = 1 - iou + (dist_cost + shape_cost) / 2
+
         elif self.loss_type == 'giou':
             c_tl = torch.min((pred[:, :2] - pred[:, 2:] / 2),
                              (target[:, :2] - target[:, 2:] / 2))
diff --git a/easycv/utils/test_util.py b/easycv/utils/test_util.py
index a04b8a86..ed92732d 100644
--- a/easycv/utils/test_util.py
+++ b/easycv/utils/test_util.py
@@ -11,6 +11,7 @@
 import numpy as np
 import torch
 
+
 TEST_DIR = '/tmp/ev_pytorch_test'
 
 
@@ -156,3 +157,4 @@ def pseudo_dist_init():
     torch.cuda.set_device(0)
     from torch import distributed as dist
     dist.init_process_group(backend='nccl')
+
diff --git a/tools/eval.py b/tools/eval.py
index 7b194179..62a4cb08 100644
--- a/tools/eval.py
+++ b/tools/eval.py
@@ -211,10 +211,8 @@ def main():
                 workers_per_gpu=cfg.data.workers_per_gpu,
                 dist=distributed,
                 shuffle=False)
-<<<<<<< HEAD
                 # oss_config=cfg.get('oss_io_config', None))
-=======
->>>>>>> upstream/master
+
 
         if not distributed:
             outputs = single_gpu_test(

From a1e5bc146d45a756614d18288ee70c91ae9023cc Mon Sep 17 00:00:00 2001
From: zouxinyi0625 <zouxinyi.zxy@alibaba-inc.com>
Date: Mon, 13 Jun 2022 13:35:58 +0800
Subject: [PATCH 05/69] add tah, various attention layers, and different loss
 functions

---
 .../yolox/yolox_s_8xb16_300e_coco.py          |   3 +-
 .../yolox/yolox_s_8xb16_300e_coco_asff.py     |   4 +-
 .../yolox/yolox_s_8xb16_300e_coco_focal.py    |   4 +-
 .../yolox/yolox_s_8xb16_300e_coco_sppf.py     | 189 ++++
 .../yolox/yolox_s_8xb16_300e_coco_vfocal.py   |   4 +-
 .../detection/yolox/yolox_s_8xb16_300e_tal.py | 189 ++++
 .../yolox/yolox_s_8xb16_300e_wonorm.py        | 187 +++-
 easycv/models/backbones/darknet.py            |  82 +-
 easycv/models/backbones/network_blocks.py     |  32 +
 easycv/models/detection/utils/utils.py        |  31 +
 easycv/models/detection/yolox/tood_head.py    | 835 ++++++++++++++++++
 easycv/models/detection/yolox/yolo_head.py    |  23 +-
 easycv/models/detection/yolox/yolo_pafpn.py   |   5 +-
 easycv/models/detection/yolox/yolox.py        |  13 +-
 easycv/predictors/detector.py                 | 203 +++--
 easycv/utils/checkpoint.py                    |   1 +
 show_predict.py                               |  88 ++
 tools/eval.py                                 |   9 +-
 18 files changed, 1812 insertions(+), 90 deletions(-)
 create mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_sppf.py
 create mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_tal.py
 create mode 100644 easycv/models/detection/yolox/tood_head.py
 create mode 100644 show_predict.py

diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
index e5f2db95..6aa8789a 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
@@ -105,7 +105,8 @@
             dict(type='LoadAnnotations', with_bbox=True)
         ],
         classes=CLASSES,
-        filter_empty_gt=False,
+        filter_empty_gt=True,
+        test_mode=True,
         iscrowd=True),
     pipeline=test_pipeline,
     dynamic_scale=None,
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff.py
index 2b77ea71..afff65fa 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff.py
@@ -39,8 +39,8 @@
 ]
 
 # dataset settings
-# data_root = '/apsarapangu/disk5/zxy/data/coco/'
-data_root = '/mnt/data/nas/data/detection/coco/'
+data_root = '/apsarapangu/disk5/zxy/data/coco/'
+# data_root = '/mnt/data/nas/data/detection/coco/'
 
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_focal.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_focal.py
index e4651286..820ee1ad 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_focal.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_focal.py
@@ -39,8 +39,8 @@
 ]
 
 # dataset settings
-# data_root = '/apsarapangu/disk5/zxy/data/coco/'
-data_root = '/mnt/data/nas/data/detection/coco/'
+data_root = '/apsarapangu/disk5/zxy/data/coco/'
+# data_root = '/mnt/data/nas/data/detection/coco/'
 # data_root = '/data/coco'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_sppf.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_sppf.py
new file mode 100644
index 00000000..40aee4fa
--- /dev/null
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_sppf.py
@@ -0,0 +1,189 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att=None,
+    obj_loss_type='BCE',
+    reg_loss_type='iou',
+    spp_type='sppf'
+)
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+data_root = '/apsarapangu/disk5/zxy/data/coco/'
+# data_root = '/mnt/data/nas/data/detection/coco/'
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # s m x l; tiny and nano will delete it
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=1,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential model average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_vfocal.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_vfocal.py
index 1030a01c..d0909cdb 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_vfocal.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_vfocal.py
@@ -39,8 +39,8 @@
 ]
 
 # dataset settings
-# data_root = '/apsarapangu/disk5/zxy/data/coco/'
-data_root = '/mnt/data/nas/data/detection/coco/'
+data_root = '/apsarapangu/disk5/zxy/data/coco/'
+# data_root = '/mnt/data/nas/data/detection/coco/'
 # data_root = '/data/coco'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_tal.py b/configs/detection/yolox/yolox_s_8xb16_300e_tal.py
new file mode 100644
index 00000000..ec01654a
--- /dev/null
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_tal.py
@@ -0,0 +1,189 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att=None,
+    obj_loss_type='BCE',
+    reg_loss_type='iou',
+    head_type='tood'  # yolox
+)
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+data_root = '/apsarapangu/disk5/zxy/data/coco/'
+# data_root = '/mnt/data/nas/data/detection/coco/'
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # s m x l; tiny and nano will delete it
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=1,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential model average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_wonorm.py b/configs/detection/yolox/yolox_s_8xb16_300e_wonorm.py
index 4b1728f6..782e5a41 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_wonorm.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_wonorm.py
@@ -1 +1,186 @@
-https://quanxi-account.aliyun-inc.com/#/MyAccountManage
\ No newline at end of file
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65)
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+data_root = '/apsarapangu/disk5/zxy/data/coco/'
+# data_root = '/mnt/data/nas/data/detection/coco/'
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # s m x l; tiny and nano will delete it
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    # dict(type='MMPhotoMetricDistortion',
+    #      brightness_delta=32,
+    #      contrast_range=(0.5, 1.5),
+    #      saturation_range=(0.5, 1.5),
+    #      hue_delta=18),  # only support float32
+    dict(type='MMYOLOXHSVRandomAug'),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    # dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle', img_to_float=True),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    # dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle', img_to_float=True),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=False),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=True,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=32,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[dict(type='LoadImageFromFile', to_float32=False)],
+        classes=CLASSES,
+        test_mode=True,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16,
+    workers_per_gpu=4,
+    persistent_workers=True,
+    train=train_dataset,
+    val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=True,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        dist_eval=True,
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential model average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        # dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=False)
+mp_start_method = 'fork'
\ No newline at end of file
diff --git a/easycv/models/backbones/darknet.py b/easycv/models/backbones/darknet.py
index 0966fb0d..ebd9025a 100644
--- a/easycv/models/backbones/darknet.py
+++ b/easycv/models/backbones/darknet.py
@@ -1,9 +1,9 @@
 # Copyright (c) Alibaba, Inc. and its affiliates.
 
 from torch import nn
-
+import torch
 from .network_blocks import (BaseConv, CSPLayer, DWConv, Focus, ResLayer,
-                             SPPBottleneck)
+                             SPPBottleneck, SPPFBottleneck)
 
 
 class Darknet(nn.Module):
@@ -16,6 +16,7 @@ def __init__(
             in_channels=3,
             stem_out_channels=32,
             out_features=('dark3', 'dark4', 'dark5'),
+            spp_type='spp'
     ):
         """
         Args:
@@ -49,11 +50,18 @@ def __init__(
             *self.make_group_layer(in_channels, num_blocks[2], stride=2))
         in_channels *= 2  # 512
 
-        self.dark5 = nn.Sequential(
-            *self.make_group_layer(in_channels, num_blocks[3], stride=2),
-            *self.make_spp_block([in_channels, in_channels * 2],
-                                 in_channels * 2),
-        )
+        if spp_type == 'spp':
+            self.dark5 = nn.Sequential(
+                *self.make_group_layer(in_channels, num_blocks[3], stride=2),
+                *self.make_spp_block([in_channels, in_channels * 2],
+                                     in_channels * 2),
+            )
+        elif spp_type == 'sppf':
+            self.dark5 = nn.Sequential(
+                *self.make_group_layer(in_channels, num_blocks[3], stride=2),
+                *self.make_sppf_block([in_channels, in_channels * 2],
+                                       in_channels * 2),
+            )
 
     def make_group_layer(self,
                          in_channels: int,
@@ -87,6 +95,23 @@ def make_spp_block(self, filters_list, in_filters):
         ])
         return m
 
+    def make_sppf_block(self, filters_list, in_filters):
+        m = nn.Sequential(*[
+            BaseConv(in_filters, filters_list[0], 1, stride=1, act='lrelu'),
+            BaseConv(
+                filters_list[0], filters_list[1], 3, stride=1, act='lrelu'),
+            SPPBottleneck(
+                in_channels=filters_list[1],
+                out_channels=filters_list[0],
+                activation='lrelu',
+            ),
+            BaseConv(
+                filters_list[0], filters_list[1], 3, stride=1, act='lrelu'),
+            BaseConv(
+                filters_list[1], filters_list[0], 1, stride=1, act='lrelu'),
+        ])
+        return m
+
     def forward(self, x):
         outputs = {}
         x = self.stem(x)
@@ -111,6 +136,7 @@ def __init__(
         out_features=('dark3', 'dark4', 'dark5'),
         depthwise=False,
         act='silu',
+        spp_type='spp'
     ):
         super().__init__()
         assert out_features, 'please provide output features of Darknet'
@@ -160,19 +186,35 @@ def __init__(
         )
 
         # dark5
-        self.dark5 = nn.Sequential(
-            Conv(base_channels * 8, base_channels * 16, 3, 2, act=act),
-            SPPBottleneck(
-                base_channels * 16, base_channels * 16, activation=act),
-            CSPLayer(
-                base_channels * 16,
-                base_channels * 16,
-                n=base_depth,
-                shortcut=False,
-                depthwise=depthwise,
-                act=act,
-            ),
-        )
+        if spp_type == 'spp':
+            self.dark5 = nn.Sequential(
+                Conv(base_channels * 8, base_channels * 16, 3, 2, act=act),
+                SPPBottleneck(
+                    base_channels * 16, base_channels * 16, activation=act),
+                CSPLayer(
+                    base_channels * 16,
+                    base_channels * 16,
+                    n=base_depth,
+                    shortcut=False,
+                    depthwise=depthwise,
+                    act=act,
+                ),
+            )
+
+        elif spp_type == 'sppf':
+            self.dark5 = nn.Sequential(
+                Conv(base_channels * 8, base_channels * 16, 3, 2, act=act),
+                SPPFBottleneck(
+                    base_channels * 16, base_channels * 16, activation=act),
+                CSPLayer(
+                    base_channels * 16,
+                    base_channels * 16,
+                    n=base_depth,
+                    shortcut=False,
+                    depthwise=depthwise,
+                    act=act,
+                ),
+            )
 
     def forward(self, x):
         outputs = {}
diff --git a/easycv/models/backbones/network_blocks.py b/easycv/models/backbones/network_blocks.py
index e8716daf..ee5ed88e 100644
--- a/easycv/models/backbones/network_blocks.py
+++ b/easycv/models/backbones/network_blocks.py
@@ -147,6 +147,38 @@ def forward(self, x):
         return x + out
 
 
+class SPPFBottleneck(nn.Module):
+    """Spatial pyramid pooling layer used in YOLOv3-SPP"""
+
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 kernel_size=5,
+                 activation='silu'):
+        super().__init__()
+        hidden_channels = in_channels // 2
+        self.conv1 = BaseConv(
+            in_channels, hidden_channels, 1, stride=1, act=activation)
+        # self.m = nn.ModuleList([
+        #     nn.MaxPool2d(kernel_size=ks, stride=1, padding=ks // 2)
+        #     for ks in kernel_sizes
+        # ])
+        self.m = nn.MaxPool2d(kernel_size=kernel_size, stride=1, padding=kernel_size // 2)
+
+        conv2_channels = hidden_channels * 4
+        self.conv2 = BaseConv(
+            conv2_channels, out_channels, 1, stride=1, act=activation)
+
+    def forward(self, x):
+        x = self.conv1(x)
+        x1 = self.m(x)
+        x2 = self.m(x1)
+        x = self.conv2(torch.cat([x, x1, x2, self.m(x2)], 1))
+        # x = torch.cat([x] + [m(x) for m in self.m], dim=1)
+        # x = self.conv2(x)
+        return x
+
+
 class SPPBottleneck(nn.Module):
     """Spatial pyramid pooling layer used in YOLOv3-SPP"""
 
diff --git a/easycv/models/detection/utils/utils.py b/easycv/models/detection/utils/utils.py
index 956cf885..9e7ba775 100644
--- a/easycv/models/detection/utils/utils.py
+++ b/easycv/models/detection/utils/utils.py
@@ -1,5 +1,36 @@
 # Copyright (c) Alibaba, Inc. and its affiliates.
 import numpy as np
+from torch.autograd import Function
+from torch.nn import functional as F
+
+
+class SigmoidGeometricMean(Function):
+    """Forward and backward function of geometric mean of two sigmoid
+    functions.
+
+    This implementation with an analytical gradient function substitutes
+    the autograd function of (x.sigmoid() * y.sigmoid()).sqrt(). The
+    original implementation produces NaN during gradient backpropagation
+    if both x and y are very small values.
+    """
+
+    @staticmethod
+    def forward(ctx, x, y):
+        x_sigmoid = x.sigmoid()
+        y_sigmoid = y.sigmoid()
+        z = (x_sigmoid * y_sigmoid).sqrt()
+        ctx.save_for_backward(x_sigmoid, y_sigmoid, z)
+        return z
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        x_sigmoid, y_sigmoid, z = ctx.saved_tensors
+        grad_x = grad_output * z * (1 - x_sigmoid) / 2
+        grad_y = grad_output * z * (1 - y_sigmoid) / 2
+        return grad_x, grad_y
+
+
+sigmoid_geometric_mean = SigmoidGeometricMean.apply
 
 
 def output_postprocess(outputs, img_metas=None):
diff --git a/easycv/models/detection/yolox/tood_head.py b/easycv/models/detection/yolox/tood_head.py
new file mode 100644
index 00000000..0477530a
--- /dev/null
+++ b/easycv/models/detection/yolox/tood_head.py
@@ -0,0 +1,835 @@
+# Copyright (c) 2014-2021 Megvii Inc And Alibaba PAI-Teams. All rights reserved.
+import logging
+import math
+from distutils.version import LooseVersion
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from easycv.models.backbones.network_blocks import BaseConv, DWConv
+from easycv.models.detection.utils import bboxes_iou
+from easycv.models.loss import IOUloss
+from easycv.models.loss import FocalLoss, VarifocalLoss
+from mmcv.cnn import ConvModule, normal_init
+
+
+class TaskDecomposition(nn.Module):
+    """Task decomposition module in task-aligned predictor of TOOD.
+
+    Args:
+        feat_channels (int): Number of feature channels in TOOD head.
+        stacked_convs (int): Number of conv layers in TOOD head.
+        la_down_rate (int): Downsample rate of layer attention.
+        conv_cfg (dict): Config dict for convolution layer.
+        norm_cfg (dict): Config dict for normalization layer.
+    """
+
+    def __init__(self,
+                 feat_channels,
+                 stacked_convs,
+                 la_down_rate=8,
+                 conv_cfg=None,
+                 norm_cfg=None):
+        super(TaskDecomposition, self).__init__()
+        self.feat_channels = feat_channels
+        self.stacked_convs = stacked_convs
+        self.in_channels = self.feat_channels * self.stacked_convs
+        self.norm_cfg = norm_cfg
+        self.layer_attention = nn.Sequential(
+            nn.Conv2d(self.in_channels, self.in_channels // la_down_rate, 1),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(
+                self.in_channels // la_down_rate,
+                self.stacked_convs,
+                1,
+                padding=0), nn.Sigmoid())
+
+        self.reduction_conv = ConvModule(
+            self.in_channels,
+            self.feat_channels,
+            1,
+            stride=1,
+            padding=0,
+            conv_cfg=conv_cfg,
+            norm_cfg=norm_cfg,
+            bias=norm_cfg is None)
+
+    def init_weights(self):
+        for m in self.layer_attention.modules():
+            if isinstance(m, nn.Conv2d):
+                normal_init(m, std=0.001)
+        normal_init(self.reduction_conv.conv, std=0.01)
+
+    def forward(self, feat, avg_feat=None):
+        b, c, h, w = feat.shape
+        if avg_feat is None:
+            avg_feat = F.adaptive_avg_pool2d(feat, (1, 1))
+        weight = self.layer_attention(avg_feat)
+
+        # here we first compute the product between layer attention weight and
+        # conv weight, and then compute the convolution between new conv weight
+        # and feature map, in order to save memory and FLOPs.
+        conv_weight = weight.reshape(
+            b, 1, self.stacked_convs,
+            1) * self.reduction_conv.conv.weight.reshape(
+            1, self.feat_channels, self.stacked_convs, self.feat_channels)
+        conv_weight = conv_weight.reshape(b, self.feat_channels,
+                                          self.in_channels)
+        feat = feat.reshape(b, self.in_channels, h * w)
+        feat = torch.bmm(conv_weight, feat).reshape(b, self.feat_channels, h,
+                                                    w)
+        if self.norm_cfg is not None:
+            feat = self.reduction_conv.norm(feat)
+        feat = self.reduction_conv.activate(feat)
+
+        return feat
+
+
+class TOODHead(nn.Module):
+
+    def __init__(self,
+                 num_classes,
+                 width=1.0,
+                 strides=[8, 16, 32],
+                 in_channels=[256, 512, 1024],
+                 act='silu',
+                 depthwise=False,
+                 stage='CLOUD',
+                 obj_loss_type='l1',
+                 reg_loss_type='iou',
+                 stacked_convs=6,
+                 conv_cfg=None,
+                 norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
+                 ):
+        """
+        Args:
+            num_classes (int): detection class numbers.
+            width (float): model width. Default value: 1.0.
+            strides (list): expanded strides. Default value: [8, 16, 32].
+            in_channels (list): model conv channels set. Default value: [256, 512, 1024].
+            act (str): activation type of conv. Default value: "silu".
+            depthwise (bool): whether apply depthwise conv in conv branch. Default value: False.
+            stage (str): model stage, distinguish edge head to cloud head. Default value: CLOUD.
+            obj_loss_type (str): the loss function of the obj conf. Default value: l1.
+            reg_loss_type (str): the loss function of the box prediction. Default value: l1.
+        """
+        super().__init__()
+
+        self.n_anchors = 1
+        self.num_classes = num_classes
+        self.stage = stage
+        self.decode_in_inference = True  # for deploy, set to False
+
+        self.stacked_convs = stacked_convs
+        self.conv_cfg = conv_cfg
+        self.norm_cfg = norm_cfg
+        self.feat_channels = int(256 * width)
+
+        self.cls_convs = nn.ModuleList()
+        self.reg_convs = nn.ModuleList()
+        self.cls_preds = nn.ModuleList()
+        self.reg_preds = nn.ModuleList()
+        self.obj_preds = nn.ModuleList()
+        self.cls_decomps = nn.ModuleList()
+        self.reg_decomps = nn.ModuleList()
+        self.stems = nn.ModuleList()
+
+        self.inter_convs = nn.ModuleList()
+
+        Conv = DWConv if depthwise else BaseConv
+
+        for i in range(len(in_channels)):
+            self.stems.append(
+                BaseConv(
+                    in_channels=int(in_channels[i] * width),
+                    out_channels=int(256 * width),
+                    ksize=1,
+                    stride=1,
+                    act=act,
+                ))
+            self.cls_convs.append(
+                nn.Sequential(*[
+                    Conv(
+                        in_channels=int(256 * width),
+                        out_channels=int(256 * width),
+                        ksize=3,
+                        stride=1,
+                        act=act,
+                    ),
+                    Conv(
+                        in_channels=int(256 * width),
+                        out_channels=int(256 * width),
+                        ksize=3,
+                        stride=1,
+                        act=act,
+                    ),
+                ]))
+            self.reg_convs.append(
+                nn.Sequential(*[
+                    Conv(
+                        in_channels=int(256 * width),
+                        out_channels=int(256 * width),
+                        ksize=3,
+                        stride=1,
+                        act=act,
+                    ),
+                    Conv(
+                        in_channels=int(256 * width),
+                        out_channels=int(256 * width),
+                        ksize=3,
+                        stride=1,
+                        act=act,
+                    ),
+                ]))
+            self.cls_preds.append(
+                nn.Conv2d(
+                    in_channels=int(256 * width),
+                    out_channels=self.n_anchors * self.num_classes,
+                    kernel_size=1,
+                    stride=1,
+                    padding=0,
+                ))
+            self.reg_preds.append(
+                nn.Conv2d(
+                    in_channels=int(256 * width),
+                    out_channels=4,
+                    kernel_size=1,
+                    stride=1,
+                    padding=0,
+                ))
+            self.obj_preds.append(
+                nn.Conv2d(
+                    in_channels=int(256 * width),
+                    out_channels=self.n_anchors * 1,
+                    kernel_size=1,
+                    stride=1,
+                    padding=0,
+                ))
+            self.cls_decomps.append(
+                TaskDecomposition(self.feat_channels,
+                                  self.stacked_convs,
+                                  self.stacked_convs * 8,
+                                  self.conv_cfg, self.norm_cfg))
+            self.reg_decomps.append(
+                TaskDecomposition(self.feat_channels,
+                                  self.stacked_convs,
+                                  self.stacked_convs * 8,
+                                  self.conv_cfg, self.norm_cfg)
+            )
+
+        for i in range(self.stacked_convs):
+            conv_cfg = self.conv_cfg
+            chn = self.feat_channels
+            self.inter_convs.append(
+                ConvModule(
+                    chn,
+                    self.feat_channels,
+                    3,
+                    stride=1,
+                    padding=1,
+                    conv_cfg=conv_cfg,
+                    norm_cfg=self.norm_cfg))
+
+        self.bcewithlog_loss = nn.BCEWithLogitsLoss(reduction='none')
+
+        self.use_l1 = False
+        self.l1_loss = nn.L1Loss(reduction='none')
+
+        self.iou_loss = IOUloss(reduction='none', loss_type=reg_loss_type)
+
+        self.obj_loss_type = obj_loss_type
+        if obj_loss_type == 'BCE':
+            self.obj_loss = nn.BCEWithLogitsLoss(reduction='none')
+        elif obj_loss_type == 'focal':
+            self.obj_loss = FocalLoss(reduction='none')
+
+        elif obj_loss_type == 'v_focal':
+            self.obj_loss = VarifocalLoss(reduction='none')
+        else:
+            assert "Undefined loss type: {}".format(obj_loss_type)
+
+        self.strides = strides
+        self.grids = [torch.zeros(1)] * len(in_channels)
+
+    def initialize_biases(self, prior_prob):
+        for conv in self.cls_preds:
+            b = conv.bias.view(self.n_anchors, -1)
+            b.data.fill_(-math.log((1 - prior_prob) / prior_prob))
+            conv.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
+
+        for conv in self.obj_preds:
+            b = conv.bias.view(self.n_anchors, -1)
+            b.data.fill_(-math.log((1 - prior_prob) / prior_prob))
+            conv.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
+
+    def forward(self, xin, labels=None, imgs=None):
+        outputs = []
+        origin_preds = []
+        x_shifts = []
+        y_shifts = []
+        expanded_strides = []
+
+        for k, (cls_decomp, reg_decomp, cls_conv, reg_conv, stride_this_level, x) in enumerate(
+                zip(self.cls_decomps, self.reg_decomps, self.cls_convs, self.reg_convs, self.strides, xin)):
+            x = self.stems[k](x)
+
+            inter_feats = []
+            for inter_conv in self.inter_convs:
+                x = inter_conv(x)
+                inter_feats.append(x)
+            feat = torch.cat(inter_feats, 1)
+
+            avg_feat = F.adaptive_avg_pool2d(feat, (1, 1))
+            cls_x = cls_decomp(feat, avg_feat)
+            reg_x = reg_decomp(feat, avg_feat)
+
+            cls_feat = cls_conv(cls_x)
+            cls_output = self.cls_preds[k](cls_feat)
+
+            reg_feat = reg_conv(reg_x)
+            reg_output = self.reg_preds[k](reg_feat)
+            obj_output = self.obj_preds[k](reg_feat)
+
+            if self.training:
+                output = torch.cat([reg_output, obj_output, cls_output], 1)
+                output, grid = self.get_output_and_grid(
+                    output, k, stride_this_level, xin[0].type())
+                x_shifts.append(grid[:, :, 0])
+                y_shifts.append(grid[:, :, 1])
+                expanded_strides.append(
+                    torch.zeros(
+                        1, grid.shape[1]).fill_(stride_this_level).type_as(
+                        xin[0]))
+                if self.use_l1:
+                    batch_size = reg_output.shape[0]
+                    hsize, wsize = reg_output.shape[-2:]
+                    reg_output = reg_output.view(batch_size, self.n_anchors, 4,
+                                                 hsize, wsize)
+                    reg_output = reg_output.permute(0, 1, 3, 4, 2).reshape(
+                        batch_size, -1, 4)
+                    origin_preds.append(reg_output.clone())
+
+            else:
+                if self.stage == 'EDGE':
+                    m = nn.Hardsigmoid()
+                    output = torch.cat(
+                        [reg_output, m(obj_output),
+                         m(cls_output)], 1)
+                else:
+                    output = torch.cat([
+                        reg_output,
+                        obj_output.sigmoid(),
+                        cls_output.sigmoid()
+                    ], 1)
+
+            outputs.append(output)
+
+        if self.training:
+
+            return self.get_losses(
+                imgs,
+                x_shifts,
+                y_shifts,
+                expanded_strides,
+                labels,
+                torch.cat(outputs, 1),
+                origin_preds,
+                dtype=xin[0].dtype,
+            )
+
+        else:
+            self.hw = [x.shape[-2:] for x in outputs]
+            # [batch, n_anchors_all, 85]
+            outputs = torch.cat([x.flatten(start_dim=2) for x in outputs],
+                                dim=2).permute(0, 2, 1)
+            if self.decode_in_inference:
+                return self.decode_outputs(outputs, dtype=xin[0].type())
+            else:
+                return outputs
+
+    def get_output_and_grid(self, output, k, stride, dtype):
+        grid = self.grids[k]
+
+        batch_size = output.shape[0]
+        n_ch = 5 + self.num_classes
+        hsize, wsize = output.shape[-2:]
+        if grid.shape[2:4] != output.shape[2:4]:
+            yv, xv = torch.meshgrid([torch.arange(hsize), torch.arange(wsize)])
+            grid = torch.stack((xv, yv), 2).view(1, 1, hsize, wsize,
+                                                 2).type(dtype)
+            self.grids[k] = grid
+
+        output = output.view(batch_size, self.n_anchors, n_ch, hsize, wsize)
+        output = output.permute(0, 1, 3, 4,
+                                2).reshape(batch_size,
+                                           self.n_anchors * hsize * wsize, -1)
+        grid = grid.view(1, -1, 2)
+        output[..., :2] = (output[..., :2] + grid) * stride
+        output[..., 2:4] = torch.exp(output[..., 2:4]) * stride
+        return output, grid
+
+    def decode_outputs(self, outputs, dtype):
+        grids = []
+        strides = []
+        for (hsize, wsize), stride in zip(self.hw, self.strides):
+            yv, xv = torch.meshgrid([torch.arange(hsize), torch.arange(wsize)])
+            grid = torch.stack((xv, yv), 2).view(1, -1, 2)
+            grids.append(grid)
+            shape = grid.shape[:2]
+            strides.append(torch.full((*shape, 1), stride, dtype=torch.int))
+
+        grids = torch.cat(grids, dim=1).type(dtype)
+        strides = torch.cat(strides, dim=1).type(dtype)
+
+        outputs[..., :2] = (outputs[..., :2] + grids) * strides
+        outputs[..., 2:4] = torch.exp(outputs[..., 2:4]) * strides
+        return outputs
+
+    def get_losses(
+            self,
+            imgs,
+            x_shifts,
+            y_shifts,
+            expanded_strides,
+            labels,
+            outputs,
+            origin_preds,
+            dtype,
+    ):
+        bbox_preds = outputs[:, :, :4]  # [batch, n_anchors_all, 4]
+        obj_preds = outputs[:, :, 4].unsqueeze(-1)  # [batch, n_anchors_all, 1]
+        cls_preds = outputs[:, :, 5:]  # [batch, n_anchors_all, n_cls]
+
+        # calculate targets
+        nlabel = (labels.sum(dim=2) > 0).sum(dim=1)  # number of objects
+
+        total_num_anchors = outputs.shape[1]
+        x_shifts = torch.cat(x_shifts, 1)  # [1, n_anchors_all]
+        y_shifts = torch.cat(y_shifts, 1)  # [1, n_anchors_all]
+        expanded_strides = torch.cat(expanded_strides, 1)
+        if self.use_l1:
+            origin_preds = torch.cat(origin_preds, 1)
+
+        cls_targets = []
+        reg_targets = []
+        l1_targets = []
+        obj_targets = []
+        fg_masks = []
+
+        num_fg = 0.0
+        num_gts = 0.0
+
+        for batch_idx in range(outputs.shape[0]):
+            num_gt = int(nlabel[batch_idx])
+
+            num_gts += num_gt
+            if num_gt == 0:
+                cls_target = outputs.new_zeros((0, self.num_classes))
+                reg_target = outputs.new_zeros((0, 4))
+                l1_target = outputs.new_zeros((0, 4))
+                obj_target = outputs.new_zeros((total_num_anchors, 1))
+                fg_mask = outputs.new_zeros(total_num_anchors).bool()
+            else:
+                gt_bboxes_per_image = labels[batch_idx, :num_gt, 1:5]
+                gt_classes = labels[batch_idx, :num_gt, 0]
+                bboxes_preds_per_image = bbox_preds[batch_idx]
+
+                try:
+                    (
+                        gt_matched_classes,
+                        fg_mask,
+                        pred_ious_this_matching,
+                        matched_gt_inds,
+                        num_fg_img,
+                    ) = self.get_assignments(  # noqa
+                        batch_idx,
+                        num_gt,
+                        total_num_anchors,
+                        gt_bboxes_per_image,
+                        gt_classes,
+                        bboxes_preds_per_image,
+                        expanded_strides,
+                        x_shifts,
+                        y_shifts,
+                        cls_preds,
+                        bbox_preds,
+                        obj_preds,
+                        labels,
+                        imgs,
+                    )
+
+                except RuntimeError:
+                    logging.error(
+                        'OOM RuntimeError is raised due to the huge memory '
+                        'cost during label assignment. CPU mode is applied '
+                        'in this batch. If you want to avoid this issue, '
+                        'try to reduce the batch size or image size.')
+                    torch.cuda.empty_cache()
+                    (
+                        gt_matched_classes,
+                        fg_mask,
+                        pred_ious_this_matching,
+                        matched_gt_inds,
+                        num_fg_img,
+                    ) = self.get_assignments(  # noqa
+                        batch_idx,
+                        num_gt,
+                        total_num_anchors,
+                        gt_bboxes_per_image,
+                        gt_classes,
+                        bboxes_preds_per_image,
+                        expanded_strides,
+                        x_shifts,
+                        y_shifts,
+                        cls_preds,
+                        bbox_preds,
+                        obj_preds,
+                        labels,
+                        imgs,
+                        'cpu',
+                    )
+
+                torch.cuda.empty_cache()
+                num_fg += num_fg_img
+
+                cls_target = F.one_hot(
+                    gt_matched_classes.to(torch.int64),
+                    self.num_classes) * pred_ious_this_matching.unsqueeze(-1)
+                obj_target = fg_mask.unsqueeze(-1)
+                reg_target = gt_bboxes_per_image[matched_gt_inds]
+
+                if self.use_l1:
+                    l1_target = self.get_l1_target(
+                        outputs.new_zeros((num_fg_img, 4)),
+                        gt_bboxes_per_image[matched_gt_inds],
+                        expanded_strides[0][fg_mask],
+                        x_shifts=x_shifts[0][fg_mask],
+                        y_shifts=y_shifts[0][fg_mask],
+                    )
+
+            cls_targets.append(cls_target)
+            reg_targets.append(reg_target)
+            obj_targets.append(obj_target.to(dtype))
+            fg_masks.append(fg_mask)
+            if self.use_l1:
+                l1_targets.append(l1_target)
+
+        cls_targets = torch.cat(cls_targets, 0)
+        reg_targets = torch.cat(reg_targets, 0)
+        obj_targets = torch.cat(obj_targets, 0)
+        fg_masks = torch.cat(fg_masks, 0)
+
+        if self.use_l1:
+            l1_targets = torch.cat(l1_targets, 0)
+
+        num_fg = max(num_fg, 1)
+
+        loss_iou = (self.iou_loss(
+            bbox_preds.view(-1, 4)[fg_masks], reg_targets)).sum() / num_fg
+
+        if self.obj_loss_type == 'focal':
+            loss_obj = (self.focal_loss(
+                obj_preds.sigmoid().view(-1, 1), obj_targets)).sum() / num_fg
+        else:
+            loss_obj = (self.obj_loss(obj_preds.view(-1, 1),
+                                      obj_targets)).sum() / num_fg
+        loss_cls = (self.bcewithlog_loss(
+            cls_preds.view(-1, self.num_classes)[fg_masks],
+            cls_targets)).sum() / num_fg
+
+        if self.use_l1:
+            loss_l1 = (self.l1_loss(
+                origin_preds.view(-1, 4)[fg_masks], l1_targets)).sum() / num_fg
+        else:
+            loss_l1 = 0.0
+
+        reg_weight = 5.0
+        loss = reg_weight * loss_iou + loss_obj + loss_cls + loss_l1
+
+        return (
+            loss,
+            reg_weight * loss_iou,
+            loss_obj,
+            loss_cls,
+            loss_l1,
+            num_fg / max(num_gts, 1),
+        )
+
+    def focal_loss(self, pred, gt):
+        pos_inds = gt.eq(1).float()
+        neg_inds = gt.eq(0).float()
+        pos_loss = torch.log(pred + 1e-5) * torch.pow(1 - pred, 2) * pos_inds * 0.75
+        neg_loss = torch.log(1 - pred + 1e-5) * torch.pow(pred, 2) * neg_inds * 0.25
+        loss = -(pos_loss + neg_loss)
+        return loss
+
+    def get_l1_target(self,
+                      l1_target,
+                      gt,
+                      stride,
+                      x_shifts,
+                      y_shifts,
+                      eps=1e-8):
+        l1_target[:, 0] = gt[:, 0] / stride - x_shifts
+        l1_target[:, 1] = gt[:, 1] / stride - y_shifts
+        l1_target[:, 2] = torch.log(gt[:, 2] / stride + eps)
+        l1_target[:, 3] = torch.log(gt[:, 3] / stride + eps)
+        return l1_target
+
+    @torch.no_grad()
+    def get_assignments(
+            self,
+            batch_idx,
+            num_gt,
+            total_num_anchors,
+            gt_bboxes_per_image,
+            gt_classes,
+            bboxes_preds_per_image,
+            expanded_strides,
+            x_shifts,
+            y_shifts,
+            cls_preds,
+            bbox_preds,
+            obj_preds,
+            labels,
+            imgs,
+            mode='gpu',
+    ):
+
+        if mode == 'cpu':
+            print('------------CPU Mode for This Batch-------------')
+            gt_bboxes_per_image = gt_bboxes_per_image.cpu().float()
+            bboxes_preds_per_image = bboxes_preds_per_image.cpu().float()
+            gt_classes = gt_classes.cpu().float()
+            expanded_strides = expanded_strides.cpu().float()
+            x_shifts = x_shifts.cpu()
+            y_shifts = y_shifts.cpu()
+
+        fg_mask, is_in_boxes_and_center = self.get_in_boxes_info(
+            gt_bboxes_per_image,
+            expanded_strides,
+            x_shifts,
+            y_shifts,
+            total_num_anchors,
+            num_gt,
+        )
+        # reference to: https://github.com/Megvii-BaseDetection/YOLOX/pull/811
+        # NOTE: Fix `selected index k out of range`
+        npa: int = fg_mask.sum().item()  # number of positive anchors
+
+        if npa == 0:
+            gt_matched_classes = torch.zeros(0, device=fg_mask.device).long()
+            pred_ious_this_matching = torch.rand(0, device=fg_mask.device)
+            matched_gt_inds = gt_matched_classes
+            num_fg = npa
+
+            if mode == 'cpu':
+                gt_matched_classes = gt_matched_classes.cuda()
+                fg_mask = fg_mask.cuda()
+                pred_ious_this_matching = pred_ious_this_matching.cuda()
+                matched_gt_inds = matched_gt_inds.cuda()
+
+            return (
+                gt_matched_classes,
+                fg_mask,
+                pred_ious_this_matching,
+                matched_gt_inds,
+                num_fg,
+            )
+
+        bboxes_preds_per_image = bboxes_preds_per_image[fg_mask]
+        cls_preds_ = cls_preds[batch_idx][fg_mask]
+        obj_preds_ = obj_preds[batch_idx][fg_mask]
+        num_in_boxes_anchor = bboxes_preds_per_image.shape[0]
+
+        if mode == 'cpu':
+            gt_bboxes_per_image = gt_bboxes_per_image.cpu()
+            bboxes_preds_per_image = bboxes_preds_per_image.cpu()
+
+        pair_wise_ious = bboxes_iou(gt_bboxes_per_image,
+                                    bboxes_preds_per_image, False)
+
+        if (torch.isnan(pair_wise_ious.max())):
+            pair_wise_ious = bboxes_iou(gt_bboxes_per_image,
+                                        bboxes_preds_per_image, False)
+
+        gt_cls_per_image = (
+            F.one_hot(gt_classes.to(torch.int64),
+                      self.num_classes).float().unsqueeze(1).repeat(
+                1, num_in_boxes_anchor, 1))
+        pair_wise_ious_loss = -torch.log(pair_wise_ious + 1e-8)
+
+        if mode == 'cpu':
+            cls_preds_, obj_preds_ = cls_preds_.cpu(), obj_preds_.cpu()
+
+        if LooseVersion(torch.__version__) >= LooseVersion('1.6.0'):
+            with torch.cuda.amp.autocast(enabled=False):
+                cls_preds_ = (
+                        cls_preds_.float().unsqueeze(0).repeat(num_gt, 1,
+                                                               1).sigmoid_() *
+                        obj_preds_.float().unsqueeze(0).repeat(num_gt, 1,
+                                                               1).sigmoid_())
+                pair_wise_cls_loss = F.binary_cross_entropy(
+                    cls_preds_.sqrt_(), gt_cls_per_image,
+                    reduction='none').sum(-1)
+        else:
+            cls_preds_ = (
+                    cls_preds_.float().unsqueeze(0).repeat(num_gt, 1,
+                                                           1).sigmoid_() *
+                    obj_preds_.float().unsqueeze(0).repeat(num_gt, 1,
+                                                           1).sigmoid_())
+            pair_wise_cls_loss = F.binary_cross_entropy(
+                cls_preds_.sqrt_(), gt_cls_per_image, reduction='none').sum(-1)
+
+        del cls_preds_
+
+        cost = (
+                pair_wise_cls_loss + 3.0 * pair_wise_ious_loss + 100000.0 *
+                (~is_in_boxes_and_center))
+
+        (
+            num_fg,
+            gt_matched_classes,
+            pred_ious_this_matching,
+            matched_gt_inds,
+        ) = self.dynamic_k_matching(cost, pair_wise_ious, gt_classes, num_gt,
+                                    fg_mask)
+
+        del pair_wise_cls_loss, cost, pair_wise_ious, pair_wise_ious_loss
+
+        if mode == 'cpu':
+            gt_matched_classes = gt_matched_classes.cuda()
+            fg_mask = fg_mask.cuda()
+            pred_ious_this_matching = pred_ious_this_matching.cuda()
+            matched_gt_inds = matched_gt_inds.cuda()
+
+        return (
+            gt_matched_classes,
+            fg_mask,
+            pred_ious_this_matching,
+            matched_gt_inds,
+            num_fg,
+        )
+
+    def get_in_boxes_info(
+            self,
+            gt_bboxes_per_image,
+            expanded_strides,
+            x_shifts,
+            y_shifts,
+            total_num_anchors,
+            num_gt,
+    ):
+        expanded_strides_per_image = expanded_strides[0]
+        x_shifts_per_image = x_shifts[0] * expanded_strides_per_image
+        y_shifts_per_image = y_shifts[0] * expanded_strides_per_image
+        x_centers_per_image = (
+            (x_shifts_per_image +
+             0.5 * expanded_strides_per_image).unsqueeze(0).repeat(num_gt, 1)
+        )  # [n_anchor] -> [n_gt, n_anchor]
+        y_centers_per_image = (
+            (y_shifts_per_image +
+             0.5 * expanded_strides_per_image).unsqueeze(0).repeat(num_gt, 1))
+
+        gt_bboxes_per_image_l = (
+            (gt_bboxes_per_image[:, 0] -
+             0.5 * gt_bboxes_per_image[:, 2]).unsqueeze(1).repeat(
+                1, total_num_anchors))
+        gt_bboxes_per_image_r = (
+            (gt_bboxes_per_image[:, 0] +
+             0.5 * gt_bboxes_per_image[:, 2]).unsqueeze(1).repeat(
+                1, total_num_anchors))
+        gt_bboxes_per_image_t = (
+            (gt_bboxes_per_image[:, 1] -
+             0.5 * gt_bboxes_per_image[:, 3]).unsqueeze(1).repeat(
+                1, total_num_anchors))
+        gt_bboxes_per_image_b = (
+            (gt_bboxes_per_image[:, 1] +
+             0.5 * gt_bboxes_per_image[:, 3]).unsqueeze(1).repeat(
+                1, total_num_anchors))
+
+        b_l = x_centers_per_image - gt_bboxes_per_image_l
+        b_r = gt_bboxes_per_image_r - x_centers_per_image
+        b_t = y_centers_per_image - gt_bboxes_per_image_t
+        b_b = gt_bboxes_per_image_b - y_centers_per_image
+        bbox_deltas = torch.stack([b_l, b_t, b_r, b_b], 2)
+
+        is_in_boxes = bbox_deltas.min(dim=-1).values > 0.0
+        is_in_boxes_all = is_in_boxes.sum(dim=0) > 0
+        # in fixed center
+
+        center_radius = 2.5
+
+        gt_bboxes_per_image_l = gt_bboxes_per_image[:, 0].unsqueeze(1).repeat(
+            1, total_num_anchors
+        ) - center_radius * expanded_strides_per_image.unsqueeze(0)
+        gt_bboxes_per_image_r = gt_bboxes_per_image[:, 0].unsqueeze(1).repeat(
+            1, total_num_anchors
+        ) + center_radius * expanded_strides_per_image.unsqueeze(0)
+        gt_bboxes_per_image_t = gt_bboxes_per_image[:, 1].unsqueeze(1).repeat(
+            1, total_num_anchors
+        ) - center_radius * expanded_strides_per_image.unsqueeze(0)
+        gt_bboxes_per_image_b = gt_bboxes_per_image[:, 1].unsqueeze(1).repeat(
+            1, total_num_anchors
+        ) + center_radius * expanded_strides_per_image.unsqueeze(0)
+
+        c_l = x_centers_per_image - gt_bboxes_per_image_l
+        c_r = gt_bboxes_per_image_r - x_centers_per_image
+        c_t = y_centers_per_image - gt_bboxes_per_image_t
+        c_b = gt_bboxes_per_image_b - y_centers_per_image
+        center_deltas = torch.stack([c_l, c_t, c_r, c_b], 2)
+        is_in_centers = center_deltas.min(dim=-1).values > 0.0
+        is_in_centers_all = is_in_centers.sum(dim=0) > 0
+
+        # in boxes and in centers
+        is_in_boxes_anchor = is_in_boxes_all | is_in_centers_all
+
+        is_in_boxes_and_center = (
+                is_in_boxes[:, is_in_boxes_anchor]
+                & is_in_centers[:, is_in_boxes_anchor])
+        return is_in_boxes_anchor, is_in_boxes_and_center
+
+    def dynamic_k_matching(self, cost, pair_wise_ious, gt_classes, num_gt,
+                           fg_mask):
+
+        # Dynamic K
+        # ---------------------------------------------------------------
+        matching_matrix = torch.zeros_like(cost, dtype=torch.uint8)
+
+        ious_in_boxes_matrix = pair_wise_ious
+        n_candidate_k = min(10, ious_in_boxes_matrix.size(1))
+
+        topk_ious, _ = torch.topk(ious_in_boxes_matrix, n_candidate_k, dim=1)
+        dynamic_ks = torch.clamp(topk_ious.sum(1).int(), min=1)
+        dynamic_ks = dynamic_ks.tolist()
+
+        for gt_idx in range(num_gt):
+            _, pos_idx = torch.topk(
+                cost[gt_idx], k=dynamic_ks[gt_idx], largest=False)
+            matching_matrix[gt_idx][pos_idx] = 1
+
+        del topk_ious, dynamic_ks, pos_idx
+
+        anchor_matching_gt = matching_matrix.sum(0)
+        if (anchor_matching_gt > 1).sum() > 0:
+            _, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0)
+            matching_matrix[:, anchor_matching_gt > 1] *= 0
+            matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1
+        fg_mask_inboxes = matching_matrix.sum(0) > 0
+        num_fg = fg_mask_inboxes.sum().item()
+
+        fg_mask[fg_mask.clone()] = fg_mask_inboxes
+
+        matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0)
+        gt_matched_classes = gt_classes[matched_gt_inds]
+
+        pred_ious_this_matching = (matching_matrix *
+                                   pair_wise_ious).sum(0)[fg_mask_inboxes]
+
+        return num_fg, gt_matched_classes, pred_ious_this_matching, matched_gt_inds
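The objectness branch above can optionally be trained with the hand-rolled focal loss (focal_loss) instead of BCE. A minimal, standalone sketch of that formula, outside the patch itself, assuming sigmoid probabilities and hard 0/1 targets with a positive weight of 0.75, a negative weight of 0.25 and a focusing exponent of 2, matching the constants in focal_loss:

    import torch

    def focal_obj_loss(pred, gt, alpha=0.75, gamma=2, eps=1e-5):
        # pred: sigmoid objectness probabilities, gt: 0/1 foreground mask
        pos = gt.eq(1).float()
        neg = gt.eq(0).float()
        pos_loss = torch.log(pred + eps) * (1 - pred)**gamma * pos * alpha
        neg_loss = torch.log(1 - pred + eps) * pred**gamma * neg * (1 - alpha)
        return -(pos_loss + neg_loss)

    pred = torch.tensor([0.9, 0.1, 0.8, 0.3])  # sigmoid(obj_preds)
    gt = torch.tensor([1.0, 0.0, 0.0, 1.0])    # obj_targets from fg_mask
    print(focal_obj_loss(pred, gt).sum() / max(gt.sum().item(), 1))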
diff --git a/easycv/models/detection/yolox/yolo_head.py b/easycv/models/detection/yolox/yolo_head.py
index f71a5147..a93ef79c 100644
--- a/easycv/models/detection/yolox/yolo_head.py
+++ b/easycv/models/detection/yolox/yolo_head.py
@@ -131,10 +131,12 @@ def __init__(self,
 
         self.iou_loss = IOUloss(reduction='none',loss_type=reg_loss_type)
 
+        self.obj_loss_type = obj_loss_type
         if obj_loss_type == 'BCE':
             self.obj_loss = nn.BCEWithLogitsLoss(reduction='none')
         elif obj_loss_type == 'focal':
             self.obj_loss = FocalLoss(reduction='none')
+
         elif obj_loss_type == 'v_focal':
             self.obj_loss = VarifocalLoss(reduction='none')
         else:
@@ -409,8 +411,14 @@ def get_losses(
 
         loss_iou = (self.iou_loss(
             bbox_preds.view(-1, 4)[fg_masks], reg_targets)).sum() / num_fg
-        loss_obj = (self.obj_loss(obj_preds.view(-1, 1),
-                                         obj_targets)).sum() / num_fg
+
+        if self.obj_loss_type == 'focal':
+            loss_obj = (self.focal_loss(
+                obj_preds.sigmoid().view(-1, 1), obj_targets)).sum() / num_fg
+        else:
+            loss_obj = (self.obj_loss(obj_preds.view(-1, 1),
+                                      obj_targets)).sum() / num_fg
         loss_cls = (self.bcewithlog_loss(
             cls_preds.view(-1, self.num_classes)[fg_masks],
             cls_targets)).sum() / num_fg
@@ -424,8 +432,6 @@ def get_losses(
         reg_weight = 5.0
         loss = reg_weight * loss_iou + loss_obj + loss_cls + loss_l1
 
-        print(loss_iou)
-
         return (
             loss,
             reg_weight * loss_iou,
@@ -435,6 +441,15 @@ def get_losses(
             num_fg / max(num_gts, 1),
         )
 
+    def focal_loss(self, pred, gt):
+        pos_inds = gt.eq(1).float()
+        neg_inds = gt.eq(0).float()
+        pos_loss = torch.log(pred + 1e-5) * torch.pow(1 - pred, 2) * pos_inds * 0.75
+        neg_loss = torch.log(1 - pred + 1e-5) * torch.pow(pred, 2) * neg_inds * 0.25
+        loss = -(pos_loss + neg_loss)
+        return loss
+
     def get_l1_target(self,
                       l1_target,
                       gt,
diff --git a/easycv/models/detection/yolox/yolo_pafpn.py b/easycv/models/detection/yolox/yolo_pafpn.py
index 0d48850a..e13f0a10 100644
--- a/easycv/models/detection/yolox/yolo_pafpn.py
+++ b/easycv/models/detection/yolox/yolo_pafpn.py
@@ -21,10 +21,11 @@ def __init__(
         in_channels=[256, 512, 1024],
         depthwise=False,
         act='silu',
-        use_att=None
+        use_att=None,
+        spp_type='spp'
     ):
         super().__init__()
-        self.backbone = CSPDarknet(depth, width, depthwise=depthwise, act=act)
+        self.backbone = CSPDarknet(depth, width, depthwise=depthwise, act=act, spp_type=spp_type)
         self.in_features = in_features
         self.in_channels = in_channels
         Conv = DWConv if depthwise else BaseConv
diff --git a/easycv/models/detection/yolox/yolox.py b/easycv/models/detection/yolox/yolox.py
index e0c1aa31..492f7870 100644
--- a/easycv/models/detection/yolox/yolox.py
+++ b/easycv/models/detection/yolox/yolox.py
@@ -11,6 +11,7 @@
 from easycv.models.detection.utils import postprocess
 from .yolo_head import YOLOXHead
 from .yolo_pafpn import YOLOPAFPN
+from .tood_head import TOODHead
 
 
 def init_yolo(M):
@@ -47,6 +48,8 @@ def __init__(self,
                  use_att: str = None,
                  obj_loss_type: str = 'l1',
                  reg_loss_type: str = 'l1',
+                 spp_type: str = 'spp',
+                 head_type: str = 'yolox',
                  pretrained: str = None):
         super(YOLOX, self).__init__()
         assert model_type in self.param_map, f'invalid model_type for yolox {model_type}, valid ones are {list(self.param_map.keys())}'
@@ -55,8 +58,12 @@ def __init__(self,
         depth = self.param_map[model_type][0]
         width = self.param_map[model_type][1]
 
-        self.backbone = YOLOPAFPN(depth, width, in_channels=in_channels, use_att=use_att)
-        self.head = YOLOXHead(num_classes, width, in_channels=in_channels, obj_loss_type=obj_loss_type, reg_loss_type=reg_loss_type)
+        self.backbone = YOLOPAFPN(depth, width, in_channels=in_channels, use_att=use_att, spp_type=spp_type)
+
+        if head_type == 'yolox':
+            self.head = YOLOXHead(num_classes, width, in_channels=in_channels, obj_loss_type=obj_loss_type, reg_loss_type=reg_loss_type)
+        elif head_type == 'tood':
+            self.head = TOODHead(num_classes, width, in_channels=in_channels, obj_loss_type=obj_loss_type, reg_loss_type=reg_loss_type)
 
         self.apply(init_yolo)  # init_yolo(self)
         self.head.initialize_biases(1e-2)
@@ -86,6 +93,8 @@ def forward_train(self,
 
         targets = torch.cat([gt_labels, gt_bboxes], dim=2)
 
+
+
         loss, iou_loss, conf_loss, cls_loss, l1_loss, num_fg = self.head(
             fpn_outs, targets, img)
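With the constructor arguments added above, the neck attention, loss functions, SPP variant and head type are all selected when the detector is built. A hedged usage sketch (the keyword names come from the hunk above; the values shown are only examples):

    from easycv.models.detection.yolox.yolox import YOLOX

    model = YOLOX(
        model_type='s',          # depth/width looked up in param_map
        num_classes=80,
        test_conf=0.01,
        nms_thre=0.65,
        use_att='ASFF',          # attention module in the PAFPN
        obj_loss_type='focal',   # objectness loss
        reg_loss_type='giou',    # box regression loss
        spp_type='spp',          # SPP block variant in CSPDarknet
        head_type='tood')        # 'yolox' (default) or 'tood'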
 
diff --git a/easycv/predictors/detector.py b/easycv/predictors/detector.py
index 61ac7d8e..f021241f 100644
--- a/easycv/predictors/detector.py
+++ b/easycv/predictors/detector.py
@@ -49,53 +49,111 @@ def __init__(self,
         """
         self.model_path = model_path
         self.max_det = max_det
+        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
+        self.use_jit = model_path.endswith('jit') or model_path.endswith(
+            'blade')
+
+        self.use_blade = model_path.endswith('blade')
+
+        if self.use_blade:
+            import torch_blade
+
         if model_config:
             model_config = json.loads(model_config)
         else:
             model_config = {}
+
         self.score_thresh = model_config[
             'score_thresh'] if 'score_thresh' in model_config else score_thresh
 
-        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
-
-        with io.open(self.model_path, 'rb') as infile:
-            checkpoint = torch.load(infile, map_location='cpu')
+        if self.use_jit:
+            with io.open(model_path, 'rb') as infile:
+                map_location = 'cpu' if self.device == 'cpu' else 'cuda'
+                self.model = torch.jit.load(infile, map_location)
 
-        assert 'meta' in checkpoint and 'config' in checkpoint[
-            'meta'], 'meta.config is missing from checkpoint'
-        config_str = checkpoint['meta']['config']
-        # get config
-        basename = os.path.basename(self.model_path)
-        fname, _ = os.path.splitext(basename)
-        self.local_config_file = os.path.join(CACHE_DIR,
-                                              f'{fname}_config.json')
-        if not os.path.exists(CACHE_DIR):
-            os.makedirs(CACHE_DIR)
-        with open(self.local_config_file, 'w') as ofile:
-            ofile.write(config_str)
+            with io.open(model_path + '.config.json', 'r') as infile:
+                self.cfg = json.load(infile)
+                test_pipeline = self.cfg['test_pipeline']
+                self.CLASSES = self.cfg['classes']
+                self.end2end = self.cfg['export']['end2end']
 
-        self.cfg = mmcv_config_fromfile(self.local_config_file)
+            self.traceable = True
 
-        # build model
-        self.model = build_model(self.cfg.model)
-
-        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
-        map_location = 'cpu' if self.device == 'cpu' else 'cuda'
-        self.ckpt = load_checkpoint(
-            self.model, self.model_path, map_location=map_location)
-
-        self.model.to(self.device)
-        self.model.eval()
-
-        test_pipeline = self.cfg.test_pipeline
-
-        self.CLASSES = self.cfg.CLASSES
+        else:
+            self.end2end = False
+            with io.open(self.model_path, 'rb') as infile:
+                checkpoint = torch.load(infile, map_location='cpu')
+
+            assert 'meta' in checkpoint and 'config' in checkpoint[
+                'meta'], 'meta.config is missing from checkpoint'
+
+            config_str = checkpoint['meta']['config']
+            config_str = config_str[config_str.find('_base_'):]
+            # get config
+            basename = os.path.basename(self.model_path)
+            fname, _ = os.path.splitext(basename)
+            self.local_config_file = os.path.join(CACHE_DIR,
+                                                  f'{fname}_config.py')
+            if not os.path.exists(CACHE_DIR):
+                os.makedirs(CACHE_DIR)
+            with open(self.local_config_file, 'w') as ofile:
+                ofile.write(config_str)
+
+            self.cfg = mmcv_config_fromfile(self.local_config_file)
+
+            # build model
+            self.model = build_model(self.cfg.model)
+            self.traceable = getattr(self.model, 'trace_able', False)
+
+            self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
+            map_location = 'cpu' if self.device == 'cpu' else 'cuda'
+            self.ckpt = load_checkpoint(
+                self.model, self.model_path, map_location=map_location)
+
+            self.model.to(self.device)
+            self.model.eval()
+
+            test_pipeline = self.cfg.test_pipeline
+            self.CLASSES = self.cfg.CLASSES
 
         # build pipeline
         pipeline = [build_from_cfg(p, PIPELINES) for p in test_pipeline]
         self.pipeline = Compose(pipeline)
 
-    def predict(self, input_data_list, batch_size=-1):
+    def post_assign(self, outputs, img_metas):
+        detection_boxes = []
+        detection_scores = []
+        detection_classes = []
+        img_metas_list = []
+
+        for i in range(len(outputs)):
+            if img_metas:
+                img_metas_list.append(img_metas[i])
+            if outputs[i] is not None:
+                if outputs[i].requires_grad:
+                    outputs[i] = outputs[i].detach()
+                bboxes = outputs[i][:, 0:4]
+                if img_metas:
+                    bboxes /= img_metas[i]['scale_factor'][0]
+                detection_boxes.append(bboxes.cpu().numpy())
+                detection_scores.append(
+                    (outputs[i][:, 4] * outputs[i][:, 5]).cpu().numpy())
+                detection_classes.append(
+                    outputs[i][:, 6].cpu().numpy().astype(np.int32))
+            else:
+                detection_boxes.append(None)
+                detection_scores.append(None)
+                detection_classes.append(None)
+
+        test_outputs = {
+            'detection_boxes': detection_boxes,
+            'detection_scores': detection_scores,
+            'detection_classes': detection_classes,
+            'img_metas': img_metas_list
+        }
+        return test_outputs
+
+    def predict(self, input_data_list, batch_size=-1, to_numpy=True):
         """
     using session run predict a number of samples using batch_size
 
@@ -115,29 +173,68 @@ def predict(self, input_data_list, batch_size=-1):
                 img = np.asarray(img)
 
             ori_img_shape = img.shape[:2]
-            data_dict = {
-                'ori_img_shape': ori_img_shape,
-                'img': cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
-            }
-            data_dict = self.pipeline(data_dict)
-            img = data_dict['img']
-            img = torch.unsqueeze(img._data, 0).to(self.device)
-            data_dict.pop('img')
-            det_out = self.model(
-                img, mode='test', img_metas=[data_dict['img_metas']._data])
-            # det_out = det_out[:self.max_det]
-            # scale box to original image scale, this logic has some operation
-            # that can not be traced, see
-            # https://discuss.pytorch.org/t/windows-libtorch-c-load-cuda-module-with-std-runtime-error-message-shape-4-is-invalid-for-input-if-size-40/63073/4
-            # det_out = scale_coords(img.shape[2:], det_out, ori_img_shape, (scale_factor, pad))
-
-            detection_scores = det_out['detection_scores'][0]
-            sel_ids = detection_scores > self.score_thresh
-            detection_boxes = det_out['detection_boxes'][0][sel_ids]
-            detection_classes = det_out['detection_classes'][0][sel_ids]
+
+            if self.end2end:
+                # the input should also be as the type of uint8 as mmcv
+                img = torch.from_numpy(img).to(self.device)
+                det_out = self.model(img)
+
+                detection_scores = det_out['detection_scores']
+
+                if detection_scores is not None:
+                    sel_ids = detection_scores > self.score_thresh
+                    detection_scores = detection_scores[sel_ids]
+                    detection_boxes = det_out['detection_boxes'][sel_ids]
+                    detection_classes = det_out['detection_classes'][sel_ids]
+                else:
+                    detection_boxes = []
+                    detection_classes = []
+
+                if to_numpy and detection_scores is not None:
+                    detection_scores = detection_scores.detach().cpu().numpy()
+                    detection_boxes = detection_boxes.detach().cpu().numpy()
+                    detection_classes = detection_classes.detach().cpu().numpy()
+
+            else:
+                data_dict = {'img': img}
+                data_dict = self.pipeline(data_dict)
+                img = data_dict['img']
+                img = torch.unsqueeze(img._data, 0).to(self.device)
+                data_dict.pop('img')
+
+                if self.traceable:
+                    with torch.no_grad():
+                        det_out = self.post_assign(
+                            self.model(img),
+                            img_metas=[data_dict['img_metas']._data])
+                else:
+                    with torch.no_grad():
+                        det_out = self.model(
+                            img,
+                            mode='test',
+                            img_metas=[data_dict['img_metas']._data])
+
+                # det_out = det_out[:self.max_det]
+                # scale box to original image scale, this logic has some operation
+                # that can not be traced, see
+                # https://discuss.pytorch.org/t/windows-libtorch-c-load-cuda-module-with-std-runtime-error-message-shape-4-is-invalid-for-input-if-size-40/63073/4
+                # det_out = scale_coords(img.shape[2:], det_out, ori_img_shape, (scale_factor, pad))
+
+                detection_scores = det_out['detection_scores'][0]
+
+                if detection_scores is not None:
+                    sel_ids = detection_scores > self.score_thresh
+                    detection_scores = detection_scores[sel_ids]
+                    detection_boxes = det_out['detection_boxes'][0][sel_ids]
+                    detection_classes = det_out['detection_classes'][0][
+                        sel_ids]
+                else:
+                    detection_boxes = None
+                    detection_classes = None
+
             num_boxes = detection_classes.shape[
                 0] if detection_classes is not None else 0
-            # print(num_boxes)
+
             detection_classes_names = [
                 self.CLASSES[detection_classes[idx]]
                 for idx in range(num_boxes)
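After these changes TorchYoloXPredictor handles both raw checkpoints and exported TorchScript/Blade models: a model file whose name ends with 'jit' or 'blade' is loaded with torch.jit.load, and its test pipeline and class names are read from the side-car '<model>.config.json'. A hedged usage sketch (paths below are placeholders, not files from this repository):

    import numpy as np
    from PIL import Image

    from easycv.predictors.detector import TorchYoloXPredictor

    # placeholder path; a name ending in '.jit' triggers the TorchScript branch
    predictor = TorchYoloXPredictor(
        model_path='models/epoch_300_end2end.jit', score_thresh=0.5)

    img = np.asarray(Image.open('demo.jpg'))  # placeholder image
    results = predictor.predict([img])[0]
    print(results['detection_boxes'], results['detection_scores'],
          results['detection_class_names'])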
diff --git a/easycv/utils/checkpoint.py b/easycv/utils/checkpoint.py
index 5a60f713..ae0a90dd 100644
--- a/easycv/utils/checkpoint.py
+++ b/easycv/utils/checkpoint.py
@@ -5,6 +5,7 @@
 import torch
 from mmcv.parallel import is_module_wrapper
 from mmcv.runner import load_checkpoint as mmcv_load_checkpoint
+
 from mmcv.runner.checkpoint import (_save_to_state_dict, get_state_dict,
                                     weights_to_cpu)
 from torch.optim import Optimizer
diff --git a/show_predict.py b/show_predict.py
new file mode 100644
index 00000000..d335f495
--- /dev/null
+++ b/show_predict.py
@@ -0,0 +1,88 @@
+import os
+import numpy as np
+from PIL import Image
+import cv2
+import random
+from easycv.predictors.detector import TorchYoloXPredictor
+
+colors = [[255, 0, 0], [255, 255, 0], [255, 255, 0], [0, 255, 255]
+          ] + [[random.randint(0, 255) for _ in range(3)] for _ in range(2000)]
+
+
+def plot_boxes(outputs, imgs, save_path=None, color=None, line_thickness=None):
+    x = outputs['detection_boxes']
+    score = outputs['detection_scores']
+    id = outputs['detection_classes']
+    label = outputs['detection_class_names']
+
+
+    # Plots one bounding box on image img
+    tl = int(
+        line_thickness or round(0.002 *
+                                (imgs.shape[0] + imgs.shape[1]) /
+                                2)) + 1  # line/font thickness
+    # tl = int(line_thickness)
+
+    for num in range(x.shape[0]):
+        c1, c2 = (int(x[num][0]), int(x[num][1])), (int(x[num][2]),
+                                                    int(x[num][3]))
+        cv2.rectangle(
+            imgs,
+            c1,
+            c2,
+            colors[id[num]],
+            thickness=tl,
+            lineType=cv2.LINE_AA)
+
+        tf = max(tl - 1, 1)  # font thickness
+        t_size = cv2.getTextSize(
+            label[num], 0, fontScale=tl / 10, thickness=tf)[0]
+
+        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
+        cv2.rectangle(imgs, c1, c2, colors[id[num]], -1,
+                      cv2.LINE_AA)  # filled
+        cv2.putText(
+            imgs,
+            label[num], (c1[0], c1[1] - 2),
+            0,
+            0.2, [225, 0, 255],
+            thickness=1,
+            lineType=cv2.LINE_AA)
+        cv2.putText(
+            imgs,
+            str(score[num]), (c1[0], c1[1] - 10),
+            0,
+            0.2, [225, 0, 255],
+            thickness=1,
+            lineType=cv2.LINE_AA)
+
+    imgs = cv2.cvtColor(imgs, cv2.COLOR_BGR2RGB)
+    os.makedirs(save_path, exist_ok=True)
+    cv2.imwrite(os.path.join(save_path, 'result_39.6.jpg'), imgs)
+
+    return
+
+
+def main():
+    pretrain_path = '/apsarapangu/disk5/zxy/pretrained/models/epoch_300_39.6.pth'
+    data_path = '/apsarapangu/disk5/zxy/data/coco/'
+    detection_model_path = pretrain_path
+
+    img = os.path.join(data_path,
+                       'val2017/000000037777.jpg')
+
+    input_data_list = [np.asarray(Image.open(img))]
+    predictor = TorchYoloXPredictor(
+        model_path=detection_model_path, score_thresh=0.5)
+
+    output = predictor.predict(input_data_list)[0]
+
+    img = cv2.imread(img)
+    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+    plot_boxes(output, img, save_path='./result')
+    print(output)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tools/eval.py b/tools/eval.py
index 62a4cb08..9221e84e 100644
--- a/tools/eval.py
+++ b/tools/eval.py
@@ -33,7 +33,7 @@
 # from tools.fuse_conv_bn import fuse_module
 
 # from mmcv import Config
-
+from mmcv.runner.checkpoint import _load_checkpoint
 
 def parse_args():
     parser = argparse.ArgumentParser(
@@ -172,6 +172,13 @@ def main():
     device = 'cuda' if torch.cuda.is_available() else 'cpu'
     print(f'use device {device}')
     checkpoint = load_checkpoint(model, args.checkpoint, map_location=device)
+    #
+    # official_path = "/apsarapangu/disk5/zxy/pretrained/models/yolox_s_official_40.5.pth"
+    # if 'official' in official_path:
+    #     checkpoint_model = _load_checkpoint(official_path, device)
+    #     state_dict = checkpoint_model['model']
+    #     model.load_state_dict(state_dict)
+
     model.to(device)
     # if args.fuse_conv_bn:
     #     model = fuse_module(model)
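The commented-out block above sketches how weights from an official YOLOX release can be loaded for evaluation: those checkpoints keep the state dict under the 'model' key, so they go through _load_checkpoint (imported above) rather than the regular EasyCV loader. A hedged, self-contained version of that idea (the path is a placeholder):

    from mmcv.runner.checkpoint import _load_checkpoint

    def load_official_yolox_weights(model, path, device='cpu'):
        # official YOLOX checkpoints store the weights under the 'model' key
        checkpoint = _load_checkpoint(path, map_location=device)
        model.load_state_dict(checkpoint['model'])
        return model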

From a88c747a17641f0a098b2cd774d279637b7310c6 Mon Sep 17 00:00:00 2001
From: zouxinyi0625 <zouxinyi.zxy@alibaba-inc.com>
Date: Mon, 11 Jul 2022 16:45:50 +0800
Subject: [PATCH 06/69] fix export error

---
 easycv/predictors/detector.py | 4 ++--
 tests/ut_config.py            | 4 +++-
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/easycv/predictors/detector.py b/easycv/predictors/detector.py
index db214c85..a8ce471d 100644
--- a/easycv/predictors/detector.py
+++ b/easycv/predictors/detector.py
@@ -94,12 +94,11 @@ def __init__(self,
                 'meta'], 'meta.config is missing from checkpoint'
 
             config_str = checkpoint['meta']['config']
-            config_str = config_str[config_str.find('_base_'):]
             # get config
             basename = os.path.basename(self.model_path)
             fname, _ = os.path.splitext(basename)
             self.local_config_file = os.path.join(CACHE_DIR,
-                                                  f'{fname}_config.py')
+                                                  f'{fname}_config.json')
             if not os.path.exists(CACHE_DIR):
                 os.makedirs(CACHE_DIR)
             with open(self.local_config_file, 'w') as ofile:
@@ -109,6 +108,7 @@ def __init__(self,
 
             # build model
             self.model = build_model(self.cfg.model)
+
             self.traceable = getattr(self.model, 'trace_able', False)
 
             self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
diff --git a/tests/ut_config.py b/tests/ut_config.py
index 229ad81b..a8f57c8c 100644
--- a/tests/ut_config.py
+++ b/tests/ut_config.py
@@ -82,8 +82,10 @@
 PRETRAINED_MODEL_FACEID = os.path.join(BASE_LOCAL_PATH,
                                        'pretrained_models/faceid')
 
+# PRETRAINED_MODEL_YOLOXS_EXPORT = os.path.join(
+#     BASE_LOCAL_PATH, 'pretrained_models/detection/yolox_s/epoch_300.pth')
 PRETRAINED_MODEL_YOLOXS_EXPORT = os.path.join(
-    BASE_LOCAL_PATH, 'pretrained_models/detection/yolox_s/epoch_300.pth')
+    BASE_LOCAL_PATH, 'pretrained_models/detection/yolox_s/epoch_300_export.pt')
 PRETRAINED_MODEL_YOLOXS_END2END_JIT = os.path.join(
     BASE_LOCAL_PATH,
     'pretrained_models/detection/yolox_s/epoch_300_end2end.jit')

From a837d97f4547c2a0429c13619c1cf27af8cc2375 Mon Sep 17 00:00:00 2001
From: zouxinyi0625 <zouxinyi.zxy@alibaba-inc.com>
Date: Wed, 27 Jul 2022 10:59:40 +0800
Subject: [PATCH 07/69] add asff sim, gsconv

---
 compute_model_params.py                       | 310 ++++++
 configs/detection/yolox/yolox_best_asff_1.py  | 190 ++++
 configs/detection/yolox/yolox_best_asff_2.py  | 192 ++++
 configs/detection/yolox/yolox_best_asff_4.py  | 190 ++++
 configs/detection/yolox/yolox_best_asff_8.py  | 190 ++++
 configs/detection/yolox/yolox_best_conv1.py   | 194 ++++
 configs/detection/yolox/yolox_best_la_16.py   | 194 ++++
 configs/detection/yolox/yolox_best_la_32.py   | 196 ++++
 .../yolox/yolox_best_la_32_sconv_2.py         | 194 ++++
 .../yolox/yolox_best_la_32_sconv_4.py         | 194 ++++
 configs/detection/yolox/yolox_best_lrelu.py   | 190 ++++
 configs/detection/yolox/yolox_best_ori.py     | 190 ++++
 configs/detection/yolox/yolox_best_relu.py    | 190 ++++
 configs/detection/yolox/yolox_best_stack_1.py | 194 ++++
 configs/detection/yolox/yolox_best_stack_2.py | 194 ++++
 configs/detection/yolox/yolox_best_stack_3.py | 194 ++++
 configs/detection/yolox/yolox_best_stack_4.py | 194 ++++
 configs/detection/yolox/yolox_best_stack_5.py | 194 ++++
 .../detection/yolox/yolox_l_8xb8_300e_coco.py |   3 +-
 .../yolox/yolox_l_8xb8_300e_coco_asff_2.py    |  10 +
 .../yolox_l_8xb8_300e_coco_asff_tood_giou.py  |  10 +
 .../yolox/yolox_l_8xb8_300e_coco_best_ori.py  |  10 +
 .../yolox/yolox_l_8xb8_300e_coco_la_32.py     |  10 +
 .../yolox/yolox_l_8xb8_300e_coco_yolo6.py     |  10 +
 .../yolox/yolox_l_8xb8_300e_coco_yolo6_att.py |  10 +
 .../yolox_l_8xb8_300e_coco_yolo6_att_relu.py  |  10 +
 .../yolox/yolox_s_8xb16_300e_coco.py          |   7 +-
 .../yolox/yolox_s_8xb16_300e_tal_asff_giou.py | 190 ++++
 .../yolox_s_8xb16_300e_tal_asff_sppf_giou.py  | 192 ++++
 .../yolox/yolox_tiny_8xb16_300e_coco.py       |  11 +-
 configs/detection/yolox/yolox_yolo6.py        | 190 ++++
 .../detection/yolox/yolox_yolo6_asff_sim.py   | 197 ++++
 configs/detection/yolox/yolox_yolo6_att.py    | 195 ++++
 .../detection/yolox/yolox_yolo6_att_relu.py   | 196 ++++
 .../detection/yolox/yolox_yolo6_att_sim.py    | 199 ++++
 .../detection/yolox/yolox_yolo6_att_sim_1.py  | 202 ++++
 .../detection/yolox/yolox_yolo6_att_sim_16.py | 199 ++++
 .../detection/yolox/yolox_yolo6_att_sim_32.py | 199 ++++
 .../detection/yolox/yolox_yolo6_att_sim_8.py  | 199 ++++
 .../detection/yolox/yolox_yolo6_att_sim_d.py  | 199 ++++
 .../yolox/yolox_yolo6_att_sim_no_expand.py    | 199 ++++
 configs/detection/yolox/yolox_yolo6_gsconv.py | 191 ++++
 .../yolox/yolox_yolo6_gsconv_asff_sim.py      | 199 ++++
 .../yolox/yolox_yolo6_gsconv_part.py          | 192 ++++
 .../detection/yolox/yolox_yolo6_head_ori.py   | 202 ++++
 .../detection/yolox/yolox_yolo6_head_tood.py  | 202 ++++
 .../detection/yolox/yolox_yolo6_yoloe_head.py | 195 ++++
 easycv/apis/train.py                          |   2 +
 easycv/hooks/yolox_mode_switch_hook.py        |   3 +
 easycv/models/backbones/__init__.py           |   1 +
 easycv/models/backbones/efficientrep.py       | 135 +++
 easycv/models/backbones/network_blocks.py     |  59 +-
 easycv/models/backbones/yolo6_blocks.py       | 269 +++++
 easycv/models/detection/yolox/ASFF.py         |  59 +-
 easycv/models/detection/yolox/ASFF_sim.py     | 292 ++++++
 easycv/models/detection/yolox/ppyoloe_head.py | 946 ++++++++++++++++++
 easycv/models/detection/yolox/tood_head.py    | 100 +-
 easycv/models/detection/yolox/yolo_pafpn.py   | 361 +++++--
 easycv/models/detection/yolox/yolox.py        | 121 ++-
 easycv/runner/ev_runner.py                    |   1 +
 60 files changed, 9583 insertions(+), 178 deletions(-)
 create mode 100644 compute_model_params.py
 create mode 100644 configs/detection/yolox/yolox_best_asff_1.py
 create mode 100644 configs/detection/yolox/yolox_best_asff_2.py
 create mode 100644 configs/detection/yolox/yolox_best_asff_4.py
 create mode 100644 configs/detection/yolox/yolox_best_asff_8.py
 create mode 100644 configs/detection/yolox/yolox_best_conv1.py
 create mode 100644 configs/detection/yolox/yolox_best_la_16.py
 create mode 100644 configs/detection/yolox/yolox_best_la_32.py
 create mode 100644 configs/detection/yolox/yolox_best_la_32_sconv_2.py
 create mode 100644 configs/detection/yolox/yolox_best_la_32_sconv_4.py
 create mode 100644 configs/detection/yolox/yolox_best_lrelu.py
 create mode 100644 configs/detection/yolox/yolox_best_ori.py
 create mode 100644 configs/detection/yolox/yolox_best_relu.py
 create mode 100644 configs/detection/yolox/yolox_best_stack_1.py
 create mode 100644 configs/detection/yolox/yolox_best_stack_2.py
 create mode 100644 configs/detection/yolox/yolox_best_stack_3.py
 create mode 100644 configs/detection/yolox/yolox_best_stack_4.py
 create mode 100644 configs/detection/yolox/yolox_best_stack_5.py
 create mode 100644 configs/detection/yolox/yolox_l_8xb8_300e_coco_asff_2.py
 create mode 100644 configs/detection/yolox/yolox_l_8xb8_300e_coco_asff_tood_giou.py
 create mode 100644 configs/detection/yolox/yolox_l_8xb8_300e_coco_best_ori.py
 create mode 100644 configs/detection/yolox/yolox_l_8xb8_300e_coco_la_32.py
 create mode 100644 configs/detection/yolox/yolox_l_8xb8_300e_coco_yolo6.py
 create mode 100644 configs/detection/yolox/yolox_l_8xb8_300e_coco_yolo6_att.py
 create mode 100644 configs/detection/yolox/yolox_l_8xb8_300e_coco_yolo6_att_relu.py
 create mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_giou.py
 create mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_sppf_giou.py
 create mode 100644 configs/detection/yolox/yolox_yolo6.py
 create mode 100644 configs/detection/yolox/yolox_yolo6_asff_sim.py
 create mode 100644 configs/detection/yolox/yolox_yolo6_att.py
 create mode 100644 configs/detection/yolox/yolox_yolo6_att_relu.py
 create mode 100644 configs/detection/yolox/yolox_yolo6_att_sim.py
 create mode 100644 configs/detection/yolox/yolox_yolo6_att_sim_1.py
 create mode 100644 configs/detection/yolox/yolox_yolo6_att_sim_16.py
 create mode 100644 configs/detection/yolox/yolox_yolo6_att_sim_32.py
 create mode 100644 configs/detection/yolox/yolox_yolo6_att_sim_8.py
 create mode 100644 configs/detection/yolox/yolox_yolo6_att_sim_d.py
 create mode 100644 configs/detection/yolox/yolox_yolo6_att_sim_no_expand.py
 create mode 100644 configs/detection/yolox/yolox_yolo6_gsconv.py
 create mode 100644 configs/detection/yolox/yolox_yolo6_gsconv_asff_sim.py
 create mode 100644 configs/detection/yolox/yolox_yolo6_gsconv_part.py
 create mode 100644 configs/detection/yolox/yolox_yolo6_head_ori.py
 create mode 100644 configs/detection/yolox/yolox_yolo6_head_tood.py
 create mode 100644 configs/detection/yolox/yolox_yolo6_yoloe_head.py
 create mode 100644 easycv/models/backbones/efficientrep.py
 create mode 100644 easycv/models/backbones/yolo6_blocks.py
 create mode 100644 easycv/models/detection/yolox/ASFF_sim.py
 create mode 100644 easycv/models/detection/yolox/ppyoloe_head.py

diff --git a/compute_model_params.py b/compute_model_params.py
new file mode 100644
index 00000000..fda0000c
--- /dev/null
+++ b/compute_model_params.py
@@ -0,0 +1,310 @@
+# Copyright (c) 2014-2021 Megvii Inc And Alibaba PAI-Teams. All rights reserved.
+
+import torch
+import torch.nn as nn
+
+from easycv.models.backbones.darknet import CSPDarknet
+from easycv.models.backbones.efficientrep import EfficientRep
+from easycv.models.backbones.network_blocks import BaseConv, CSPLayer, DWConv, GSConv, VoVGSCSP
+from torchsummaryX import summary
+import math
+
+
+def make_divisible(x, divisor):
+    # Upward revision the value x to make it evenly divisible by the divisor.
+    return math.ceil(x / divisor) * divisor
+
+
+class YOLOPAFPN(nn.Module):
+    """
+    YOLOv3 model. Darknet 53 is the default backbone of this model.
+    """
+
+    def __init__(
+        self,
+        depth=1.0,
+        width=1.0,
+        in_features=('dark3', 'dark4', 'dark5'),
+        in_channels=[256, 512, 1024],
+        depthwise=False,
+        act='silu',
+        asff_channel=16,
+        use_att=None,
+        expand_kernel=3,
+        down_rate=32,
+        use_dconv=False,
+        use_expand=True,
+        spp_type='spp',
+        backbone='CSPDarknet',
+        neck='gsconv',
+        neck_mode='part',
+    ):
+        super().__init__()
+        # self.backbone = CSPDarknet(depth, width, depthwise=depthwise, act=act,spp_type=spp_type)
+        self.backbone_name = backbone
+        if backbone == "CSPDarknet":
+            self.backbone = CSPDarknet(depth, width, depthwise=depthwise, act=act)
+        else:
+            depth_mul = depth
+            width_mul = width
+            num_repeat_backbone = [1, 6, 12, 18, 6]
+            channels_list_backbone = [64, 128, 256, 512, 1024]
+            num_repeat_neck = [12, 12, 12, 12]
+            channels_list_neck = [256, 128, 128, 256, 256, 512]
+
+            channels = 3
+
+            num_repeat = [(max(round(i * depth_mul), 1) if i > 1 else i) for i in
+                          (num_repeat_backbone + num_repeat_neck)]
+
+            channels_list = [make_divisible(i * width_mul, 8) for i in (channels_list_backbone + channels_list_neck)]
+            self.backbone = EfficientRep(in_channels=channels, channels_list=channels_list, num_repeats=num_repeat)
+
+
+        self.in_features = in_features
+        self.in_channels = in_channels
+        Conv = DWConv if depthwise else BaseConv
+
+        self.neck = neck
+        self.neck_mode = neck_mode
+
+        if neck == 'yolo':
+            self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
+            self.lateral_conv0 = BaseConv(
+                int(in_channels[2] * width),
+                int(in_channels[1] * width),
+                1,
+                1,
+                act=act)
+            self.C3_p4 = CSPLayer(
+                int(2 * in_channels[1] * width),
+                int(in_channels[1] * width),
+                round(3 * depth),
+                False,
+                depthwise=depthwise,
+                act=act)  # cat
+
+            self.reduce_conv1 = BaseConv(
+                int(in_channels[1] * width),
+                int(in_channels[0] * width),
+                1,
+                1,
+                act=act)
+            self.C3_p3 = CSPLayer(
+                int(2 * in_channels[0] * width),
+                int(in_channels[0] * width),
+                round(3 * depth),
+                False,
+                depthwise=depthwise,
+                act=act)
+
+            # bottom-up conv
+            self.bu_conv2 = Conv(
+                int(in_channels[0] * width),
+                int(in_channels[0] * width),
+                3,
+                2,
+                act=act)
+            self.C3_n3 = CSPLayer(
+                int(2 * in_channels[0] * width),
+                int(in_channels[1] * width),
+                round(3 * depth),
+                False,
+                depthwise=depthwise,
+                act=act)
+
+            # bottom-up conv
+            self.bu_conv1 = Conv(
+                int(in_channels[1] * width),
+                int(in_channels[1] * width),
+                3,
+                2,
+                act=act)
+            self.C3_n4 = CSPLayer(
+                int(2 * in_channels[1] * width),
+                int(in_channels[2] * width),
+                round(3 * depth),
+                False,
+                depthwise=depthwise,
+                act=act)
+        else:
+            # gsconv
+            self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
+            self.gsconv1 = GSConv(
+                int(in_channels[2] * width),
+                int(in_channels[1] * width),
+                1,
+                1,
+                act=act)
+
+            self.gsconv2 = GSConv(
+                int(in_channels[1] * width),
+                int(in_channels[0] * width),
+                1,
+                1,
+                act=act
+            )
+
+            self.gsconv4 = GSConv(
+                int(in_channels[0] * width),
+                int(in_channels[0] * width),
+                3,
+                2,
+                act=act
+            )
+
+            self.gsconv5 = GSConv(
+                int(in_channels[1] * width),
+                int(in_channels[1] * width),
+                3,
+                2,
+                act=act
+            )
+
+            if self.neck_mode == 'all':
+                self.vovGSCSP1 = VoVGSCSP(
+                    int(2 * in_channels[1] * width),
+                    int(in_channels[1] * width),
+                    round(3 * depth),
+                    False,
+                )
+
+                self.gsconv3 = GSConv(
+                    int(2 * in_channels[0] * width),
+                    int(2 * in_channels[0] * width),
+                    1,
+                    1,
+                    act=act
+                )
+                self.vovGSCSP2 = VoVGSCSP(
+                    int(2*in_channels[0] * width),
+                    int(in_channels[0] * width),
+                    round(3 * depth),
+                    False,
+                )
+
+
+                self.vovGSCSP3 = VoVGSCSP(
+                    int(2 * in_channels[0] * width),
+                    int(in_channels[1] * width),
+                    round(3 * depth),
+                    False,
+                )
+
+                self.vovGSCSP4 = VoVGSCSP(
+                    int(2 * in_channels[1] * width),
+                    int(in_channels[2] * width),
+                    round(3 * depth),
+                    False,
+                )
+            else:
+                self.C3_p4 = CSPLayer(
+                    int(2 * in_channels[1] * width),
+                    int(in_channels[1] * width),
+                    round(3 * depth),
+                    False,
+                    depthwise=depthwise,
+                    act=act)  # cat
+
+                self.C3_p3 = CSPLayer(
+                    int(2 * in_channels[0] * width),
+                    int(in_channels[0] * width),
+                    round(3 * depth),
+                    False,
+                    depthwise=depthwise,
+                    act=act)
+
+                self.C3_n3 = CSPLayer(
+                    int(2 * in_channels[0] * width),
+                    int(in_channels[1] * width),
+                    round(3 * depth),
+                    False,
+                    depthwise=depthwise,
+                    act=act)
+
+                self.C3_n4 = CSPLayer(
+                    int(2 * in_channels[1] * width),
+                    int(in_channels[2] * width),
+                    round(3 * depth),
+                    False,
+                    depthwise=depthwise,
+                    act=act)
+
+
+
+    def forward(self, input):
+        """
+        Args:
+            input: tuple of the three input feature maps
+                (the backbone is bypassed here for parameter counting).
+
+        Returns:
+            Tuple[Tensor]: FPN feature.
+        """
+
+        #  backbone (bypassed: the FPN features are fed in directly)
+        # out_features = self.backbone(input)
+        # features = [out_features[f] for f in self.in_features]
+        # [x2, x1, x0] = features
+        x2, x1, x0 = input
+        if self.neck == 'yolo':
+            fpn_out0 = self.lateral_conv0(x0)  # 1024->512/32
+            f_out0 = self.upsample(fpn_out0)  # 512/16
+            f_out0 = torch.cat([f_out0, x1], 1)  # 512->1024/16
+            f_out0 = self.C3_p4(f_out0)  # 1024->512/16
+
+            fpn_out1 = self.reduce_conv1(f_out0)  # 512->256/16
+            f_out1 = self.upsample(fpn_out1)  # 256/8
+            f_out1 = torch.cat([f_out1, x2], 1)  # 256->512/8
+            pan_out2 = self.C3_p3(f_out1)  # 512->256/8
+
+            p_out1 = self.bu_conv2(pan_out2)  # 256->256/16
+            p_out1 = torch.cat([p_out1, fpn_out1], 1)  # 256->512/16
+            pan_out1 = self.C3_n3(p_out1)  # 512->512/16
+
+            p_out0 = self.bu_conv1(pan_out1)  # 512->512/32
+            p_out0 = torch.cat([p_out0, fpn_out0], 1)  # 512->1024/32
+            pan_out0 = self.C3_n4(p_out0)  # 1024->1024/32
+        else:
+            # gsconv
+            fpn_out0 = self.gsconv1(x0)  # 1024->512/32
+            f_out0 = self.upsample(fpn_out0)  # 512/16
+            f_out0 = torch.cat([f_out0, x1], 1)  # 512->1024/16
+            if self.neck_mode == 'all':
+                f_out0 = self.vovGSCSP1(f_out0)  # 1024->512/16
+            else:
+                f_out0 = self.C3_p4(f_out0)
+
+            fpn_out1 = self.gsconv2(f_out0)  # 512->256/16
+            f_out1 = self.upsample(fpn_out1)  # 256/8
+            f_out1 = torch.cat([f_out1, x2], 1)  # 256->512/8
+            if self.neck_mode == 'all':
+                f_out1 = self.gsconv3(f_out1)
+                pan_out2 = self.vovGSCSP2(f_out1)  # 512->256/8
+            else:
+                pan_out2 = self.C3_p3(f_out1)  # 512->256/8
+
+
+            p_out1 = self.gsconv4(pan_out2)  # 256->256/16
+            p_out1 = torch.cat([p_out1, fpn_out1], 1)  # 256->512/16
+            if self.neck_mode == 'all':
+                pan_out1 = self.vovGSCSP3(p_out1)  # 512->512/16
+            else:
+                pan_out1 = self.C3_n3(p_out1)  # 512->512/16
+
+            p_out0 = self.gsconv5(pan_out1)  # 512->512/32
+            p_out0 = torch.cat([p_out0, fpn_out0], 1)  # 512->1024/32
+            if self.neck_mode == 'all':
+                pan_out0 = self.vovGSCSP4(p_out0)  # 1024->1024/32
+            else:
+                pan_out0 = self.C3_n4(p_out0)  # 1024->1024/32
+
+        outputs = (pan_out2, pan_out1, pan_out0)
+
+        return outputs
+
+if __name__ == '__main__':
+    # quick smoke test: run dummy FPN-level inputs through the neck and print shapes
+    x = (torch.randn(1, 128, 80, 80).cuda(), torch.randn(1, 256, 40, 40).cuda(),
+         torch.randn(1, 512, 20, 20).cuda())
+    model = YOLOPAFPN(depth=0.33, width=0.5).cuda()
+    for out in model(x):
+        print(out.shape)
\ No newline at end of file
diff --git a/configs/detection/yolox/yolox_best_asff_1.py b/configs/detection/yolox/yolox_best_asff_1.py
new file mode 100644
index 00000000..c5af9388
--- /dev/null
+++ b/configs/detection/yolox/yolox_best_asff_1.py
@@ -0,0 +1,190 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att='ASFF',
+    asff_channel=1,
+    obj_loss_type='BCE',
+    reg_loss_type='giou',
+    head_type='tood'  # or 'yolox'
+)
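+# One variant of an asff_channel sweep: yolox_best_asff_{1,2,4,8} differ only in
+# that value. Assumed meaning of the new keys: use_att='ASFF' enables the ASFF
+# feature-fusion module, head_type='tood' selects the TOOD-style head, and
+# obj_loss_type / reg_loss_type pick the objectness and box-regression losses.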
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+data_root = '/apsarapangu/disk5/zxy/data/coco/'
+# data_root = '/mnt/data/nas/data/detection/coco/'
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # used for s m l x; removed for tiny/nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=True,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        test_mode=True,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponetial model average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_best_asff_2.py b/configs/detection/yolox/yolox_best_asff_2.py
new file mode 100644
index 00000000..cb58358b
--- /dev/null
+++ b/configs/detection/yolox/yolox_best_asff_2.py
@@ -0,0 +1,192 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att='ASFF',
+    asff_channel=2,
+    obj_loss_type='BCE',
+    reg_loss_type='giou',
+    head_type='tood'  # or 'yolox'
+)
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = '/apsarapangu/disk5/zxy/data/coco/'
+# data_root = '/mnt/data/nas/data/detection/coco/'
+data_root = '/cpfs01/shared/public/dataset/coco2017/'
+
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # used for s m l x; removed for tiny/nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=True,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        test_mode=True,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponetial model average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_best_asff_4.py b/configs/detection/yolox/yolox_best_asff_4.py
new file mode 100644
index 00000000..db60710a
--- /dev/null
+++ b/configs/detection/yolox/yolox_best_asff_4.py
@@ -0,0 +1,190 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att='ASFF',
+    asff_channel=4,
+    obj_loss_type='BCE',
+    reg_loss_type='giou',
+    head_type='tood'  # or 'yolox'
+)
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+data_root = '/apsarapangu/disk5/zxy/data/coco/'
+# data_root = '/mnt/data/nas/data/detection/coco/'
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # used for s m l x; removed for tiny/nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=True,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        test_mode=True,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponetial model average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_best_asff_8.py b/configs/detection/yolox/yolox_best_asff_8.py
new file mode 100644
index 00000000..2c59d3ad
--- /dev/null
+++ b/configs/detection/yolox/yolox_best_asff_8.py
@@ -0,0 +1,190 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att='ASFF',
+    asff_channel=8,
+    obj_loss_type='BCE',
+    reg_loss_type='giou',
+    head_type='tood'  # or 'yolox'
+)
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+data_root = '/apsarapangu/disk5/zxy/data/coco/'
+# data_root = '/mnt/data/nas/data/detection/coco/'
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # used for s m l x; removed for tiny/nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=True,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        test_mode=True,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponetial model average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_best_conv1.py b/configs/detection/yolox/yolox_best_conv1.py
new file mode 100644
index 00000000..c07ff11d
--- /dev/null
+++ b/configs/detection/yolox/yolox_best_conv1.py
@@ -0,0 +1,194 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att='ASFF',
+    obj_loss_type='BCE',
+    reg_loss_type='giou',
+    head_type='tood',
+    act='silu',
+    asff_channel=16,
+    stacked_convs=6,
+    la_down_rate=8,
+    conv_layers=1
+)
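+# Head hyperparameters introduced here (assumed semantics): stacked_convs is the
+# number of stacked convs in the TOOD head, la_down_rate the channel-reduction
+# ratio of its layer-attention branch, conv_layers the number of convs per task
+# branch, and asff_channel the hidden width used to compute the ASFF fusion
+# weights. The yolox_best_la_* configs sweep la_down_rate, stacked_convs and
+# conv_layers around this setting.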
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+data_root = '/apsarapangu/disk5/zxy/data/coco/'
+# data_root = '/mnt/data/nas/data/detection/coco/'
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # used for s m l x; removed for tiny/nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=True,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        test_mode=True,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponetial model average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_best_la_16.py b/configs/detection/yolox/yolox_best_la_16.py
new file mode 100644
index 00000000..6f7564cd
--- /dev/null
+++ b/configs/detection/yolox/yolox_best_la_16.py
@@ -0,0 +1,194 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att='ASFF',
+    obj_loss_type='BCE',
+    reg_loss_type='giou',
+    head_type='tood',
+    act='silu',
+    asff_channel=16,
+    stacked_convs=6,
+    la_down_rate=16,
+    conv_layers=2
+)
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+data_root = '/apsarapangu/disk5/zxy/data/coco/'
+# data_root = '/mnt/data/nas/data/detection/coco/'
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # used for s m l x; removed for tiny/nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=True,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        test_mode=True,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponetial model average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_best_la_32.py b/configs/detection/yolox/yolox_best_la_32.py
new file mode 100644
index 00000000..6a18d9bc
--- /dev/null
+++ b/configs/detection/yolox/yolox_best_la_32.py
@@ -0,0 +1,196 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att='ASFF',
+    obj_loss_type='BCE',
+    reg_loss_type='giou',
+    head_type='tood',
+    act='silu',
+    asff_channel=16,
+    stacked_convs=6,
+    la_down_rate=32,
+    conv_layers=2
+)
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = '/apsarapangu/disk5/zxy/data/coco/'
+# data_root = '/mnt/data/nas/data/detection/coco/'
+data_root = '/cpfs01/shared/public/dataset/coco2017/'
+
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # used for s m l x; removed for tiny/nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=True,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        test_mode=True,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponetial model average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_best_la_32_sconv_2.py b/configs/detection/yolox/yolox_best_la_32_sconv_2.py
new file mode 100644
index 00000000..79bfe269
--- /dev/null
+++ b/configs/detection/yolox/yolox_best_la_32_sconv_2.py
@@ -0,0 +1,194 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att='ASFF',
+    obj_loss_type='BCE',
+    reg_loss_type='giou',
+    head_type='tood',
+    act='silu',
+    asff_channel=16,
+    stacked_convs=2,
+    la_down_rate=32,
+    conv_layers=2
+)
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+data_root = '/apsarapangu/disk5/zxy/data/coco/'
+# data_root = '/mnt/data/nas/data/detection/coco/'
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # used for s m l x; removed for tiny/nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=True,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        test_mode=True,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponetial model average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_best_la_32_sconv_4.py b/configs/detection/yolox/yolox_best_la_32_sconv_4.py
new file mode 100644
index 00000000..f6d5f153
--- /dev/null
+++ b/configs/detection/yolox/yolox_best_la_32_sconv_4.py
@@ -0,0 +1,194 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att='ASFF',
+    obj_loss_type='BCE',
+    reg_loss_type='giou',
+    head_type='tood',
+    act='silu',
+    asff_channel=16,
+    stacked_convs=4,
+    la_down_rate=32,
+    conv_layers=2
+)
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+data_root = '/apsarapangu/disk5/zxy/data/coco/'
+# data_root = '/mnt/data/nas/data/detection/coco/'
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # used for s m l x; removed for tiny/nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=True,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        test_mode=True,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponetial model average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_best_lrelu.py b/configs/detection/yolox/yolox_best_lrelu.py
new file mode 100644
index 00000000..5ea1fa27
--- /dev/null
+++ b/configs/detection/yolox/yolox_best_lrelu.py
@@ -0,0 +1,190 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    act='lrelu',
+    use_att='ASFF',
+    obj_loss_type='BCE',
+    reg_loss_type='giou',
+    head_type='tood'  # or 'yolox'
+)
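+# Same ASFF + TOOD setup as the other variants, but with the activation switched
+# to LeakyReLU via act='lrelu' (the related configs use 'silu').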
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+data_root = '/apsarapangu/disk5/zxy/data/coco/'
+# data_root = '/mnt/data/nas/data/detection/coco/'
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # used for s m l x; removed for tiny/nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=True,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        test_mode=True,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epochs
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=True)
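
The ema = dict(decay=0.9998) entry keeps an exponential moving average of the model weights, which is what gets evaluated and exported. A minimal sketch of that update rule (a plain per-parameter EMA; the actual hook may ramp the decay and also track buffers):

import copy
import torch

class ModelEMA:
    def __init__(self, model, decay=0.9998):
        self.ema = copy.deepcopy(model).eval()
        self.decay = decay
        for p in self.ema.parameters():
            p.requires_grad_(False)

    @torch.no_grad()
    def update(self, model):
        # ema_param <- decay * ema_param + (1 - decay) * param
        for ema_p, p in zip(self.ema.parameters(), model.parameters()):
            ema_p.mul_(self.decay).add_(p.detach(), alpha=1 - self.decay)
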
diff --git a/configs/detection/yolox/yolox_best_ori.py b/configs/detection/yolox/yolox_best_ori.py
new file mode 100644
index 00000000..52469b17
--- /dev/null
+++ b/configs/detection/yolox/yolox_best_ori.py
@@ -0,0 +1,190 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att='ASFF',
+    obj_loss_type='BCE',
+    reg_loss_type='giou',
+    head_type='tood'  # tood yolox
+)
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = '/apsarapangu/disk5/zxy/data/coco/'
+# data_root = '/mnt/data/nas/data/detection/coco/'
+data_root = '/cpfs01/shared/public/dataset/coco2017/'
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # s m l x; removed for tiny and nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=True,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        test_mode=True,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epochs
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=10,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=True)
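
use_att='ASFF' turns on adaptively spatial feature fusion over the FPN outputs. The core idea, sketched below for three pyramid levels that have already been resized to a common resolution (the real module also compresses channels and rescales each level), is to predict per-pixel fusion weights, normalize them with a softmax across levels, and mix the features:

import torch
import torch.nn as nn
import torch.nn.functional as F

class TinyASFF(nn.Module):
    """Illustrative fusion core only; not the module registered in the codebase."""

    def __init__(self, channels, weight_channels=16):
        super().__init__()
        # one weight branch per input level
        self.weight_convs = nn.ModuleList(
            [nn.Conv2d(channels, weight_channels, 1) for _ in range(3)])
        self.weight_levels = nn.Conv2d(weight_channels * 3, 3, 1)

    def forward(self, feats):
        # feats: three tensors of identical shape (N, C, H, W)
        w = torch.cat([conv(f) for conv, f in zip(self.weight_convs, feats)], dim=1)
        w = F.softmax(self.weight_levels(w), dim=1)  # (N, 3, H, W)
        return sum(w[:, i:i + 1] * feats[i] for i in range(3))
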
diff --git a/configs/detection/yolox/yolox_best_relu.py b/configs/detection/yolox/yolox_best_relu.py
new file mode 100644
index 00000000..ef4a5a77
--- /dev/null
+++ b/configs/detection/yolox/yolox_best_relu.py
@@ -0,0 +1,190 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    act='relu',
+    use_att='ASFF',
+    obj_loss_type='BCE',
+    reg_loss_type='giou',
+    head_type='tood'  # tood yolox
+)
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+data_root = '/apsarapangu/disk5/zxy/data/coco/'
+# data_root = '/mnt/data/nas/data/detection/coco/'
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # s m l x; removed for tiny and nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=True,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        test_mode=True,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epochs
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_best_stack_1.py b/configs/detection/yolox/yolox_best_stack_1.py
new file mode 100644
index 00000000..0e3241ef
--- /dev/null
+++ b/configs/detection/yolox/yolox_best_stack_1.py
@@ -0,0 +1,194 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att='ASFF',
+    obj_loss_type='BCE',
+    reg_loss_type='giou',
+    head_type='tood',
+    act='silu',
+    asff_channel=16,
+    stacked_convs=1,
+    la_down_rate=8,
+    conv_layers=2
+)
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+data_root = '/apsarapangu/disk5/zxy/data/coco/'
+# data_root = '/mnt/data/nas/data/detection/coco/'
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # s m l x; removed for tiny and nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=True,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        test_mode=True,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epochs
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=True)
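
The yolox_best_stack_* configs that follow differ only in stacked_convs (1 through 5). As an assumption about how such a knob is typically consumed (this is not the actual head code), it sets how many conv + BN + activation blocks are stacked in the head tower before the prediction layers, trading accuracy against compute:

import torch.nn as nn

def build_head_tower(in_channels=256, stacked_convs=2, act=nn.SiLU):
    """Hypothetical tower builder: stacked_convs controls the depth."""
    layers = []
    for _ in range(stacked_convs):
        layers += [
            nn.Conv2d(in_channels, in_channels, 3, padding=1, bias=False),
            nn.BatchNorm2d(in_channels),
            act(),
        ]
    return nn.Sequential(*layers)

# stacked_convs=1..5 simply yields towers of 1..5 such blocks
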
diff --git a/configs/detection/yolox/yolox_best_stack_2.py b/configs/detection/yolox/yolox_best_stack_2.py
new file mode 100644
index 00000000..5dbbc05b
--- /dev/null
+++ b/configs/detection/yolox/yolox_best_stack_2.py
@@ -0,0 +1,194 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att='ASFF',
+    obj_loss_type='BCE',
+    reg_loss_type='giou',
+    head_type='tood',
+    act='silu',
+    asff_channel=16,
+    stacked_convs=2,
+    la_down_rate=8,
+    conv_layers=2
+)
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+data_root = '/apsarapangu/disk5/zxy/data/coco/'
+# data_root = '/mnt/data/nas/data/detection/coco/'
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # s m l x; removed for tiny and nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=True,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        test_mode=True,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epochs
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_best_stack_3.py b/configs/detection/yolox/yolox_best_stack_3.py
new file mode 100644
index 00000000..0695c3ef
--- /dev/null
+++ b/configs/detection/yolox/yolox_best_stack_3.py
@@ -0,0 +1,194 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att='ASFF',
+    obj_loss_type='BCE',
+    reg_loss_type='giou',
+    head_type='tood',
+    act='silu',
+    asff_channel=16,
+    stacked_convs=3,
+    la_down_rate=8,
+    conv_layers=2
+)
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+data_root = '/apsarapangu/disk5/zxy/data/coco/'
+# data_root = '/mnt/data/nas/data/detection/coco/'
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # s m l x; removed for tiny and nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=True,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        test_mode=True,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epochs
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_best_stack_4.py b/configs/detection/yolox/yolox_best_stack_4.py
new file mode 100644
index 00000000..5a30aaa1
--- /dev/null
+++ b/configs/detection/yolox/yolox_best_stack_4.py
@@ -0,0 +1,194 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att='ASFF',
+    obj_loss_type='BCE',
+    reg_loss_type='giou',
+    head_type='tood',
+    act='silu',
+    asff_channel=16,
+    stacked_convs=4,
+    la_down_rate=8,
+    conv_layers=2
+)
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+data_root = '/apsarapangu/disk5/zxy/data/coco/'
+# data_root = '/mnt/data/nas/data/detection/coco/'
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # s m l x; removed for tiny and nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=True,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        test_mode=True,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epochs
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_best_stack_5.py b/configs/detection/yolox/yolox_best_stack_5.py
new file mode 100644
index 00000000..d5a13881
--- /dev/null
+++ b/configs/detection/yolox/yolox_best_stack_5.py
@@ -0,0 +1,194 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att='ASFF',
+    obj_loss_type='BCE',
+    reg_loss_type='giou',
+    head_type='tood',
+    act='silu',
+    asff_channel=16,
+    stacked_convs=5,
+    la_down_rate=8,
+    conv_layers=2
+)
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+data_root = '/apsarapangu/disk5/zxy/data/coco/'
+# data_root = '/mnt/data/nas/data/detection/coco/'
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # s m l x; removed for tiny and nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=True,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        test_mode=True,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epochs
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_l_8xb8_300e_coco.py b/configs/detection/yolox/yolox_l_8xb8_300e_coco.py
index 459a20ad..fbb50dd7 100644
--- a/configs/detection/yolox/yolox_l_8xb8_300e_coco.py
+++ b/configs/detection/yolox/yolox_l_8xb8_300e_coco.py
@@ -1,4 +1,5 @@
-_base_ = './yolox_s_8xb16_300e_coco.py'
+_base_ = 'configs/detection/yolox/yolox_s_8xb16_300e_coco.py'
+# _base_ = 'configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_giou.py'
 
 # model settings
 model = dict(model_type='l')
diff --git a/configs/detection/yolox/yolox_l_8xb8_300e_coco_asff_2.py b/configs/detection/yolox/yolox_l_8xb8_300e_coco_asff_2.py
new file mode 100644
index 00000000..ae3b18c0
--- /dev/null
+++ b/configs/detection/yolox/yolox_l_8xb8_300e_coco_asff_2.py
@@ -0,0 +1,10 @@
+_base_ = 'configs/detection/yolox/yolox_best_asff_2.py'
+# _base_ = 'configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_giou.py'
+
+# model settings
+model = dict(model_type='l')
+
+data = dict(imgs_per_gpu=8, workers_per_gpu=4)
+
+optimizer = dict(
+    type='SGD', lr=0.01, momentum=0.9, weight_decay=5e-4, nesterov=True)
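
The yolox_l_* configs above override only a few keys on top of their _base_ file. A minimal sketch of the merge semantics they rely on (the real config loader is more involved): dictionaries are merged recursively, with keys from the derived config taking precedence.

def merge_config(base, override):
    """Recursive dict merge; keys in `override` win."""
    merged = dict(base)
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = merge_config(merged[key], value)
        else:
            merged[key] = value
    return merged

base_model = dict(type='YOLOX', model_type='s', num_classes=80)
derived = merge_config(base_model, dict(model_type='l'))
# derived == {'type': 'YOLOX', 'model_type': 'l', 'num_classes': 80}
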
diff --git a/configs/detection/yolox/yolox_l_8xb8_300e_coco_asff_tood_giou.py b/configs/detection/yolox/yolox_l_8xb8_300e_coco_asff_tood_giou.py
new file mode 100644
index 00000000..10da9745
--- /dev/null
+++ b/configs/detection/yolox/yolox_l_8xb8_300e_coco_asff_tood_giou.py
@@ -0,0 +1,10 @@
+_base_ = 'configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_giou.py'
+# _base_ = 'configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_giou.py'
+
+# model settings
+model = dict(model_type='l')
+
+data = dict(imgs_per_gpu=8, workers_per_gpu=4)
+
+optimizer = dict(
+    type='SGD', lr=0.01, momentum=0.9, weight_decay=5e-4, nesterov=True)
diff --git a/configs/detection/yolox/yolox_l_8xb8_300e_coco_best_ori.py b/configs/detection/yolox/yolox_l_8xb8_300e_coco_best_ori.py
new file mode 100644
index 00000000..a31acdd4
--- /dev/null
+++ b/configs/detection/yolox/yolox_l_8xb8_300e_coco_best_ori.py
@@ -0,0 +1,10 @@
+_base_ = 'configs/detection/yolox/yolox_best_ori.py'
+# _base_ = 'configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_giou.py'
+
+# model settings
+model = dict(model_type='l')
+
+data = dict(imgs_per_gpu=8, workers_per_gpu=4)
+
+optimizer = dict(
+    type='SGD', lr=0.01, momentum=0.9, weight_decay=5e-4, nesterov=True)
diff --git a/configs/detection/yolox/yolox_l_8xb8_300e_coco_la_32.py b/configs/detection/yolox/yolox_l_8xb8_300e_coco_la_32.py
new file mode 100644
index 00000000..864116f4
--- /dev/null
+++ b/configs/detection/yolox/yolox_l_8xb8_300e_coco_la_32.py
@@ -0,0 +1,10 @@
+_base_ = 'configs/detection/yolox/yolox_best_la_32.py'
+# _base_ = 'configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_giou.py'
+
+# model settings
+model = dict(model_type='l')
+
+data = dict(imgs_per_gpu=8, workers_per_gpu=4)
+
+optimizer = dict(
+    type='SGD', lr=0.01, momentum=0.9, weight_decay=5e-4, nesterov=True)
diff --git a/configs/detection/yolox/yolox_l_8xb8_300e_coco_yolo6.py b/configs/detection/yolox/yolox_l_8xb8_300e_coco_yolo6.py
new file mode 100644
index 00000000..f3ac55c4
--- /dev/null
+++ b/configs/detection/yolox/yolox_l_8xb8_300e_coco_yolo6.py
@@ -0,0 +1,10 @@
+_base_ = 'configs/detection/yolox/yolox_yolo6.py'
+# _base_ = 'configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_giou.py'
+
+# model settings
+model = dict(model_type='l')
+
+data = dict(imgs_per_gpu=8, workers_per_gpu=4)
+
+optimizer = dict(
+    type='SGD', lr=0.01, momentum=0.9, weight_decay=5e-4, nesterov=True)
diff --git a/configs/detection/yolox/yolox_l_8xb8_300e_coco_yolo6_att.py b/configs/detection/yolox/yolox_l_8xb8_300e_coco_yolo6_att.py
new file mode 100644
index 00000000..e7f03efc
--- /dev/null
+++ b/configs/detection/yolox/yolox_l_8xb8_300e_coco_yolo6_att.py
@@ -0,0 +1,10 @@
+_base_ = 'configs/detection/yolox/yolox_yolo6_att.py'
+# _base_ = 'configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_giou.py'
+
+# model settings
+model = dict(model_type='l')
+
+data = dict(imgs_per_gpu=8, workers_per_gpu=4)
+
+optimizer = dict(
+    type='SGD', lr=0.01, momentum=0.9, weight_decay=5e-4, nesterov=True)
diff --git a/configs/detection/yolox/yolox_l_8xb8_300e_coco_yolo6_att_relu.py b/configs/detection/yolox/yolox_l_8xb8_300e_coco_yolo6_att_relu.py
new file mode 100644
index 00000000..29eccc8e
--- /dev/null
+++ b/configs/detection/yolox/yolox_l_8xb8_300e_coco_yolo6_att_relu.py
@@ -0,0 +1,10 @@
+_base_ = 'configs/detection/yolox/yolox_yolo6_att_relu.py'
+# _base_ = 'configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_giou.py'
+
+# model settings
+model = dict(model_type='l')
+
+data = dict(imgs_per_gpu=8, workers_per_gpu=4)
+
+optimizer = dict(
+    type='SGD', lr=0.01, momentum=0.9, weight_decay=5e-4, nesterov=True)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
index 6aa8789a..2a99d897 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
@@ -41,6 +41,7 @@
 # dataset settings
 data_root = '/apsarapangu/disk5/zxy/data/coco/'
 # data_root = '/mnt/data/nas/data/detection/coco/'
+# data_root = '/cpfs01/shared/public/dataset/coco2017/'
 
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
@@ -88,7 +89,7 @@
             dict(type='LoadAnnotations', with_bbox=True)
         ],
         classes=CLASSES,
-        filter_empty_gt=False,
+        filter_empty_gt=True,
         iscrowd=False),
     pipeline=train_pipeline,
     dynamic_scale=img_scale)
@@ -106,14 +107,14 @@
         ],
         classes=CLASSES,
         filter_empty_gt=True,
-        test_mode = True,
+        test_mode=True,
         iscrowd=True),
     pipeline=test_pipeline,
     dynamic_scale=None,
     label_padding=False)
 
 data = dict(
-    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
 
 # additional hooks
 interval = 10
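
This hunk halves imgs_per_gpu from 16 to 8 while leaving lr=0.02 in place. Under the common linear-scaling convention, halving the effective batch size would also halve the learning rate, which is the value the yolox_l_* configs above use (lr=0.01); a quick check of that arithmetic, assuming 8 GPUs:

def scaled_lr(base_lr, base_batch, new_batch):
    # linear scaling rule: keep lr / total_batch_size constant
    return base_lr * new_batch / base_batch

# reference recipe in this file: lr=0.02 at 8 GPUs x 16 imgs_per_gpu = 128 images/step
print(scaled_lr(0.02, base_batch=8 * 16, new_batch=8 * 8))  # -> 0.01
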
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_giou.py b/configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_giou.py
new file mode 100644
index 00000000..7feccc12
--- /dev/null
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_giou.py
@@ -0,0 +1,190 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att='ASFF',
+    obj_loss_type='BCE',
+    reg_loss_type='giou',
+    head_type='tood'  # tood yolox
+)
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = '/apsarapangu/disk5/zxy/data/coco/'
+# data_root = '/mnt/data/nas/data/detection/coco/'
+data_root = '/cpfs01/shared/public/dataset/coco2017/'
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # s m l x; removed for tiny and nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=True,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        test_mode=True,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epochs
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=True)
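
Several of these configs select reg_loss_type='giou'. For reference, a compact standalone version of the GIoU term for axis-aligned boxes in (x1, y1, x2, y2) form; it is independent of the tensor-based loss module actually registered in the codebase:

def giou(box_a, box_b, eps=1e-9):
    ax1, ay1, ax2, ay2 = box_a
    bx1, by1, bx2, by2 = box_b
    inter_w = max(0.0, min(ax2, bx2) - max(ax1, bx1))
    inter_h = max(0.0, min(ay2, by2) - max(ay1, by1))
    inter = inter_w * inter_h
    area_a = (ax2 - ax1) * (ay2 - ay1)
    area_b = (bx2 - bx1) * (by2 - by1)
    union = area_a + area_b - inter + eps
    iou = inter / union
    # smallest axis-aligned box enclosing both inputs
    enclose = (max(ax2, bx2) - min(ax1, bx1)) * (max(ay2, by2) - min(ay1, by1)) + eps
    return iou - (enclose - union) / enclose

# the regression loss is then 1 - giou(pred, target)
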
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_sppf_giou.py b/configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_sppf_giou.py
new file mode 100644
index 00000000..50496148
--- /dev/null
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_sppf_giou.py
@@ -0,0 +1,192 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att='ASFF',
+    obj_loss_type='BCE',
+    reg_loss_type='giou',
+    head_type='tood',  # yolox
+    spp_type='sppf'
+)
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = '/apsarapangu/disk5/zxy/data/coco/'
+# data_root = '/mnt/data/nas/data/detection/coco/'
+data_root = '/cpfs01/shared/public/dataset/coco2017/'
+
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # used for s m l x; removed for tiny and nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=True,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        test_mode=True,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average (EMA)
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=True)
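Note: the config above resolves through mmcv-style '_base_' inheritance. A minimal inspection sketch follows, assuming the patch is applied and mmcv is installed; the path is the file added in this hunk, and attribute access on cfg.model is standard mmcv Config behavior:

from mmcv import Config

# Config.fromfile resolves the '_base_' chain relative to the config file.
cfg = Config.fromfile(
    'configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_sppf_giou.py')

# The model-level switches introduced by this patch series.
print(cfg.model.use_att, cfg.model.head_type, cfg.model.spp_type)
print(cfg.model.obj_loss_type, cfg.model.reg_loss_type)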
diff --git a/configs/detection/yolox/yolox_tiny_8xb16_300e_coco.py b/configs/detection/yolox/yolox_tiny_8xb16_300e_coco.py
index 436244d9..24a36389 100644
--- a/configs/detection/yolox/yolox_tiny_8xb16_300e_coco.py
+++ b/configs/detection/yolox/yolox_tiny_8xb16_300e_coco.py
@@ -1,4 +1,7 @@
-_base_ = './yolox_s_8xb16_300e_coco.py'
+# _base_ = './yolox_s_8xb16_300e_coco.py'
+# _base_ = 'configs/detection/yolox/yolox_s_8xb16_300e_coco.py'
+# _base_ = 'configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_giou.py'
+_base_ = 'configs/detection/yolox/yolox_yolo6_att.py'
 
 # model settings
 model = dict(model_type='tiny')
@@ -57,7 +60,9 @@
     dict(type='Collect', keys=['img'])
 ]
 
-data_root = 'data/coco/'
+# data_root = 'data/coco/'
+data_root = '/apsarapangu/disk5/zxy/data/coco/'
+
 
 train_dataset = dict(
     type='DetImagesMixDataset',
@@ -70,7 +75,7 @@
             dict(type='LoadAnnotations', with_bbox=True)
         ],
         classes=CLASSES,
-        filter_empty_gt=False,
+        filter_empty_gt=True,
         iscrowd=False),
     pipeline=train_pipeline,
     dynamic_scale=img_scale)
diff --git a/configs/detection/yolox/yolox_yolo6.py b/configs/detection/yolox/yolox_yolo6.py
new file mode 100644
index 00000000..c96e979e
--- /dev/null
+++ b/configs/detection/yolox/yolox_yolo6.py
@@ -0,0 +1,190 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    backbone='EfficientRep',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    obj_loss_type='BCE',
+    reg_loss_type='giou',
+    )
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = 'data/coco/'
+# data_root = '/apsarapangu/disk5/zxy/data/coco/'
+data_root = '/mnt/data/nas/data/detection/coco/'
+# data_root = '/cpfs01/shared/public/dataset/coco2017/'
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # used for s m l x; removed for tiny and nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average (EMA)
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=False, export_blade=False, end2end=False)
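Note: the yolox_yolo6* variants that follow differ mainly in a few model keys (use_att, asff_channel, expand_kernel, down_rate, ...). A small comparison sketch, assuming mmcv is installed and the patch is applied; model_diff is a hypothetical helper for this note, not part of the patch:

from mmcv import Config

def model_diff(path_a, path_b):
    # Print model keys whose values differ between two config files.
    a = Config.fromfile(path_a).model
    b = Config.fromfile(path_b).model
    for key in sorted(set(a.keys()) | set(b.keys())):
        if a.get(key) != b.get(key):
            print(key, a.get(key), b.get(key))

model_diff('configs/detection/yolox/yolox_yolo6.py',
           'configs/detection/yolox/yolox_yolo6_att.py')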
diff --git a/configs/detection/yolox/yolox_yolo6_asff_sim.py b/configs/detection/yolox/yolox_yolo6_asff_sim.py
new file mode 100644
index 00000000..9881ab69
--- /dev/null
+++ b/configs/detection/yolox/yolox_yolo6_asff_sim.py
@@ -0,0 +1,197 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    backbone='EfficientRep',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    obj_loss_type='BCE',
+    reg_loss_type='giou',
+    use_att='ASFF_sim',
+    asff_channel=2,
+    la_down_rate=32,
+    expand_kernel=1,
+    down_rate=None,
+    use_dconv=False,
+    use_expand=True,
+    )
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = 'data/coco/'
+# data_root = '/apsarapangu/disk5/zxy/data/coco/'
+data_root = '/mnt/data/nas/data/detection/coco/'
+# data_root = '/cpfs01/shared/public/dataset/coco2017/'
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # used for s m l x; removed for tiny and nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average (EMA)
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=False, export_blade=False, end2end=False)
diff --git a/configs/detection/yolox/yolox_yolo6_att.py b/configs/detection/yolox/yolox_yolo6_att.py
new file mode 100644
index 00000000..4da952ab
--- /dev/null
+++ b/configs/detection/yolox/yolox_yolo6_att.py
@@ -0,0 +1,195 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    backbone='EfficientRep',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att='ASFF',
+    asff_channel=2,
+    obj_loss_type='BCE',
+    reg_loss_type='giou',
+    head_type='tood',
+    la_down_rate=32,
+    )
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = 'data/coco/'
+data_root = '/apsarapangu/disk5/zxy/data/coco/'
+# data_root = '/mnt/data/nas/data/detection/coco/'
+# data_root = '/cpfs01/shared/public/dataset/coco2017/'
+
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # used for s m l x; removed for tiny and nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average (EMA)
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=False, export_blade=False, end2end=False)
diff --git a/configs/detection/yolox/yolox_yolo6_att_relu.py b/configs/detection/yolox/yolox_yolo6_att_relu.py
new file mode 100644
index 00000000..a2f51299
--- /dev/null
+++ b/configs/detection/yolox/yolox_yolo6_att_relu.py
@@ -0,0 +1,196 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    backbone='EfficientRep',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att='ASFF',
+    asff_channel=2,
+    obj_loss_type='BCE',
+    reg_loss_type='giou',
+    head_type='tood',
+    la_down_rate=32,
+    act='relu'
+    )
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = 'data/coco/'
+# data_root = '/apsarapangu/disk5/zxy/data/coco/'
+# data_root = '/mnt/data/nas/data/detection/coco/'
+data_root = '/cpfs01/shared/public/dataset/coco2017/'
+
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # used for s m l x; removed for tiny and nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average (EMA)
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=False, export_blade=False, end2end=False)
diff --git a/configs/detection/yolox/yolox_yolo6_att_sim.py b/configs/detection/yolox/yolox_yolo6_att_sim.py
new file mode 100644
index 00000000..26d51a2a
--- /dev/null
+++ b/configs/detection/yolox/yolox_yolo6_att_sim.py
@@ -0,0 +1,199 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    backbone='EfficientRep',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att='ASFF_sim',
+    asff_channel=2,
+    obj_loss_type='BCE',
+    reg_loss_type='giou',
+    head_type='tood',
+    la_down_rate=32,
+    expand_kernel=3,
+    down_rate=None,
+    use_dconv=False,
+    use_expand=True,
+    )
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = 'data/coco/'
+# data_root = '/apsarapangu/disk5/zxy/data/coco/'
+data_root = '/mnt/data/nas/data/detection/coco/'
+# data_root = '/cpfs01/shared/public/dataset/coco2017/'
+
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # used for s m l x; removed for tiny and nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average (EMA)
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=False, export_blade=False, end2end=False)
diff --git a/configs/detection/yolox/yolox_yolo6_att_sim_1.py b/configs/detection/yolox/yolox_yolo6_att_sim_1.py
new file mode 100644
index 00000000..e6fe76e2
--- /dev/null
+++ b/configs/detection/yolox/yolox_yolo6_att_sim_1.py
@@ -0,0 +1,202 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    backbone='EfficientRep',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att='ASFF_sim',
+    asff_channel=2,
+    obj_loss_type='BCE',
+    reg_loss_type='giou',
+    head_type='tood',
+    la_down_rate=32,
+    expand_kernel=1,
+    down_rate=None,
+    use_dconv=False,
+    use_expand=True,
+    # norm_cfg = 'SyncBN'
+    )
+
+sync_bn = True
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = 'data/coco/'
+data_root = '/apsarapangu/disk5/zxy/data/coco/'
+# data_root = '/mnt/data/nas/data/detection/coco/'
+# data_root = '/cpfs01/shared/public/dataset/coco2017/'
+
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # used for s m l x; removed for tiny and nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average (EMA)
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=False, export_blade=False, end2end=False)
diff --git a/configs/detection/yolox/yolox_yolo6_att_sim_16.py b/configs/detection/yolox/yolox_yolo6_att_sim_16.py
new file mode 100644
index 00000000..b94c2b4b
--- /dev/null
+++ b/configs/detection/yolox/yolox_yolo6_att_sim_16.py
@@ -0,0 +1,199 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    backbone='EfficientRep',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att='ASFF_sim',
+    asff_channel=2,
+    obj_loss_type='BCE',
+    reg_loss_type='giou',
+    head_type='tood',
+    la_down_rate=32,
+    expand_kernel=3,
+    down_rate=16,
+    use_dconv=False,
+    use_expand=True,
+    )
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = 'data/coco/'
+# data_root = '/apsarapangu/disk5/zxy/data/coco/'
+data_root = '/mnt/data/nas/data/detection/coco/'
+# data_root = '/cpfs01/shared/public/dataset/coco2017/'
+
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # used for s m l x; removed for tiny and nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average (EMA)
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=False, export_blade=False, end2end=False)
diff --git a/configs/detection/yolox/yolox_yolo6_att_sim_32.py b/configs/detection/yolox/yolox_yolo6_att_sim_32.py
new file mode 100644
index 00000000..273b6e7f
--- /dev/null
+++ b/configs/detection/yolox/yolox_yolo6_att_sim_32.py
@@ -0,0 +1,199 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    backbone='EfficientRep',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att='ASFF_sim',
+    asff_channel=2,
+    obj_loss_type='BCE',
+    reg_loss_type='giou',
+    head_type='tood',
+    la_down_rate=32,
+    expand_kernel=3,
+    down_rate=32,
+    use_dconv=False,
+    use_expand=True,
+    )
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = 'data/coco/'
+# data_root = '/apsarapangu/disk5/zxy/data/coco/'
+data_root = '/mnt/data/nas/data/detection/coco/'
+# data_root = '/cpfs01/shared/public/dataset/coco2017/'
+
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # used for s m l x; removed for tiny and nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average (EMA)
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=False, export_blade=False, end2end=False)
diff --git a/configs/detection/yolox/yolox_yolo6_att_sim_8.py b/configs/detection/yolox/yolox_yolo6_att_sim_8.py
new file mode 100644
index 00000000..b1b6b1c7
--- /dev/null
+++ b/configs/detection/yolox/yolox_yolo6_att_sim_8.py
@@ -0,0 +1,199 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    backbone='EfficientRep',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att='ASFF_sim',
+    asff_channel=2,
+    obj_loss_type='BCE',
+    reg_loss_type='giou',
+    head_type='tood',
+    la_down_rate=32,
+    expand_kernel=3,
+    down_rate=8,
+    use_dconv=False,
+    use_expand=True,
+    )
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = 'data/coco/'
+# data_root = '/apsarapangu/disk5/zxy/data/coco/'
+data_root = '/mnt/data/nas/data/detection/coco/'
+# data_root = '/cpfs01/shared/public/dataset/coco2017/'
+
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # used for s m l x; removed for tiny and nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average (EMA)
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=False, export_blade=False, end2end=False)
diff --git a/configs/detection/yolox/yolox_yolo6_att_sim_d.py b/configs/detection/yolox/yolox_yolo6_att_sim_d.py
new file mode 100644
index 00000000..75270d10
--- /dev/null
+++ b/configs/detection/yolox/yolox_yolo6_att_sim_d.py
@@ -0,0 +1,199 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    backbone='EfficientRep',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att='ASFF_sim',
+    asff_channel=2,
+    obj_loss_type='BCE',
+    reg_loss_type='giou',
+    head_type='tood',
+    la_down_rate=32,
+    expand_kernel=3,
+    down_rate=None,
+    use_dconv=True,
+    use_expand=True,
+    )
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = 'data/coco/'
+# data_root = '/apsarapangu/disk5/zxy/data/coco/'
+data_root = '/mnt/data/nas/data/detection/coco/'
+# data_root = '/cpfs01/shared/public/dataset/coco2017/'
+
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # used for s m l x; removed for tiny and nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average (EMA)
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=False, export_blade=False, end2end=False)
diff --git a/configs/detection/yolox/yolox_yolo6_att_sim_no_expand.py b/configs/detection/yolox/yolox_yolo6_att_sim_no_expand.py
new file mode 100644
index 00000000..737c9fed
--- /dev/null
+++ b/configs/detection/yolox/yolox_yolo6_att_sim_no_expand.py
@@ -0,0 +1,199 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    backbone='EfficientRep',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att='ASFF_sim',
+    asff_channel=2,
+    obj_loss_type='BCE',
+    reg_loss_type='giou',
+    head_type='tood',
+    la_down_rate=32,
+    expand_kernel=3,
+    down_rate=None,
+    use_dconv=False,
+    use_expand=False,
+    )
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = 'data/coco/'
+# data_root = '/apsarapangu/disk5/zxy/data/coco/'
+data_root = '/mnt/data/nas/data/detection/coco/'
+# data_root = '/cpfs01/shared/public/dataset/coco2017/'
+
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # used for s m l x; removed for tiny and nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average (EMA)
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=False, export_blade=False, end2end=False)
diff --git a/configs/detection/yolox/yolox_yolo6_gsconv.py b/configs/detection/yolox/yolox_yolo6_gsconv.py
new file mode 100644
index 00000000..dfaaf403
--- /dev/null
+++ b/configs/detection/yolox/yolox_yolo6_gsconv.py
@@ -0,0 +1,191 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    backbone='EfficientRep',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    obj_loss_type='BCE',
+    reg_loss_type='giou',
+    neck='gsconv'
+    )
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = 'data/coco/'
+# data_root = '/apsarapangu/disk5/zxy/data/coco/'
+data_root = '/mnt/data/nas/data/detection/coco/'
+# data_root = '/cpfs01/shared/public/dataset/coco2017/'
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # used for s m l x; removed for tiny and nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average (EMA)
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=False, export_blade=False, end2end=False)
diff --git a/configs/detection/yolox/yolox_yolo6_gsconv_asff_sim.py b/configs/detection/yolox/yolox_yolo6_gsconv_asff_sim.py
new file mode 100644
index 00000000..ba98448a
--- /dev/null
+++ b/configs/detection/yolox/yolox_yolo6_gsconv_asff_sim.py
@@ -0,0 +1,199 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    backbone='EfficientRep',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    obj_loss_type='BCE',
+    reg_loss_type='giou',
+    neck='gsconv',
+    use_att='ASFF_sim',
+    asff_channel=2,
+    la_down_rate=32,
+    expand_kernel=1,
+    down_rate=None,
+    use_dconv=False,
+    use_expand=True,
+
+    )
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = 'data/coco/'
+data_root = '/apsarapangu/disk5/zxy/data/coco/'
+# data_root = '/mnt/data/nas/data/detection/coco/'
+# data_root = '/cpfs01/shared/public/dataset/coco2017/'
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # used for s m l x; removed for tiny and nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average (EMA)
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=False, export_blade=False, end2end=False)
diff --git a/configs/detection/yolox/yolox_yolo6_gsconv_part.py b/configs/detection/yolox/yolox_yolo6_gsconv_part.py
new file mode 100644
index 00000000..d023d427
--- /dev/null
+++ b/configs/detection/yolox/yolox_yolo6_gsconv_part.py
@@ -0,0 +1,192 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    backbone='EfficientRep',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    obj_loss_type='BCE',
+    reg_loss_type='giou',
+    neck='gsconv',
+    neck_mode='part'
+    )
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = 'data/coco/'
+# data_root = '/apsarapangu/disk5/zxy/data/coco/'
+data_root = '/mnt/data/nas/data/detection/coco/'
+# data_root = '/cpfs01/shared/public/dataset/coco2017/'
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # used for s m l x; removed for tiny and nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average (EMA)
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=False, export_blade=False, end2end=False)
diff --git a/configs/detection/yolox/yolox_yolo6_head_ori.py b/configs/detection/yolox/yolox_yolo6_head_ori.py
new file mode 100644
index 00000000..ffeb109a
--- /dev/null
+++ b/configs/detection/yolox/yolox_yolo6_head_ori.py
@@ -0,0 +1,202 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    backbone='EfficientRep',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att='ASFF_sim',
+    asff_channel=2,
+    obj_loss_type='BCE',
+    reg_loss_type='giou',
+    head_type='yolox',
+    la_down_rate=32,
+    expand_kernel=1,
+    down_rate=None,
+    use_dconv=False,
+    use_expand=True,
+    # norm_cfg = 'SyncBN'
+    )
+
+sync_bn = True
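+# sync_bn=True makes easycv/apis/train.py convert the model with
+# torch.nn.SyncBatchNorm.convert_sync_batchnorm for multi-GPU training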
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = 'data/coco/'
+data_root = '/apsarapangu/disk5/zxy/data/coco/'
+# data_root = '/mnt/data/nas/data/detection/coco/'
+# data_root = '/cpfs01/shared/public/dataset/coco2017/'
+
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # used for s m l x; removed for tiny and nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average (EMA)
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=False, export_blade=False, end2end=False)
diff --git a/configs/detection/yolox/yolox_yolo6_head_tood.py b/configs/detection/yolox/yolox_yolo6_head_tood.py
new file mode 100644
index 00000000..c349c4d9
--- /dev/null
+++ b/configs/detection/yolox/yolox_yolo6_head_tood.py
@@ -0,0 +1,202 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    backbone='EfficientRep',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att='ASFF_sim',
+    asff_channel=2,
+    obj_loss_type='BCE',
+    reg_loss_type='giou',
+    head_type='tood',
+    la_down_rate=32,
+    expand_kernel=1,
+    down_rate=None,
+    use_dconv=False,
+    use_expand=True,
+    # norm_cfg = 'SyncBN'
+    )
+
+# sync_bn = True
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = 'data/coco/'
+data_root = '/apsarapangu/disk5/zxy/data/coco/'
+# data_root = '/mnt/data/nas/data/detection/coco/'
+# data_root = '/cpfs01/shared/public/dataset/coco2017/'
+
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # used for s m l x; removed for tiny and nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponetial model average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=False, export_blade=False, end2end=False)
diff --git a/configs/detection/yolox/yolox_yolo6_yoloe_head.py b/configs/detection/yolox/yolox_yolo6_yoloe_head.py
new file mode 100644
index 00000000..4d49a570
--- /dev/null
+++ b/configs/detection/yolox/yolox_yolo6_yoloe_head.py
@@ -0,0 +1,195 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    backbone='EfficientRep',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65,
+    use_att='ASFF',
+    asff_channel=2,
+    obj_loss_type='BCE',
+    reg_loss_type='giou',
+    head_type='ppyoloe',
+    la_down_rate=32,
+    )
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = 'data/coco/'
+data_root = '/apsarapangu/disk5/zxy/data/coco/'
+# data_root = '/mnt/data/nas/data/detection/coco/'
+# data_root = '/cpfs01/shared/public/dataset/coco2017/'
+
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # used for s m l x; removed for tiny and nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=1, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average (EMA)
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=False, export_blade=False, end2end=False)
diff --git a/easycv/apis/train.py b/easycv/apis/train.py
index 555114c8..1601c02c 100644
--- a/easycv/apis/train.py
+++ b/easycv/apis/train.py
@@ -97,6 +97,8 @@ def train_model(model,
 
     # SyncBatchNorm
     open_sync_bn = cfg.get('sync_bn', False)
+    print('sync_bn:', open_sync_bn)
+
     if open_sync_bn:
         model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to('cuda')
         logger.info('Using SyncBatchNorm()')
diff --git a/easycv/hooks/yolox_mode_switch_hook.py b/easycv/hooks/yolox_mode_switch_hook.py
index 9d396773..7b2afddc 100644
--- a/easycv/hooks/yolox_mode_switch_hook.py
+++ b/easycv/hooks/yolox_mode_switch_hook.py
@@ -40,3 +40,6 @@ def before_train_epoch(self, runner):
             train_loader.dataset.update_skip_type_keys(self.skip_type_keys)
             runner.logger.info('Add additional L1 loss now!')
             model.head.use_l1 = True
+
+        # expose the current epoch to the model (e.g. for epoch-dependent loss scheduling)
+        if hasattr(runner.model.module, 'epoch_counter'):
+            runner.model.module.epoch_counter = epoch
diff --git a/easycv/models/backbones/__init__.py b/easycv/models/backbones/__init__.py
index 0da33cdf..efb59c2a 100644
--- a/easycv/models/backbones/__init__.py
+++ b/easycv/models/backbones/__init__.py
@@ -15,3 +15,4 @@
 from .resnext import ResNeXt
 from .shuffle_transformer import ShuffleTransformer
 from .swin_transformer_dynamic import SwinTransformer
+from .efficientrep import EfficientRep
diff --git a/easycv/models/backbones/efficientrep.py b/easycv/models/backbones/efficientrep.py
new file mode 100644
index 00000000..51d36e16
--- /dev/null
+++ b/easycv/models/backbones/efficientrep.py
@@ -0,0 +1,135 @@
+import math
+
+import torch
+from torch import nn
+from easycv.models.backbones.yolo6_blocks import RepVGGBlock, RepBlock, SimSPPF
+
+
+def make_divisible(x, divisor):
+    # Round x up to the nearest multiple of divisor.
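+    # e.g. make_divisible(1024 * 0.5, 8) -> 512, make_divisible(100, 8) -> 104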
+    return math.ceil(x / divisor) * divisor
+
+
+class EfficientRep(nn.Module):
+    '''EfficientRep backbone.
+    EfficientRep is handcrafted by hardware-aware neural network design.
+    With its rep-style structure, EfficientRep is friendly to high-computation hardware (e.g. GPU).
+    '''
+
+    def __init__(
+        self,
+        in_channels=3,
+        channels_list=None,
+        num_repeats=None
+    ):
+        super().__init__()
+
+        assert channels_list is not None
+        assert num_repeats is not None
+
+        self.stem = RepVGGBlock(
+            in_channels=in_channels,
+            out_channels=channels_list[0],
+            kernel_size=3,
+            stride=2
+        )
+
+        self.ERBlock_2 = nn.Sequential(
+            RepVGGBlock(
+                in_channels=channels_list[0],
+                out_channels=channels_list[1],
+                kernel_size=3,
+                stride=2
+            ),
+            RepBlock(
+                in_channels=channels_list[1],
+                out_channels=channels_list[1],
+                n=num_repeats[1]
+            )
+        )
+
+        self.ERBlock_3 = nn.Sequential(
+            RepVGGBlock(
+                in_channels=channels_list[1],
+                out_channels=channels_list[2],
+                kernel_size=3,
+                stride=2
+            ),
+            RepBlock(
+                in_channels=channels_list[2],
+                out_channels=channels_list[2],
+                n=num_repeats[2],
+            )
+        )
+
+        self.ERBlock_4 = nn.Sequential(
+            RepVGGBlock(
+                in_channels=channels_list[2],
+                out_channels=channels_list[3],
+                kernel_size=3,
+                stride=2
+            ),
+            RepBlock(
+                in_channels=channels_list[3],
+                out_channels=channels_list[3],
+                n=num_repeats[3]
+            )
+        )
+
+        self.ERBlock_5 = nn.Sequential(
+            RepVGGBlock(
+                in_channels=channels_list[3],
+                out_channels=channels_list[4],
+                kernel_size=3,
+                stride=2
+            ),
+            RepBlock(
+                in_channels=channels_list[4],
+                out_channels=channels_list[4],
+                n=num_repeats[4]
+            ),
+            SimSPPF(
+                in_channels=channels_list[4],
+                out_channels=channels_list[4],
+                kernel_size=5
+            )
+        )
+
+    def forward(self, x):
+
+        outputs = []
+        x = self.stem(x)
+        x = self.ERBlock_2(x)
+        x = self.ERBlock_3(x)
+        outputs.append(x)
+        x = self.ERBlock_4(x)
+        outputs.append(x)
+        x = self.ERBlock_5(x)
+        outputs.append(x)
+
+        return tuple(outputs)
+
+if __name__=='__main__':
+    depth_mul = 0.33
+    width_mul = 0.5
+    num_repeat_backbone = [1, 6, 12, 18, 6]
+    channels_list_backbone = [64, 128, 256, 512, 1024]
+    num_repeat_neck = [12, 12, 12, 12]
+    channels_list_neck = [256, 128, 128, 256, 256, 512]
+
+    channels = 3
+
+    num_repeat = [(max(round(i * depth_mul), 1) if i > 1 else i) for i in
+                  (num_repeat_backbone + num_repeat_neck)]
+
+    channels_list = [make_divisible(i * width_mul, 8) for i in (channels_list_backbone + channels_list_neck)]
+    model = EfficientRep(in_channels=channels, channels_list=channels_list, num_repeats=num_repeat)
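+    # Fuse every RepVGGBlock's parallel branches into a single 3x3 conv so the
+    # summary below reflects the deploy-time (inference) structure.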
+    for layer in model.modules():
+        if isinstance(layer, RepVGGBlock):
+            layer.switch_to_deploy()
+
+    model = model.cuda()
+
+    a = torch.randn(1,3,640,640).cuda()
+    summary(model,a)
+
diff --git a/easycv/models/backbones/network_blocks.py b/easycv/models/backbones/network_blocks.py
index ee5ed88e..59c9a582 100644
--- a/easycv/models/backbones/network_blocks.py
+++ b/easycv/models/backbones/network_blocks.py
@@ -46,6 +46,8 @@ def get_activation(name='silu', inplace=True):
         module = nn.LeakyReLU(0.1, inplace=inplace)
     elif name == 'hsilu':
         module = HSiLU(inplace=inplace)
+    elif name == 'identity':
+        module = nn.Identity(inplace=inplace)
     else:
         raise AttributeError('Unsupported act type: {}'.format(name))
     return module
@@ -57,8 +59,8 @@ class BaseConv(nn.Module):
     def __init__(self,
                  in_channels,
                  out_channels,
-                 ksize,
-                 stride,
+                 ksize=1,
+                 stride=1,
                  groups=1,
                  bias=False,
                  act='silu'):
@@ -282,3 +284,56 @@ def forward(self, x):
             dim=1,
         )
         return self.conv(x)
+
+class GSConv(nn.Module):
+    # GSConv https://github.com/AlanLi1997/slim-neck-by-gsconv
+    def __init__(self, c1, c2, k=1, s=1, g=1, act='silu'):
+        super().__init__()
+        c_ = c2 // 2
+        # pass groups/act by keyword so the act string does not land in BaseConv's bias slot
+        self.cv1 = BaseConv(c1, c_, k, s, groups=g, act=act)
+        self.cv2 = BaseConv(c_, c_, 5, 1, groups=c_, act=act)  # depthwise 5x5
+
+    def forward(self, x):
+        x1 = self.cv1(x)
+        x2 = torch.cat((x1, self.cv2(x1)), 1)
+        # shuffle
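+        # (channel shuffle: interleaves the c2/2 channels from cv1 with the c2/2
+        # depthwise channels from cv2 so the two halves mix, per slim-neck-by-gsconv)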
+        b, n, h, w = x2.data.size()
+        b_n = b * n // 2
+        y = x2.reshape(b_n, 2, h * w)
+        y = y.permute(1, 0, 2)
+        y = y.reshape(2, -1, n // 2, h, w)
+
+        return torch.cat((y[0], y[1]), 1)
+
+
+class GSBottleneck(nn.Module):
+    # GS Bottleneck https://github.com/AlanLi1997/slim-neck-by-gsconv
+    def __init__(self, c1, c2, k=3, s=1):
+        super().__init__()
+        c_ = c2 // 2
+        # for lighting
+        self.conv_lighting = nn.Sequential(
+            GSConv(c1, c_, 1, 1),
+            GSConv(c_, c2, 1, 1, act='identity'))
+        # for receptive field
+        self.conv = nn.Sequential(
+            GSConv(c1, c_, 3, 1),
+            GSConv(c_, c2, 3, 1, act='identity'))
+        self.shortcut = nn.Identity()
+
+    def forward(self, x):
+        return self.conv_lighting(x)
+
+
+class VoVGSCSP(nn.Module):
+    # VoV-GSCSP https://github.com/AlanLi1997/slim-neck-by-gsconv
+    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
+        super().__init__()
+        c_ = int(c2 * e)
+        self.cv1 = BaseConv(c1, c_, 1, 1)
+        self.cv2 = BaseConv(2 * c_, c2, 1,1)
+        self.m = nn.Sequential(*(GSBottleneck(c_, c_) for _ in range(n)))
+
+    def forward(self, x):
+        x1 = self.cv1(x)
+        return self.cv2(torch.cat((self.m(x1), x1), dim=1))
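+
+
+# Intended usage sketch (assumed): VoVGSCSP(c, c) preserves both spatial size and
+# channel count, so it can stand in for a same-width CSP layer in the neck, with
+# GSConv replacing standard convolutions for a lighter "slim neck".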
diff --git a/easycv/models/backbones/yolo6_blocks.py b/easycv/models/backbones/yolo6_blocks.py
new file mode 100644
index 00000000..7cb9d757
--- /dev/null
+++ b/easycv/models/backbones/yolo6_blocks.py
@@ -0,0 +1,269 @@
+#!/usr/bin/env python3
+# -*- coding:utf-8 -*-
+
+import warnings
+from pathlib import Path
+
+import numpy as np
+import torch
+import torch.nn as nn
+
+
+class SiLU(nn.Module):
+    '''Activation of SiLU'''
+    @staticmethod
+    def forward(x):
+        return x * torch.sigmoid(x)
+
+
+class Conv(nn.Module):
+    '''Normal Conv with SiLU activation'''
+    def __init__(self, in_channels, out_channels, kernel_size, stride, groups=1, bias=False):
+        super().__init__()
+        padding = kernel_size // 2
+        self.conv = nn.Conv2d(
+            in_channels,
+            out_channels,
+            kernel_size=kernel_size,
+            stride=stride,
+            padding=padding,
+            groups=groups,
+            bias=bias,
+        )
+        self.bn = nn.BatchNorm2d(out_channels)
+        self.act = nn.SiLU()
+
+    def forward(self, x):
+        return self.act(self.bn(self.conv(x)))
+
+    def forward_fuse(self, x):
+        return self.act(self.conv(x))
+
+
+class SimConv(nn.Module):
+    '''Normal Conv with ReLU activation'''
+    def __init__(self, in_channels, out_channels, kernel_size, stride, groups=1, bias=False):
+        super().__init__()
+        padding = kernel_size // 2
+        self.conv = nn.Conv2d(
+            in_channels,
+            out_channels,
+            kernel_size=kernel_size,
+            stride=stride,
+            padding=padding,
+            groups=groups,
+            bias=bias,
+        )
+        self.bn = nn.BatchNorm2d(out_channels)
+        self.act = nn.ReLU()
+
+    def forward(self, x):
+        return self.act(self.bn(self.conv(x)))
+
+    def forward_fuse(self, x):
+        return self.act(self.conv(x))
+
+
+class SimSPPF(nn.Module):
+    '''Simplified SPPF with ReLU activation'''
+    def __init__(self, in_channels, out_channels, kernel_size=5):
+        super().__init__()
+        c_ = in_channels // 2  # hidden channels
+        self.cv1 = SimConv(in_channels, c_, 1, 1)
+        self.cv2 = SimConv(c_ * 4, out_channels, 1, 1)
+        self.m = nn.MaxPool2d(kernel_size=kernel_size, stride=1, padding=kernel_size // 2)
+
+    def forward(self, x):
+        x = self.cv1(x)
+        with warnings.catch_warnings():
+            warnings.simplefilter('ignore')
+            y1 = self.m(x)
+            y2 = self.m(y1)
+            return self.cv2(torch.cat([x, y1, y2, self.m(y2)], 1))
+
+
+class Transpose(nn.Module):
+    '''Normal Transpose, default for upsampling'''
+    def __init__(self, in_channels, out_channels, kernel_size=2, stride=2):
+        super().__init__()
+        self.upsample_transpose = torch.nn.ConvTranspose2d(
+            in_channels=in_channels,
+            out_channels=out_channels,
+            kernel_size=kernel_size,
+            stride=stride,
+            bias=True
+        )
+
+    def forward(self, x):
+        return self.upsample_transpose(x)
+
+
+class Concat(nn.Module):
+    def __init__(self, dimension=1):
+        super().__init__()
+        self.d = dimension
+
+    def forward(self, x):
+        return torch.cat(x, self.d)
+
+
+def conv_bn(in_channels, out_channels, kernel_size, stride, padding, groups=1):
+    '''Basic cell for rep-style block, including conv and bn'''
+    result = nn.Sequential()
+    result.add_module('conv', nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
+                                                  kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, bias=False))
+    result.add_module('bn', nn.BatchNorm2d(num_features=out_channels))
+    return result
+
+
+class RepBlock(nn.Module):
+    '''
+        RepBlock is a stage block with rep-style basic block
+    '''
+    def __init__(self, in_channels, out_channels, n=1):
+        super().__init__()
+        self.conv1 = RepVGGBlock(in_channels, out_channels)
+        self.block = nn.Sequential(*(RepVGGBlock(out_channels, out_channels) for _ in range(n - 1))) if n > 1 else None
+
+    def forward(self, x):
+        x = self.conv1(x)
+        if self.block is not None:
+            x = self.block(x)
+        return x
+
+
+class RepVGGBlock(nn.Module):
+    '''RepVGGBlock is a basic rep-style block, including training and deploy status
+    This code is based on https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py
+    '''
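+
+    # Training mode runs three parallel branches (3x3 conv+BN, 1x1 conv+BN, and an
+    # identity BN when in/out channels match and stride == 1) and sums them before
+    # the ReLU; switch_to_deploy() folds all three into a single 3x3 conv.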
+    def __init__(self, in_channels, out_channels, kernel_size=3,
+                 stride=1, padding=1, dilation=1, groups=1, padding_mode='zeros', deploy=False, use_se=False):
+        super(RepVGGBlock, self).__init__()
+        """ Intialization of the class.
+        Args:
+            in_channels (int): Number of channels in the input image
+            out_channels (int): Number of channels produced by the convolution
+            kernel_size (int or tuple): Size of the convolving kernel
+            stride (int or tuple, optional): Stride of the convolution. Default: 1
+            padding (int or tuple, optional): Zero-padding added to both sides of
+                the input. Default: 1
+            dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
+            groups (int, optional): Number of blocked connections from input
+                channels to output channels. Default: 1
+            padding_mode (string, optional): Default: 'zeros'
+            deploy: Whether to be deploy status or training status. Default: False
+            use_se: Whether to use se. Default: False
+        """
+        self.deploy = deploy
+        self.groups = groups
+        self.in_channels = in_channels
+        self.out_channels = out_channels
+
+        assert kernel_size == 3
+        assert padding == 1
+
+        padding_11 = padding - kernel_size // 2
+
+        self.nonlinearity = nn.ReLU()
+
+        if use_se:
+            raise NotImplementedError("se block not supported yet")
+        else:
+            self.se = nn.Identity()
+
+        if deploy:
+            self.rbr_reparam = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride,
+                                         padding=padding, dilation=dilation, groups=groups, bias=True, padding_mode=padding_mode)
+
+        else:
+            self.rbr_identity = nn.BatchNorm2d(num_features=in_channels) if out_channels == in_channels and stride == 1 else None
+            self.rbr_dense = conv_bn(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups)
+            self.rbr_1x1 = conv_bn(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride, padding=padding_11, groups=groups)
+
+    def forward(self, inputs):
+        '''Forward process'''
+        if hasattr(self, 'rbr_reparam'):
+            return self.nonlinearity(self.se(self.rbr_reparam(inputs)))
+
+        if self.rbr_identity is None:
+            id_out = 0
+        else:
+            id_out = self.rbr_identity(inputs)
+
+        return self.nonlinearity(self.se(self.rbr_dense(inputs) + self.rbr_1x1(inputs) + id_out))
+
+    def get_equivalent_kernel_bias(self):
+        kernel3x3, bias3x3 = self._fuse_bn_tensor(self.rbr_dense)
+        kernel1x1, bias1x1 = self._fuse_bn_tensor(self.rbr_1x1)
+        kernelid, biasid = self._fuse_bn_tensor(self.rbr_identity)
+        return kernel3x3 + self._pad_1x1_to_3x3_tensor(kernel1x1) + kernelid, bias3x3 + bias1x1 + biasid
+
+    def _pad_1x1_to_3x3_tensor(self, kernel1x1):
+        if kernel1x1 is None:
+            return 0
+        else:
+            return torch.nn.functional.pad(kernel1x1, [1, 1, 1, 1])
+
+    def _fuse_bn_tensor(self, branch):
+        if branch is None:
+            return 0, 0
+        if isinstance(branch, nn.Sequential):
+            kernel = branch.conv.weight
+            running_mean = branch.bn.running_mean
+            running_var = branch.bn.running_var
+            gamma = branch.bn.weight
+            beta = branch.bn.bias
+            eps = branch.bn.eps
+        else:
+            assert isinstance(branch, nn.BatchNorm2d)
+            if not hasattr(self, 'id_tensor'):
+                input_dim = self.in_channels // self.groups
+                kernel_value = np.zeros((self.in_channels, input_dim, 3, 3), dtype=np.float32)
+                for i in range(self.in_channels):
+                    kernel_value[i, i % input_dim, 1, 1] = 1
+                self.id_tensor = torch.from_numpy(kernel_value).to(branch.weight.device)
+            kernel = self.id_tensor
+            running_mean = branch.running_mean
+            running_var = branch.running_var
+            gamma = branch.weight
+            beta = branch.bias
+            eps = branch.eps
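+        # Fold BN into the conv: y = gamma * (W*x - mean) / std + beta becomes a conv
+        # with kernel W * (gamma / std) and bias beta - mean * gamma / std.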
+        std = (running_var + eps).sqrt()
+        t = (gamma / std).reshape(-1, 1, 1, 1)
+        return kernel * t, beta - running_mean * gamma / std
+
+    def switch_to_deploy(self):
+        if hasattr(self, 'rbr_reparam'):
+            return
+        kernel, bias = self.get_equivalent_kernel_bias()
+        self.rbr_reparam = nn.Conv2d(in_channels=self.rbr_dense.conv.in_channels, out_channels=self.rbr_dense.conv.out_channels,
+                                     kernel_size=self.rbr_dense.conv.kernel_size, stride=self.rbr_dense.conv.stride,
+                                     padding=self.rbr_dense.conv.padding, dilation=self.rbr_dense.conv.dilation, groups=self.rbr_dense.conv.groups, bias=True)
+        self.rbr_reparam.weight.data = kernel
+        self.rbr_reparam.bias.data = bias
+        for para in self.parameters():
+            para.detach_()
+        self.__delattr__('rbr_dense')
+        self.__delattr__('rbr_1x1')
+        if hasattr(self, 'rbr_identity'):
+            self.__delattr__('rbr_identity')
+        if hasattr(self, 'id_tensor'):
+            self.__delattr__('id_tensor')
+        self.deploy = True
+
+
+# class DetectBackend(nn.Module):
+#     def __init__(self, weights='yolov6s.pt', device=None, dnn=True):
+#
+#         super().__init__()
+#         assert isinstance(weights, str) and Path(weights).suffix == '.pt', f'{Path(weights).suffix} format is not supported.'
+#         # from yolov6.utils.checkpoint import load_checkpoint
+#         # model = load_checkpoint(weights, map_location=device)
+#         stride = int(model.stride.max())
+#         self.__dict__.update(locals())  # assign all variables to self
+#
+#     def forward(self, im, val=False):
+#         y = self.model(im)
+#         if isinstance(y, np.ndarray):
+#             y = torch.tensor(y, device=self.device)
+#         return y
diff --git a/easycv/models/detection/yolox/ASFF.py b/easycv/models/detection/yolox/ASFF.py
index 9cbc8e6a..018aace9 100644
--- a/easycv/models/detection/yolox/ASFF.py
+++ b/easycv/models/detection/yolox/ASFF.py
@@ -9,14 +9,28 @@ def autopad(k, p=None):  # kernel, padding
         p = k // 2 if isinstance(k, int) else [x // 2 for x in k]  # auto-pad
     return p
 
+def get_activation(name='silu', inplace=True):
+    if name == 'silu':
+        # TODO: use nn.SiLU (available from torch 1.7.0) instead of the custom SiLU
+        # module = nn.SiLU(inplace=inplace)
+        module = SiLU(inplace=inplace)
+    elif name == 'relu':
+        module = nn.ReLU(inplace=inplace)
+    elif name == 'lrelu':
+        module = nn.LeakyReLU(0.1, inplace=inplace)
+    else:
+        raise AttributeError('Unsupported act type: {}'.format(name))
+    return module
+
 
 class Conv(nn.Module):
     # Standard convolution
-    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
+    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act='silu'):  # ch_in, ch_out, kernel, stride, padding, groups
         super(Conv, self).__init__()
         self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
         self.bn = nn.BatchNorm2d(c2)
-        self.act = SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())
+        # self.act = SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())
+        self.act = get_activation(act, inplace=True)
 
     def forward(self, x):
         return self.act(self.bn(self.conv(x)))
@@ -26,7 +40,7 @@ def forward_fuse(self, x):
 
 
 class ASFF(nn.Module):
-    def __init__(self, level, multiplier=1, rfb=False, vis=False, act_cfg=True):
+    def __init__(self, level, multiplier=1, asff_channel=16, rfb=False, vis=False, act='silu'):
         """
         multiplier should be 1, 0.5
         which means, the channel of ASFF can be
@@ -40,39 +54,43 @@ def __init__(self, level, multiplier=1, rfb=False, vis=False, act_cfg=True):
                     int(256 * multiplier)]
         # print(self.dim)
 
+        # print(act, asff_channel)
+
         self.inter_dim = self.dim[self.level]
         if level == 0:
-            self.stride_level_1 = Conv(int(512 * multiplier), self.inter_dim, 3, 2)
+            self.stride_level_1 = Conv(int(512 * multiplier), self.inter_dim, 3, 2,act=act)
 
-            self.stride_level_2 = Conv(int(256 * multiplier), self.inter_dim, 3, 2)
+            self.stride_level_2 = Conv(int(256 * multiplier), self.inter_dim, 3, 2,act=act)
 
             self.expand = Conv(self.inter_dim, int(
-                1024 * multiplier), 3, 1)
+                1024 * multiplier), 3, 1, act=act)
         elif level == 1:
             self.compress_level_0 = Conv(
-                int(1024 * multiplier), self.inter_dim, 1, 1)
+                int(1024 * multiplier), self.inter_dim, 1, 1,act=act)
             self.stride_level_2 = Conv(
-                int(256 * multiplier), self.inter_dim, 3, 2)
-            self.expand = Conv(self.inter_dim, int(512 * multiplier), 3, 1)
+                int(256 * multiplier), self.inter_dim, 3, 2,act=act)
+            self.expand = Conv(self.inter_dim, int(512 * multiplier), 3, 1,act=act)
         elif level == 2:
             self.compress_level_0 = Conv(
-                int(1024 * multiplier), self.inter_dim, 1, 1)
+                int(1024 * multiplier), self.inter_dim, 1, 1,act=act)
             self.compress_level_1 = Conv(
-                int(512 * multiplier), self.inter_dim, 1, 1)
+                int(512 * multiplier), self.inter_dim, 1, 1,act=act)
             self.expand = Conv(self.inter_dim, int(
-                256 * multiplier), 3, 1)
+                256 * multiplier), 3, 1,act=act)
 
         # when adding rfb, we use half number of channels to save memory
-        compress_c = 8 if rfb else 16
+        # compress_c = 8 if rfb else 16
+        compress_c = asff_channel
+
         self.weight_level_0 = Conv(
-            self.inter_dim, compress_c, 1, 1)
+            self.inter_dim, compress_c, 1, 1,act=act)
         self.weight_level_1 = Conv(
-            self.inter_dim, compress_c, 1, 1)
+            self.inter_dim, compress_c, 1, 1,act=act)
         self.weight_level_2 = Conv(
-            self.inter_dim, compress_c, 1, 1)
+            self.inter_dim, compress_c, 1, 1,act=act)
 
         self.weight_levels = Conv(
-            compress_c * 3, 3, 1, 1)
+            compress_c * 3, 3, 1, 1,act=act)
         self.vis = vis
 
     def forward(self, x):  # l,m,s
@@ -81,9 +99,9 @@ def forward(self, x):  # l,m,s
         256, 512, 1024
         from small -> large
         """
-        x_level_0 = x[2]  # deepest feature map
-        x_level_1 = x[1]  # middle feature map
-        x_level_2 = x[0]  # shallowest feature map
+        x_level_0 = x[2]  # deepest feature map [512,20,20]
+        x_level_1 = x[1]  # middle feature map [256,40,40]
+        x_level_2 = x[0]  # shallowest feature map [128,80,80]
 
         if self.level == 0:
             level_0_resized = x_level_0
@@ -126,3 +144,4 @@ def forward(self, x):  # l,m,s
         else:
             return out
 
+
diff --git a/easycv/models/detection/yolox/ASFF_sim.py b/easycv/models/detection/yolox/ASFF_sim.py
new file mode 100644
index 00000000..fa19152a
--- /dev/null
+++ b/easycv/models/detection/yolox/ASFF_sim.py
@@ -0,0 +1,292 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from easycv.models.backbones.network_blocks import SiLU
+from easycv.models.backbones.network_blocks import DWConv
+from torchsummaryX import summary
+
+
+def autopad(k, p=None):  # kernel, padding
+    # Pad to 'same'
+    if p is None:
+        p = k // 2 if isinstance(k, int) else [x // 2 for x in k]  # auto-pad
+    return p
+
+def get_activation(name='silu', inplace=True):
+    if name == 'silu':
+        # TODO: use nn.SiLU (available from torch 1.7.0) instead of the custom SiLU
+        # module = nn.SiLU(inplace=inplace)
+        module = SiLU(inplace=inplace)
+    elif name == 'relu':
+        module = nn.ReLU(inplace=inplace)
+    elif name == 'lrelu':
+        module = nn.LeakyReLU(0.1, inplace=inplace)
+    else:
+        raise AttributeError('Unsupported act type: {}'.format(name))
+    return module
+
+
+class Conv(nn.Module):
+    # Standard convolution
+    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act='silu'):  # ch_in, ch_out, kernel, stride, padding, groups
+        super(Conv, self).__init__()
+        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
+        self.bn = nn.BatchNorm2d(c2)
+        # self.act = SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())
+        self.act = get_activation(act, inplace=True)
+
+    def forward(self, x):
+        return self.act(self.bn(self.conv(x)))
+
+    def forward_fuse(self, x):
+        return self.act(self.conv(x))
+
+
+# class expandChannel(nn.Module):
+#     def __init__(self,
+#                  in_channels,
+#                  out_channels,
+#                  ksize=1,
+#                  stride=1,
+#                  act='silu',
+#                  use_conv = True):
+#         super().__init__()
+#         self.use_conv = use_conv
+#         self.conv = Conv(
+#             in_channels * 4, out_channels, ksize, stride, act=act)
+#
+#     def forward(self, x):
+#         # shape of x (b,c,w,h) -> y(b,4c,w/2,h/2)
+#         patch_top_left = x[..., ::2, ::2]
+#         patch_top_right = x[..., ::2, 1::2]
+#         patch_bot_left = x[..., 1::2, ::2]
+#         patch_bot_right = x[..., 1::2, 1::2]
+#         x = torch.cat(
+#             (
+#                 patch_top_left,
+#                 patch_bot_left,
+#                 patch_top_right,
+#                 patch_bot_right,
+#             ),
+#             dim=1,
+#         )
+#         if self.use_conv:
+#             return self.conv(x)
+#         else:
+#             return x
+
+
+class ASFF(nn.Module):
+    def __init__(self, level, multiplier=1, asff_channel=2, expand_kernel=3, down_rate = None, use_dconv = False, use_expand = True, rfb=False, vis=False, act='silu'):
+        """
+        multiplier should be 1, 0.5
+        which means, the channel of ASFF can be
+        512, 256, 128 -> multiplier=0.5
+        1024, 512, 256 -> multiplier=1
+        For even smaller, you need change code manually.
+        """
+        super(ASFF, self).__init__()
+        self.level = level
+        self.dim = [int(1024 * multiplier), int(512 * multiplier),
+                    int(256 * multiplier)]
+
+        self.inter_dim = self.dim[self.level]
+
+        self.use_expand = use_expand
+
+        if level == 0:
+            # self.stride_level_1 = Conv(int(512 * multiplier), self.inter_dim, 3, 2,act=act)
+            # self.stride_level_2 = Conv(int(256 * multiplier), self.inter_dim, 3, 2,act=act)
+            # self.expand_channel_1 = expandChannel(int(512 * multiplier), self.inter_dim*2, use_conv=use_conv)
+            # self.expand_channel_2 = expandChannel(int(256 * multiplier),self.inter_dim,use_conv=use_conv)
+            if down_rate == None:
+                self.expand = Conv(self.inter_dim, int(
+                    1024 * multiplier), expand_kernel, 1, act=act)
+            else:
+                if use_dconv:
+                    self.expand = DWConv(self.inter_dim, int(
+                        1024 * multiplier), expand_kernel, 1, act=act)
+                else:
+                    self.expand = nn.Sequential(
+                        Conv(self.inter_dim, int(self.inter_dim//down_rate), 1, 1, act=act),
+                        Conv(int(self.inter_dim//down_rate), int(1024 * multiplier), 1, 1, act=act)
+                    )
+
+        elif level == 1:
+            # self.compress_level_0 = Conv(
+            #     int(1024 * multiplier), self.inter_dim, 1, 1,act=act)
+            # self.stride_level_2 = Conv(
+            #     int(256 * multiplier), self.inter_dim, 3, 2,act=act)
+            # self.expand = Conv(self.inter_dim, int(512 * multiplier), 3, 1,act=act)
+            if down_rate == None:
+                self.expand = Conv(self.inter_dim, int(
+                    512 * multiplier), expand_kernel, 1, act=act)
+            else:
+                if use_dconv:
+                    self.expand = DWConv(self.inter_dim, int(
+                        512 * multiplier), expand_kernel, 1, act=act)
+                else:
+                    self.expand = nn.Sequential(
+                        Conv(self.inter_dim, int(self.inter_dim//down_rate), 1, 1, act=act),
+                        Conv(int(self.inter_dim//down_rate),
+                             int(512 * multiplier), 1, 1, act=act)
+                    )
+
+        elif level == 2:
+            # self.compress_level_0 = Conv(
+            #     int(1024 * multiplier), self.inter_dim, 1, 1,act=act)
+            # self.compress_level_1 = Conv(
+            #     int(512 * multiplier), self.inter_dim, 1, 1,act=act)
+            # self.expand = Conv(self.inter_dim, int(
+            #     256 * multiplier), 3, 1,act=act)
+            if down_rate == None:
+                self.expand = Conv(self.inter_dim, int(
+                    256 * multiplier), expand_kernel, 1, act=act)
+            else:
+                if use_dconv:
+                    self.expand = DWConv(self.inter_dim, int(
+                        256 * multiplier), expand_kernel, 1, act=act)
+                else:
+                    self.expand = nn.Sequential(
+                        Conv(self.inter_dim, int(self.inter_dim//down_rate), 1, 1, act=act),
+                        Conv(int(self.inter_dim//down_rate),
+                             int(256 * multiplier), 1, 1, act=act)
+                    )
+
+        # when adding rfb, we use half number of channels to save memory
+        # compress_c = 8 if rfb else 16
+        compress_c = asff_channel
+
+        self.weight_level_0 = Conv(
+            self.inter_dim, compress_c, 1, 1,act=act)
+        self.weight_level_1 = Conv(
+            self.inter_dim, compress_c, 1, 1,act=act)
+        self.weight_level_2 = Conv(
+            self.inter_dim, compress_c, 1, 1,act=act)
+
+        self.weight_levels = Conv(
+            compress_c * 3, 3, 1, 1,act=act)
+        self.vis = vis
+
+    def expand_channel(self, x):
+        # [b,c,h,w]->[b,c*4,h/2,w/2]
+        patch_top_left = x[..., ::2, ::2]
+        patch_top_right = x[..., ::2, 1::2]
+        patch_bot_left = x[..., 1::2, ::2]
+        patch_bot_right = x[..., 1::2, 1::2]
+        x = torch.cat(
+            (
+                patch_top_left,
+                patch_bot_left,
+                patch_top_right,
+                patch_bot_right,
+            ),
+            dim=1,
+        )
+        return x
+
+    # def expand_fmap(self, x):
+    #     # [b,c,h,w]-> [b,c/4,h*2,w*2]
+    #     b,c,h,w = x.shape[1]
+    #     res = torch.zeros(b,int(c/4),h*2,w*2)
+    #     res[..., ::2, ::2] = x[:,:int(c/4),:,:]
+    #     res[..., ::2, 1::2] = x[:,int(c/4):int(c/2),:,:]
+    #     res[..., 1::2, ::2] = x[:,int(c/2):3*int(c/4),:,:]
+    #     res[..., 1::2, 1::2] = x[:,:int(c/4),:,:]
+    #
+    #     return res
+
+
+    def mean_channel(self, x):
+        # [b,c,h,w]->[b,c/4,h*2,w*2]
+        x1 = x[:,::2,:,:]
+        x2 = x[:,1::2,:,:]
+        return (x1+x2)/2
+
+
+    def forward(self, x):  # l,m,s
+        """
+        #
+        256, 512, 1024
+        from small -> large
+        """
+        x_level_0 = x[2]  # deepest feature map [512,20,20]
+        x_level_1 = x[1]  # middle feature map [256,40,40]
+        x_level_2 = x[0]  # shallowest feature map [128,80,80]
+
+        if self.level == 0:
+            level_0_resized = x_level_0
+            level_1_resized = self.expand_channel(x_level_1)
+            level_1_resized = self.mean_channel(level_1_resized)
+            level_2_resized = self.expand_channel(x_level_2)
+            level_2_resized = F.max_pool2d(
+                level_2_resized, 3, stride=2, padding=1)
+        elif self.level == 1:
+            level_0_resized = F.interpolate(
+                x_level_0, scale_factor=2, mode='nearest')
+            level_0_resized = self.mean_channel(level_0_resized)
+            level_1_resized = x_level_1
+            level_2_resized = self.expand_channel(x_level_2)
+            level_2_resized = self.mean_channel(level_2_resized)
+
+        elif self.level == 2:
+            level_0_resized = F.interpolate(
+                x_level_0, scale_factor=4, mode='nearest')
+            level_0_resized = self.mean_channel(self.mean_channel(level_0_resized))
+            level_1_resized = F.interpolate(
+                x_level_1, scale_factor=2, mode='nearest')
+            level_1_resized = self.mean_channel(level_1_resized)
+            level_2_resized = x_level_2
+
+        level_0_weight_v = self.weight_level_0(level_0_resized)
+        level_1_weight_v = self.weight_level_1(level_1_resized)
+        level_2_weight_v = self.weight_level_2(level_2_resized)
+
+        levels_weight_v = torch.cat(
+            (level_0_weight_v, level_1_weight_v, level_2_weight_v), 1)
+        levels_weight = self.weight_levels(levels_weight_v)
+        levels_weight = F.softmax(levels_weight, dim=1)
+
+        fused_out_reduced = level_0_resized * levels_weight[:, 0:1, :, :] + \
+                            level_1_resized * levels_weight[:, 1:2, :, :] + \
+                            level_2_resized * levels_weight[:, 2:, :, :]
+
+        if self.use_expand:
+            out = self.expand(fused_out_reduced)
+        else:
+            out = fused_out_reduced
+
+        if self.vis:
+            return out, levels_weight, fused_out_reduced.sum(dim=1)
+        else:
+            return out
+
+if __name__=="__main__":
+    width = 0.5
+    num_classes = 80
+    in_channels = [256, 512, 1024]
+
+    asff_channel = 2
+    print(asff_channel)
+    act = 'relu'
+
+    asff_1 = ASFF(level=0, multiplier=width, asff_channel=asff_channel, act=act).cuda()
+    asff_2 = ASFF(level=1, multiplier=width, asff_channel=asff_channel, act=act).cuda()
+    asff_3 = ASFF(level=2, multiplier=width, asff_channel=asff_channel, act=act).cuda()
+
+    input = (
+        torch.rand(1, 128, 80, 80).cuda(), torch.rand(1, 256, 40, 40).cuda(), torch.rand(1, 512, 20, 20).cuda())
+
+    # flops, params = get_model_complexity_info(asff_1, input, as_strings=True,
+    #                                           print_per_layer_stat=True)
+    # print('Flops:  ' + flops)
+    # print('Params: ' + params)
+
+    # input = torch.randn(1, 3, 640, 640).cuda()
+    # flops, params = profile(asff_1, inputs=(input,))
+    # print('flops: {}, params: {}'.format(flops, params))
+
+    summary(asff_1, input)
+    summary(asff_2, input)
+    summary(asff_3, input)
+
diff --git a/easycv/models/detection/yolox/ppyoloe_head.py b/easycv/models/detection/yolox/ppyoloe_head.py
new file mode 100644
index 00000000..588ac879
--- /dev/null
+++ b/easycv/models/detection/yolox/ppyoloe_head.py
@@ -0,0 +1,946 @@
+# Copyright (c) 2014-2021 Megvii Inc And Alibaba PAI-Teams. All rights reserved.
+import logging
+import math
+from distutils.version import LooseVersion
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from easycv.models.backbones.network_blocks import BaseConv, DWConv
+from easycv.models.detection.utils import bboxes_iou
+from easycv.models.loss import IOUloss
+from easycv.models.loss import FocalLoss, VarifocalLoss
+from torchsummaryX import summary
+
+
+class ESEAttn(nn.Module):
+    def __init__(self, feat_channels, act='silu'):
+        super(ESEAttn, self).__init__()
+        self.fc = nn.Conv2d(feat_channels, feat_channels, 1)
+        self.sig = nn.Sigmoid()
+        self.conv = BaseConv(feat_channels, feat_channels, ksize=1, stride=1, act=act)
+
+        self._init_weights()
+
+    def _init_weights(self):
+        nn.init.normal_(self.fc.weight, mean=0, std=0.001)
+
+    def forward(self, feat, avg_feat):
+        weight = self.sig(self.fc(avg_feat))
+        return self.conv(feat * weight)
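+
+# ESEAttn is an "effective squeeze-and-excitation" style gate: a 1x1 conv on the
+# globally average-pooled feature yields per-channel sigmoid weights that rescale
+# the input before the final 1x1 BaseConv.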
+
+
+class PPYOLOEHead(nn.Module):
+    def __init__(self,
+                 num_classes=80,
+                 width=1.0,
+                 strides=[8, 16, 32],
+                 in_channels=[256, 512, 1024],
+                 act='silu',
+                 depthwise=False,
+                 stage='CLOUD',
+                 obj_loss_type='l1',
+                 reg_loss_type='l1',
+                 reg_max=16,
+                 static_assigner_epoch=4,
+                 use_varifocal_loss=True,
+                 eval_input_size=[],
+                 # static_assigner=ATSSAssigner(9, num_classes=80),
+                 # assigner=TaskAlignedAssigner(topk=13, alpha=1.0, beta=6.0),
+                 loss_weight={
+                     'class': 1.0,
+                     'iou': 2.5,
+                     'dfl': 0.5,
+                 },
+                 atss_topk=9):
+        """
+        Args:
+            num_classes (int): detection class numbers.
+            width (float): model width. Default value: 1.0.
+            strides (list): expanded strides. Default value: [8, 16, 32].
+            in_channels (list): model conv channels set. Default value: [256, 512, 1024].
+            act (str): activation type of conv. Default value: "silu".
+            depthwise (bool): whether apply depthwise conv in conv branch. Default value: False.
+            stage (str): model stage, distinguish edge head to cloud head. Default value: CLOUD.
+            obj_loss_type (str): the loss function of the obj conf. Default value: l1.
+            reg_loss_type (str): the loss function of the box prediction. Default value: l1.
+        """
+
+        super(PPYOLOEHead, self).__init__()
+        self.n_anchors = 1
+        self.num_classes = num_classes
+        self.stage = stage
+        self.decode_in_inference = True  # for deploy, set to False
+        self.in_channels = in_channels
+
+        self.reg_max = reg_max
+        self.loss_weight = loss_weight
+        self.use_varifocal_loss = use_varifocal_loss
+        self.varifocal_loss = VarifocalLoss().cuda()
+        self.focal_loss = FocalLoss().cuda()
+
+        self.eval_input_size = eval_input_size
+        self.static_assigner_epoch = static_assigner_epoch
+
+        # stem
+        self.stem_cls = nn.ModuleList()
+        self.stem_reg = nn.ModuleList()
+
+        for in_c in self.in_channels:
+            self.stem_cls.append(ESEAttn(int(in_c * width), act=act))
+            self.stem_reg.append(ESEAttn(int(in_c * width), act=act))
+
+        # pred head
+        self.cls_preds = nn.ModuleList()
+        self.reg_preds = nn.ModuleList()
+
+        for in_c in self.in_channels:
+            self.cls_preds.append(
+                nn.Conv2d(
+                    in_channels=int(in_c * width),
+                    out_channels=self.n_anchors * self.num_classes,
+                    kernel_size=3,
+                    stride=1,
+                    padding=1,
+                )
+            )
+            self.reg_preds.append(
+                nn.Conv2d(
+                    in_channels=int(in_c * width),
+                    out_channels=4 * (self.reg_max + 1),
+                    kernel_size=3,
+                    stride=1,
+                    padding=1,
+                )
+            )
+        # projection conv
+        self.proj_conv = nn.Conv2d(self.reg_max + 1, 1, 1, bias=False)
+        self._init_weights()
+
+        # ATSS para
+        # self.atss_topk = atss_topk
+        # self.atss_assign = static_assigner
+        # self.assigner = assigner
+
+        self.bcewithlog_loss = nn.BCEWithLogitsLoss(reduction='none')
+
+        self.use_l1 = False
+        self.l1_loss = nn.L1Loss(reduction='none')
+
+        self.iou_loss = IOUloss(reduction='none', loss_type=reg_loss_type)
+
+        self.obj_loss_type = obj_loss_type
+        if obj_loss_type == 'BCE':
+            self.obj_loss = nn.BCEWithLogitsLoss(reduction='none')
+        elif obj_loss_type == 'focal':
+            self.obj_loss = FocalLoss(reduction='none')
+
+        elif obj_loss_type == 'v_focal':
+            self.obj_loss = VarifocalLoss(reduction='none')
+        else:
+            assert "Undefined loss type: {}".format(obj_loss_type)
+
+        self.strides = strides
+        self.grids = [torch.zeros(1)] * len(in_channels)
+
+    def _init_weights(self, prior_prob=0.01):
+        for conv in self.cls_preds:
+            b = conv.bias.view(-1, )
+            b.data.fill_(-math.log((1 - prior_prob) / prior_prob))
+            conv.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
+            w = conv.weight
+            w.data.fill_(0.)
+            conv.weight = torch.nn.Parameter(w, requires_grad=True)
+
+        for conv in self.reg_preds:
+            b = conv.bias.view(-1, )
+            b.data.fill_(1.0)
+            conv.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
+            w = conv.weight
+            w.data.fill_(0.)
+            conv.weight = torch.nn.Parameter(w, requires_grad=True)
+
+        self.proj = nn.Parameter(torch.linspace(0, self.reg_max, self.reg_max + 1), requires_grad=False)
+        self.proj_conv.weight = torch.nn.Parameter(self.proj.view([1, self.reg_max + 1, 1, 1]).clone().detach(),
+                                                   requires_grad=False)
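+        # self.proj / self.proj_conv turn each box side's (reg_max + 1)-bin softmax
+        # distribution into its expected value, i.e. integral (DFL-style) regression.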
+
+        if self.eval_input_size:
+            anchor_points, stride_tensor = self._generate_anchors()
+            self.register_buffer('anchor_points', anchor_points)
+            self.register_buffer('stride_tensor', stride_tensor)
+
+    def initialize_biases(self, prior_prob):
+        for conv in self.cls_preds:
+            b = conv.bias.view(self.n_anchors, -1)
+            b.data.fill_(-math.log((1 - prior_prob) / prior_prob))
+            conv.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
+
+        for conv in self.reg_preds:
+            b = conv.bias.view(self.n_anchors, -1)
+            b.data.fill_(-math.log((1 - prior_prob) / prior_prob))
+            conv.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
+
+    def forward(self, xin, labels=None, imgs=None):
+        outputs = []
+        reg_outputs = [] # to compute the box loss
+        origin_preds = []
+        x_shifts = []
+        y_shifts = []
+        expanded_strides = []
+
+        for k, (stride_this_level, x) in enumerate(
+                zip(self.strides, xin)):
+            avg_feat = F.adaptive_avg_pool2d(x, (1, 1))
+            cls_output = self.cls_preds[k](self.stem_cls[k](x, avg_feat) + x)
+            reg_output = self.reg_preds[k](self.stem_reg[k](x, avg_feat))
+
+            reg_outputs.append(reg_output)
+
+            if self.training:
+                output = torch.cat([reg_output, cls_output], 1)
+                # decode the output to c_x, c_y, w, h
+                output, grid = self.get_output_and_grid(
+                    output, k, stride_this_level, xin[0].type())
+
+                x_shifts.append(grid[:, :, 0])
+                y_shifts.append(grid[:, :, 1])
+                expanded_strides.append(
+                    torch.zeros(
+                        1, grid.shape[1]).fill_(stride_this_level).type_as(
+                        xin[0]))
+                if self.use_l1:
+                    batch_size = reg_output.shape[0]
+
+                    hsize, wsize = reg_output.shape[-2:]
+                    reg_output = reg_output.view(batch_size, self.n_anchors, 4,
+                                                 hsize, wsize)
+                    reg_output = reg_output.permute(0, 1, 3, 4, 2).reshape(
+                        batch_size, -1, 4)
+                    reg_output = F.softmax(
+                        reg_output.view(batch_size, self.n_anchors * hsize * wsize, 4, self.reg_max + 1),
+                        dim=-1).matmul(self.proj)
+                    origin_preds.append(reg_output.clone())
+
+            else:
+                # merge reg_output and set obj_output as torch.ones
+                batch_size = reg_output.shape[0]
+                hsize, wsize = reg_output.shape[-2:]
+
+                reg_output = reg_output.view(batch_size, self.n_anchors, 4,
+                                             hsize, wsize)
+                reg_output = reg_output.permute(0, 1, 3, 4, 2).reshape(
+                    batch_size, -1, 4)
+                reg_output = F.softmax(reg_output.view(batch_size, self.n_anchors * hsize * wsize, 4, self.reg_max + 1),
+                                dim=-1).matmul(self.proj)
+
+                obj_output = torch.ones((reg_output.shape[0], reg_output.shape[1], 1), device=reg_output.device,
+                                        dtype=reg_output.dtype)
+                if self.stage == 'EDGE':
+                    m = nn.Hardsigmoid()
+                    output = torch.cat(
+                        [reg_output, m(obj_output),
+                         m(cls_output)], 1)
+                else:
+                    output = torch.cat([
+                        reg_output,
+                        obj_output.sigmoid(),
+                        cls_output.sigmoid()
+                    ], 1)
+
+            outputs.append(output)
+
+        if self.training:
+
+            return self.get_losses(
+                imgs,
+                x_shifts,
+                y_shifts,
+                expanded_strides,
+                labels,
+                torch.cat(outputs, 1),
+                origin_preds,
+                dtype=xin[0].dtype,
+            )
+
+        else:
+            self.hw = [x.shape[-2:] for x in outputs]
+            # [batch, n_anchors_all, 85]
+            outputs = torch.cat([x.flatten(start_dim=2) for x in outputs],
+                                dim=2).permute(0, 2, 1)
+            if self.decode_in_inference:
+                return self.decode_outputs(outputs, dtype=xin[0].type())
+            else:
+                return outputs
+
+    def get_output_and_grid(self, output, k, stride, dtype):
+        grid = self.grids[k]
+
+        batch_size = output.shape[0]
+        n_ch_distri = 4*(1+self.reg_max) + self.num_classes
+        n_ch = 5 + self.num_classes
+        hsize, wsize = output.shape[-2:]
+
+        if grid.shape[2:4] != output.shape[2:4]:
+            yv, xv = torch.meshgrid([torch.arange(hsize), torch.arange(wsize)])
+            grid = torch.stack((xv, yv), 2).view(1, 1, hsize, wsize,
+                                                 2).type(dtype)
+            self.grids[k] = grid
+
+        output = output.view(batch_size, self.n_anchors, n_ch_distri, hsize, wsize)
+        output = output.permute(0, 1, 3, 4,
+                                2).reshape(batch_size,
+                                           self.n_anchors * hsize * wsize, -1)
+        reg_output = output[:, :, :4 * (1 + self.reg_max)]
+        cls_output = output[:, :, 4 * (1 + self.reg_max):]
+
+        pred_dist = F.softmax(reg_output.view(batch_size, self.n_anchors * hsize * wsize, 4, self.reg_max + 1), dim=-1).matmul(self.proj)
+
+        grid = grid.view(1, -1, 2)
+        output_pred = torch.ones(batch_size, self.n_anchors * hsize * wsize, n_ch).cuda()
+
+        output_pred[..., :2] = (pred_dist[..., :2] + grid) * stride
+        output_pred[..., 2:4] = torch.exp(pred_dist[..., 2:4]) * stride
+        output_pred[..., 5:] = cls_output
+
+        return output_pred, grid
+
+    def decode_outputs(self, outputs, dtype):
+        grids = []
+        strides = []
+        for (hsize, wsize), stride in zip(self.hw, self.strides):
+            yv, xv = torch.meshgrid([torch.arange(hsize), torch.arange(wsize)])
+            grid = torch.stack((xv, yv), 2).view(1, -1, 2)
+            grids.append(grid)
+            shape = grid.shape[:2]
+            strides.append(torch.full((*shape, 1), stride, dtype=torch.int))
+
+        grids = torch.cat(grids, dim=1).type(dtype)
+        strides = torch.cat(strides, dim=1).type(dtype)
+
+        outputs[..., :2] = (outputs[..., :2] + grids) * strides
+        outputs[..., 2:4] = torch.exp(outputs[..., 2:4]) * strides
+        return outputs
+
+    def get_losses(
+            self,
+            imgs,
+            x_shifts,
+            y_shifts,
+            expanded_strides,
+            labels,
+            outputs,
+            origin_preds,
+            dtype,
+    ):
+        bbox_preds = outputs[:, :, :4]  # [batch, n_anchors_all, 4]
+        # obj_preds = outputs[:, :, 4].unsqueeze(-1)  # [batch, n_anchors_all, 1]
+        cls_preds = outputs[:, :, 5:]  # [batch, n_anchors_all, n_cls]
+
+        # calculate targets
+        nlabel = (labels.sum(dim=2) > 0).sum(dim=1)  # number of objects
+
+        total_num_anchors = outputs.shape[1]
+        x_shifts = torch.cat(x_shifts, 1)  # [1, n_anchors_all]
+        y_shifts = torch.cat(y_shifts, 1)  # [1, n_anchors_all]
+        expanded_strides = torch.cat(expanded_strides, 1)
+        if self.use_l1:
+            origin_preds = torch.cat(origin_preds, 1)
+
+        cls_targets = []
+        reg_targets = []
+        l1_targets = []
+        obj_targets = []
+        fg_masks = []
+
+        num_fg = 0.0
+        num_gts = 0.0
+
+        for batch_idx in range(outputs.shape[0]):
+            num_gt = int(nlabel[batch_idx])
+
+            num_gts += num_gt
+            if num_gt == 0:
+                cls_target = outputs.new_zeros((0, self.num_classes))
+                reg_target = outputs.new_zeros((0, 4))
+                l1_target = outputs.new_zeros((0, 4))
+                obj_target = outputs.new_zeros((total_num_anchors, 1))
+                fg_mask = outputs.new_zeros(total_num_anchors).bool()
+            else:
+                gt_bboxes_per_image = labels[batch_idx, :num_gt, 1:5]
+                gt_classes = labels[batch_idx, :num_gt, 0]
+                bboxes_preds_per_image = bbox_preds[batch_idx]
+
+                try:
+                    if 1:
+                        # atss
+                        (
+                            gt_matched_classes,
+                            fg_mask,
+                            pred_ious_this_matching,
+                            matched_gt_inds,
+                            num_fg_img,
+                        ) = self.get_assignments_atss(  # noqa
+                            batch_idx,
+                            num_gt,
+                            total_num_anchors,
+                            gt_bboxes_per_image,
+                            gt_classes,
+                            bboxes_preds_per_image,
+                            expanded_strides,
+                            x_shifts,
+                            y_shifts,
+                            cls_preds,
+                            bbox_preds,
+                            # obj_preds,
+                            labels,
+                            imgs,
+                        )
+                    else:
+                        # tal
+                        (
+                            gt_matched_classes,
+                            fg_mask,
+                            pred_ious_this_matching,
+                            matched_gt_inds,
+                            num_fg_img,
+                        ) = self.get_assignments_atss(  # noqa
+                            batch_idx,
+                            num_gt,
+                            total_num_anchors,
+                            gt_bboxes_per_image,
+                            gt_classes,
+                            bboxes_preds_per_image,
+                            expanded_strides,
+                            x_shifts,
+                            y_shifts,
+                            cls_preds,
+                            bbox_preds,
+                            # obj_preds,
+                            labels,
+                            imgs,
+                        )
+
+                except RuntimeError:
+                    logging.error(
+                        'OOM RuntimeError is raised due to the huge memory cost during label assignment. \
+                           CPU mode is applied in this batch. If you want to avoid this issue, \
+                           try to reduce the batch size or image size.')
+                    torch.cuda.empty_cache()
+                    (
+                        gt_matched_classes,
+                        fg_mask,
+                        pred_ious_this_matching,
+                        matched_gt_inds,
+                        num_fg_img,
+                    ) = self.get_assignments(  # noqa
+                        batch_idx,
+                        num_gt,
+                        total_num_anchors,
+                        gt_bboxes_per_image,
+                        gt_classes,
+                        bboxes_preds_per_image,
+                        expanded_strides,
+                        x_shifts,
+                        y_shifts,
+                        cls_preds,
+                        bbox_preds,
+                        # obj_preds,
+                        labels,
+                        imgs,
+                        'cpu',
+                    )
+
+                torch.cuda.empty_cache()
+                num_fg += num_fg_img
+
+                cls_target = F.one_hot(
+                    gt_matched_classes.to(torch.int64),
+                    self.num_classes) * pred_ious_this_matching.unsqueeze(-1)
+                obj_target = fg_mask.unsqueeze(-1)
+                reg_target = gt_bboxes_per_image[matched_gt_inds]
+
+                if self.use_l1:
+                    l1_target = self.get_l1_target(
+                        outputs.new_zeros((num_fg_img, 4)),
+                        gt_bboxes_per_image[matched_gt_inds],
+                        expanded_strides[0][fg_mask],
+                        x_shifts=x_shifts[0][fg_mask],
+                        y_shifts=y_shifts[0][fg_mask],
+                    )
+
+            cls_targets.append(cls_target)
+            reg_targets.append(reg_target)
+            obj_targets.append(obj_target.to(dtype))
+            fg_masks.append(fg_mask)
+            if self.use_l1:
+                l1_targets.append(l1_target)
+
+        cls_targets = torch.cat(cls_targets, 0)
+        reg_targets = torch.cat(reg_targets, 0)
+        obj_targets = torch.cat(obj_targets, 0)
+        fg_masks = torch.cat(fg_masks, 0)
+
+        if self.use_l1:
+            l1_targets = torch.cat(l1_targets, 0)
+
+        num_fg = max(num_fg, 1)
+
+        loss_iou = (self.iou_loss(
+            bbox_preds.view(-1, 4)[fg_masks], reg_targets)).sum() / num_fg
+
+        # if self.obj_loss_type == 'focal':
+        #     loss_obj = (
+        #                    self.focal_loss(obj_preds.sigmoid().view(-1, 1), obj_targets)
+        #                ).sum() / num_fg
+        # else:
+        #     loss_obj = (self.obj_loss(obj_preds.view(-1, 1),
+        #                               obj_targets)).sum() / num_fg
+
+        loss_obj = 0.0
+
+        loss_cls = (self.bcewithlog_loss(
+            cls_preds.view(-1, self.num_classes)[fg_masks],
+            cls_targets)).sum() / num_fg
+
+        if self.use_l1:
+            loss_l1 = (self.l1_loss(
+                origin_preds.view(-1, 4)[fg_masks], l1_targets)).sum() / num_fg
+        else:
+            loss_l1 = 0.0
+
+        reg_weight = 5.0
+        loss = reg_weight * loss_iou + loss_obj + loss_cls + loss_l1
+
+        return (
+            loss,
+            reg_weight * loss_iou,
+            loss_obj,
+            loss_cls,
+            loss_l1,
+            num_fg / max(num_gts, 1),
+        )
+
+    def focal_loss(self, pred, gt):
+        pos_inds = gt.eq(1).float()
+        neg_inds = gt.eq(0).float()
+        pos_loss = torch.log(pred + 1e-5) * torch.pow(1 - pred, 2) * pos_inds * 0.75
+        neg_loss = torch.log(1 - pred + 1e-5) * torch.pow(pred, 2) * neg_inds * 0.25
+        loss = -(pos_loss + neg_loss)
+        return loss
+
+    def get_l1_target(self,
+                      l1_target,
+                      gt,
+                      stride,
+                      x_shifts,
+                      y_shifts,
+                      eps=1e-8):
+        l1_target[:, 0] = gt[:, 0] / stride - x_shifts
+        l1_target[:, 1] = gt[:, 1] / stride - y_shifts
+        l1_target[:, 2] = torch.log(gt[:, 2] / stride + eps)
+        l1_target[:, 3] = torch.log(gt[:, 3] / stride + eps)
+        return l1_target
+
+    @torch.no_grad()
+    def get_assignments_atss(
+            self,
+            batch_idx,
+            num_gt,
+            total_num_anchors,
+            gt_bboxes_per_image,
+            gt_classes,
+            bboxes_preds_per_image,
+            expanded_strides,
+            x_shifts,
+            y_shifts,
+            cls_preds,
+            bbox_preds,
+            obj_preds,
+            labels,
+            imgs,
+            mode='gpu',
+    ):
+
+        if mode == 'cpu':
+            print('------------CPU Mode for This Batch-------------')
+            gt_bboxes_per_image = gt_bboxes_per_image.cpu().float()
+            bboxes_preds_per_image = bboxes_preds_per_image.cpu().float()
+            gt_classes = gt_classes.cpu().float()
+            expanded_strides = expanded_strides.cpu().float()
+            x_shifts = x_shifts.cpu()
+            y_shifts = y_shifts.cpu()
+
+        # fg_mask, is_in_boxes_and_center = self.get_in_boxes_info(
+        #     gt_bboxes_per_image,
+        #     expanded_strides,
+        #     x_shifts,
+        #     y_shifts,
+        #     total_num_anchors,
+        #     num_gt,
+        # )
+        # reference to: https://github.com/Megvii-BaseDetection/YOLOX/pull/811
+        # NOTE: Fix `selected index k out of range`
+        # npa: int = fg_mask.sum().item()  # number of positive anchors
+        #
+        # if npa == 0:
+        #     gt_matched_classes = torch.zeros(0, device=fg_mask.device).long()
+        #     pred_ious_this_matching = torch.rand(0, device=fg_mask.device)
+        #     matched_gt_inds = gt_matched_classes
+        #     num_fg = npa
+        #
+        #     if mode == 'cpu':
+        #         gt_matched_classes = gt_matched_classes.cuda()
+        #         fg_mask = fg_mask.cuda()
+        #         pred_ious_this_matching = pred_ious_this_matching.cuda()
+        #         matched_gt_inds = matched_gt_inds.cuda()
+        #         num_fg = num_fg.cuda()
+        #
+        #     return (
+        #         gt_matched_classes,
+        #         fg_mask,
+        #         pred_ious_this_matching,
+        #         matched_gt_inds,
+        #         num_fg,
+        #     )
+
+        bboxes_preds_per_image = bboxes_preds_per_image[fg_mask]
+        cls_preds_ = cls_preds[batch_idx][fg_mask]
+        obj_preds_ = obj_preds[batch_idx][fg_mask]
+        num_in_boxes_anchor = bboxes_preds_per_image.shape[0]
+
+        if mode == 'cpu':
+            gt_bboxes_per_image = gt_bboxes_per_image.cpu()
+            bboxes_preds_per_image = bboxes_preds_per_image.cpu()
+
+        # Pairwise IoU between each GT box and the candidate predictions
+        pair_wise_ious = bboxes_iou(gt_bboxes_per_image,
+                                    bboxes_preds_per_image, False)
+
+        if (torch.isnan(pair_wise_ious.max())):
+            pair_wise_ious = bboxes_iou(gt_bboxes_per_image,
+                                        bboxes_preds_per_image, False)
+
+        gt_cls_per_image = (
+            F.one_hot(gt_classes.to(torch.int64),
+                      self.num_classes).float().unsqueeze(1).repeat(
+                1, num_in_boxes_anchor, 1))
+        pair_wise_ious_loss = -torch.log(pair_wise_ious + 1e-8)
+
+        if mode == 'cpu':
+            cls_preds_, obj_preds_ = cls_preds_.cpu(), obj_preds_.cpu()
+
+        if LooseVersion(torch.__version__) >= LooseVersion('1.6.0'):
+            with torch.cuda.amp.autocast(enabled=False):
+                cls_preds_ = (
+                        cls_preds_.float().unsqueeze(0).repeat(num_gt, 1,
+                                                               1).sigmoid_() *
+                        obj_preds_.float().unsqueeze(0).repeat(num_gt, 1,
+                                                               1).sigmoid_())
+                pair_wise_cls_loss = F.binary_cross_entropy(
+                    cls_preds_.sqrt_(), gt_cls_per_image,
+                    reduction='none').sum(-1)
+        else:
+            cls_preds_ = (
+                    cls_preds_.float().unsqueeze(0).repeat(num_gt, 1,
+                                                           1).sigmoid_() *
+                    obj_preds_.float().unsqueeze(0).repeat(num_gt, 1,
+                                                           1).sigmoid_())
+            pair_wise_cls_loss = F.binary_cross_entropy(
+                cls_preds_.sqrt_(), gt_cls_per_image, reduction='none').sum(-1)
+
+        del cls_preds_
+
+        cost = (
+                pair_wise_cls_loss + 3.0 * pair_wise_ious_loss + 100000.0 *
+                (~is_in_boxes_and_center))
+
+        (
+            num_fg,
+            gt_matched_classes,
+            pred_ious_this_matching,
+            matched_gt_inds,
+        ) = self.dynamic_k_matching(cost, pair_wise_ious, gt_classes, num_gt,
+                                    fg_mask)
+
+        del pair_wise_cls_loss, cost, pair_wise_ious, pair_wise_ious_loss
+
+        if mode == 'cpu':
+            gt_matched_classes = gt_matched_classes.cuda()
+            fg_mask = fg_mask.cuda()
+            pred_ious_this_matching = pred_ious_this_matching.cuda()
+            matched_gt_inds = matched_gt_inds.cuda()
+
+        return (
+            gt_matched_classes,
+            fg_mask,
+            pred_ious_this_matching,
+            matched_gt_inds,
+            num_fg,
+        )
+
+
+    @torch.no_grad()
+    def get_assignments(
+            self,
+            batch_idx,
+            num_gt,
+            total_num_anchors,
+            gt_bboxes_per_image,
+            gt_classes,
+            bboxes_preds_per_image,
+            expanded_strides,
+            x_shifts,
+            y_shifts,
+            cls_preds,
+            bbox_preds,
+            obj_preds,
+            labels,
+            imgs,
+            mode='gpu',
+    ):
+
+        if mode == 'cpu':
+            print('------------CPU Mode for This Batch-------------')
+            gt_bboxes_per_image = gt_bboxes_per_image.cpu().float()
+            bboxes_preds_per_image = bboxes_preds_per_image.cpu().float()
+            gt_classes = gt_classes.cpu().float()
+            expanded_strides = expanded_strides.cpu().float()
+            x_shifts = x_shifts.cpu()
+            y_shifts = y_shifts.cpu()
+
+        fg_mask, is_in_boxes_and_center = self.get_in_boxes_info(
+            gt_bboxes_per_image,
+            expanded_strides,
+            x_shifts,
+            y_shifts,
+            total_num_anchors,
+            num_gt,
+        )
+        # reference to: https://github.com/Megvii-BaseDetection/YOLOX/pull/811
+        # NOTE: Fix `selected index k out of range`
+        npa: int = fg_mask.sum().item()  # number of positive anchors
+
+        if npa == 0:
+            gt_matched_classes = torch.zeros(0, device=fg_mask.device).long()
+            pred_ious_this_matching = torch.rand(0, device=fg_mask.device)
+            matched_gt_inds = gt_matched_classes
+            num_fg = npa
+
+            if mode == 'cpu':
+                gt_matched_classes = gt_matched_classes.cuda()
+                fg_mask = fg_mask.cuda()
+                pred_ious_this_matching = pred_ious_this_matching.cuda()
+                matched_gt_inds = matched_gt_inds.cuda()
+                num_fg = num_fg.cuda()
+
+            return (
+                gt_matched_classes,
+                fg_mask,
+                pred_ious_this_matching,
+                matched_gt_inds,
+                num_fg,
+            )
+
+        bboxes_preds_per_image = bboxes_preds_per_image[fg_mask]
+        cls_preds_ = cls_preds[batch_idx][fg_mask]
+        obj_preds_ = obj_preds[batch_idx][fg_mask]
+        num_in_boxes_anchor = bboxes_preds_per_image.shape[0]
+
+        if mode == 'cpu':
+            gt_bboxes_per_image = gt_bboxes_per_image.cpu()
+            bboxes_preds_per_image = bboxes_preds_per_image.cpu()
+
+        pair_wise_ious = bboxes_iou(gt_bboxes_per_image,
+                                    bboxes_preds_per_image, False)
+
+        if (torch.isnan(pair_wise_ious.max())):
+            pair_wise_ious = bboxes_iou(gt_bboxes_per_image,
+                                        bboxes_preds_per_image, False)
+
+        gt_cls_per_image = (
+            F.one_hot(gt_classes.to(torch.int64),
+                      self.num_classes).float().unsqueeze(1).repeat(
+                1, num_in_boxes_anchor, 1))
+        pair_wise_ious_loss = -torch.log(pair_wise_ious + 1e-8)
+
+        if mode == 'cpu':
+            cls_preds_, obj_preds_ = cls_preds_.cpu(), obj_preds_.cpu()
+
+        if LooseVersion(torch.__version__) >= LooseVersion('1.6.0'):
+            with torch.cuda.amp.autocast(enabled=False):
+                cls_preds_ = (
+                        cls_preds_.float().unsqueeze(0).repeat(num_gt, 1,
+                                                               1).sigmoid_() *
+                        obj_preds_.float().unsqueeze(0).repeat(num_gt, 1,
+                                                               1).sigmoid_())
+                pair_wise_cls_loss = F.binary_cross_entropy(
+                    cls_preds_.sqrt_(), gt_cls_per_image,
+                    reduction='none').sum(-1)
+        else:
+            cls_preds_ = (
+                    cls_preds_.float().unsqueeze(0).repeat(num_gt, 1,
+                                                           1).sigmoid_() *
+                    obj_preds_.float().unsqueeze(0).repeat(num_gt, 1,
+                                                           1).sigmoid_())
+            pair_wise_cls_loss = F.binary_cross_entropy(
+                cls_preds_.sqrt_(), gt_cls_per_image, reduction='none').sum(-1)
+
+        del cls_preds_
+
+        cost = (
+                pair_wise_cls_loss + 3.0 * pair_wise_ious_loss + 100000.0 *
+                (~is_in_boxes_and_center))
+
+        (
+            num_fg,
+            gt_matched_classes,
+            pred_ious_this_matching,
+            matched_gt_inds,
+        ) = self.dynamic_k_matching(cost, pair_wise_ious, gt_classes, num_gt,
+                                    fg_mask)
+
+        del pair_wise_cls_loss, cost, pair_wise_ious, pair_wise_ious_loss
+
+        if mode == 'cpu':
+            gt_matched_classes = gt_matched_classes.cuda()
+            fg_mask = fg_mask.cuda()
+            pred_ious_this_matching = pred_ious_this_matching.cuda()
+            matched_gt_inds = matched_gt_inds.cuda()
+
+        return (
+            gt_matched_classes,
+            fg_mask,
+            pred_ious_this_matching,
+            matched_gt_inds,
+            num_fg,
+        )
+
+    def get_in_boxes_info(
+            self,
+            gt_bboxes_per_image,
+            expanded_strides,
+            x_shifts,
+            y_shifts,
+            total_num_anchors,
+            num_gt,
+    ):
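+        # Flags anchors whose centers fall inside a GT box or within a 2.5-stride radius
+        # of a GT center, and, for those anchors, which GTs they satisfy both tests for.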
+        expanded_strides_per_image = expanded_strides[0]
+        x_shifts_per_image = x_shifts[0] * expanded_strides_per_image
+        y_shifts_per_image = y_shifts[0] * expanded_strides_per_image
+        x_centers_per_image = (
+            (x_shifts_per_image +
+             0.5 * expanded_strides_per_image).unsqueeze(0).repeat(num_gt, 1)
+        )  # [n_anchor] -> [n_gt, n_anchor]
+        y_centers_per_image = (
+            (y_shifts_per_image +
+             0.5 * expanded_strides_per_image).unsqueeze(0).repeat(num_gt, 1))
+
+        gt_bboxes_per_image_l = (
+            (gt_bboxes_per_image[:, 0] -
+             0.5 * gt_bboxes_per_image[:, 2]).unsqueeze(1).repeat(
+                1, total_num_anchors))
+        gt_bboxes_per_image_r = (
+            (gt_bboxes_per_image[:, 0] +
+             0.5 * gt_bboxes_per_image[:, 2]).unsqueeze(1).repeat(
+                1, total_num_anchors))
+        gt_bboxes_per_image_t = (
+            (gt_bboxes_per_image[:, 1] -
+             0.5 * gt_bboxes_per_image[:, 3]).unsqueeze(1).repeat(
+                1, total_num_anchors))
+        gt_bboxes_per_image_b = (
+            (gt_bboxes_per_image[:, 1] +
+             0.5 * gt_bboxes_per_image[:, 3]).unsqueeze(1).repeat(
+                1, total_num_anchors))
+
+        b_l = x_centers_per_image - gt_bboxes_per_image_l
+        b_r = gt_bboxes_per_image_r - x_centers_per_image
+        b_t = y_centers_per_image - gt_bboxes_per_image_t
+        b_b = gt_bboxes_per_image_b - y_centers_per_image
+        bbox_deltas = torch.stack([b_l, b_t, b_r, b_b], 2)
+
+        is_in_boxes = bbox_deltas.min(dim=-1).values > 0.0
+        is_in_boxes_all = is_in_boxes.sum(dim=0) > 0
+        # in fixed center
+
+        center_radius = 2.5
+
+        gt_bboxes_per_image_l = gt_bboxes_per_image[:, 0].unsqueeze(1).repeat(
+            1, total_num_anchors
+        ) - center_radius * expanded_strides_per_image.unsqueeze(0)
+        gt_bboxes_per_image_r = gt_bboxes_per_image[:, 0].unsqueeze(1).repeat(
+            1, total_num_anchors
+        ) + center_radius * expanded_strides_per_image.unsqueeze(0)
+        gt_bboxes_per_image_t = gt_bboxes_per_image[:, 1].unsqueeze(1).repeat(
+            1, total_num_anchors
+        ) - center_radius * expanded_strides_per_image.unsqueeze(0)
+        gt_bboxes_per_image_b = gt_bboxes_per_image[:, 1].unsqueeze(1).repeat(
+            1, total_num_anchors
+        ) + center_radius * expanded_strides_per_image.unsqueeze(0)
+
+        c_l = x_centers_per_image - gt_bboxes_per_image_l
+        c_r = gt_bboxes_per_image_r - x_centers_per_image
+        c_t = y_centers_per_image - gt_bboxes_per_image_t
+        c_b = gt_bboxes_per_image_b - y_centers_per_image
+        center_deltas = torch.stack([c_l, c_t, c_r, c_b], 2)
+        is_in_centers = center_deltas.min(dim=-1).values > 0.0
+        is_in_centers_all = is_in_centers.sum(dim=0) > 0
+
+        # in boxes and in centers
+        is_in_boxes_anchor = is_in_boxes_all | is_in_centers_all
+
+        is_in_boxes_and_center = (
+                is_in_boxes[:, is_in_boxes_anchor]
+                & is_in_centers[:, is_in_boxes_anchor])
+        return is_in_boxes_anchor, is_in_boxes_and_center
+
+    def dynamic_k_matching(self, cost, pair_wise_ious, gt_classes, num_gt,
+                           fg_mask):
+
+        # Dynamic K
+        # ---------------------------------------------------------------
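+        # SimOTA-style matching: each GT takes k = clamp(sum of its top-10 IoUs, min=1)
+        # lowest-cost candidate anchors as positives.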
+        matching_matrix = torch.zeros_like(cost, dtype=torch.uint8)
+
+        ious_in_boxes_matrix = pair_wise_ious
+        n_candidate_k = min(10, ious_in_boxes_matrix.size(1))
+
+        topk_ious, _ = torch.topk(ious_in_boxes_matrix, n_candidate_k, dim=1)
+        dynamic_ks = torch.clamp(topk_ious.sum(1).int(), min=1)
+        dynamic_ks = dynamic_ks.tolist()
+
+        for gt_idx in range(num_gt):
+            _, pos_idx = torch.topk(
+                cost[gt_idx], k=dynamic_ks[gt_idx], largest=False)
+            matching_matrix[gt_idx][pos_idx] = 1
+
+        del topk_ious, dynamic_ks, pos_idx
+
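+        # If an anchor is assigned to multiple GTs, keep only the lowest-cost match.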
+        anchor_matching_gt = matching_matrix.sum(0)
+        if (anchor_matching_gt > 1).sum() > 0:
+            _, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0)
+            matching_matrix[:, anchor_matching_gt > 1] *= 0
+            matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1
+        fg_mask_inboxes = matching_matrix.sum(0) > 0
+        num_fg = fg_mask_inboxes.sum().item()
+
+        fg_mask[fg_mask.clone()] = fg_mask_inboxes
+
+        matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0)
+        gt_matched_classes = gt_classes[matched_gt_inds]
+
+        pred_ious_this_matching = (matching_matrix *
+                                   pair_wise_ious).sum(0)[fg_mask_inboxes]
+
+        return num_fg, gt_matched_classes, pred_ious_this_matching, matched_gt_inds
+
+
+if __name__ == '__main__':
+    from torchsummaryX import summary
+
+    head = PPYOLOEHead(80, 0.5, in_channels=[256, 512, 1024]).cuda()
+    head.training = False
+    fpn_out = (
+        torch.randn(1, 128, 80, 80).cuda(), torch.randn(1, 256, 40, 40).cuda(), torch.randn(1, 512, 20, 20).cuda())
+    summary(head, fpn_out)
diff --git a/easycv/models/detection/yolox/tood_head.py b/easycv/models/detection/yolox/tood_head.py
index 0477530a..4c7deae0 100644
--- a/easycv/models/detection/yolox/tood_head.py
+++ b/easycv/models/detection/yolox/tood_head.py
@@ -27,7 +27,7 @@ class TaskDecomposition(nn.Module):
 
     def __init__(self,
                  feat_channels,
-                 stacked_convs,
+                 stacked_convs=6,
                  la_down_rate=8,
                  conv_cfg=None,
                  norm_cfg=None):
@@ -99,6 +99,8 @@ def __init__(self,
                  obj_loss_type='l1',
                  reg_loss_type='iou',
                  stacked_convs=6,
+                 la_down_rate=8,
+                 conv_layers=2,
                  conv_cfg=None,
                  norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
                  ):
@@ -148,40 +150,64 @@ def __init__(self,
                     stride=1,
                     act=act,
                 ))
-            self.cls_convs.append(
-                nn.Sequential(*[
-                    Conv(
-                        in_channels=int(256 * width),
-                        out_channels=int(256 * width),
-                        ksize=3,
-                        stride=1,
-                        act=act,
-                    ),
-                    Conv(
-                        in_channels=int(256 * width),
-                        out_channels=int(256 * width),
-                        ksize=3,
-                        stride=1,
-                        act=act,
-                    ),
-                ]))
-            self.reg_convs.append(
-                nn.Sequential(*[
-                    Conv(
-                        in_channels=int(256 * width),
-                        out_channels=int(256 * width),
-                        ksize=3,
-                        stride=1,
-                        act=act,
-                    ),
-                    Conv(
-                        in_channels=int(256 * width),
-                        out_channels=int(256 * width),
-                        ksize=3,
-                        stride=1,
-                        act=act,
-                    ),
-                ]))
+
+            if conv_layers == 2:
+                self.cls_convs.append(
+                    nn.Sequential(*[
+                        Conv(
+                            in_channels=int(256 * width),
+                            out_channels=int(256 * width),
+                            ksize=3,
+                            stride=1,
+                            act=act,
+                        ),
+                        Conv(
+                            in_channels=int(256 * width),
+                            out_channels=int(256 * width),
+                            ksize=3,
+                            stride=1,
+                            act=act,
+                        ),
+                    ]))
+                self.reg_convs.append(
+                    nn.Sequential(*[
+                        Conv(
+                            in_channels=int(256 * width),
+                            out_channels=int(256 * width),
+                            ksize=3,
+                            stride=1,
+                            act=act,
+                        ),
+                        Conv(
+                            in_channels=int(256 * width),
+                            out_channels=int(256 * width),
+                            ksize=3,
+                            stride=1,
+                            act=act,
+                        ),
+                    ]))
+            elif conv_layers == 1:
+                self.cls_convs.append(
+                    nn.Sequential(*[
+                        Conv(
+                            in_channels=int(256 * width),
+                            out_channels=int(256 * width),
+                            ksize=3,
+                            stride=1,
+                            act=act,
+                        )
+                    ]))
+                self.reg_convs.append(
+                    nn.Sequential(*[
+                        Conv(
+                            in_channels=int(256 * width),
+                            out_channels=int(256 * width),
+                            ksize=3,
+                            stride=1,
+                            act=act,
+                        )
+                    ]))
+
             self.cls_preds.append(
                 nn.Conv2d(
                     in_channels=int(256 * width),
@@ -209,12 +235,12 @@ def __init__(self,
             self.cls_decomps.append(
                 TaskDecomposition(self.feat_channels,
                                   self.stacked_convs,
-                                  self.stacked_convs * 8,
+                                  self.stacked_convs * la_down_rate,
                                   self.conv_cfg, self.norm_cfg))
             self.reg_decomps.append(
                 TaskDecomposition(self.feat_channels,
                                   self.stacked_convs,
-                                  self.stacked_convs * 8,
+                                  self.stacked_convs * la_down_rate,
                                   self.conv_cfg, self.norm_cfg)
             )
 
diff --git a/easycv/models/detection/yolox/yolo_pafpn.py b/easycv/models/detection/yolox/yolo_pafpn.py
index e13f0a10..145d88f5 100644
--- a/easycv/models/detection/yolox/yolo_pafpn.py
+++ b/easycv/models/detection/yolox/yolo_pafpn.py
@@ -4,9 +4,17 @@
 import torch.nn as nn
 
 from easycv.models.backbones.darknet import CSPDarknet
-from easycv.models.backbones.network_blocks import BaseConv, CSPLayer, DWConv
+from easycv.models.backbones.efficientrep import EfficientRep
+from easycv.models.backbones.network_blocks import BaseConv, CSPLayer, DWConv, GSConv, VoVGSCSP
 from .attention import SE, CBAM, ECA
-from .ASFF import ASFF
+# from .ASFF import ASFF
+import math
+
+
+def make_divisible(x, divisor):
+    # Upward revision the value x to make it evenly divisible by the divisor.
+    return math.ceil(x / divisor) * divisor
+
 
 class YOLOPAFPN(nn.Module):
     """
@@ -21,77 +29,209 @@ def __init__(
         in_channels=[256, 512, 1024],
         depthwise=False,
         act='silu',
+        asff_channel=16,
         use_att=None,
-        spp_type='spp'
+        expand_kernel=3,
+        down_rate=32,
+        use_dconv=False,
+        use_expand=True,
+        spp_type='spp',
+        backbone='CSPDarknet',
+        neck='yolo',
+        neck_mode='all'
     ):
         super().__init__()
-        self.backbone = CSPDarknet(depth, width, depthwise=depthwise, act=act,spp_type=spp_type)
+        self.backbone_name = backbone
+        if backbone == "CSPDarknet":
+            self.backbone = CSPDarknet(depth, width, depthwise=depthwise, act=act)
+        else:
+            depth_mul = depth
+            width_mul = width
+            num_repeat_backbone = [1, 6, 12, 18, 6]
+            channels_list_backbone = [64, 128, 256, 512, 1024]
+            num_repeat_neck = [12, 12, 12, 12]
+            channels_list_neck = [256, 128, 128, 256, 256, 512]
+
+            channels = 3
+
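+            # Scale the repeat counts by the depth multiplier and round channel widths up
+            # to multiples of 8, following the EfficientRep (YOLOv6-style) layout above.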
+            num_repeat = [(max(round(i * depth_mul), 1) if i > 1 else i) for i in
+                          (num_repeat_backbone + num_repeat_neck)]
+
+            channels_list = [make_divisible(i * width_mul, 8) for i in (channels_list_backbone + channels_list_neck)]
+            self.backbone = EfficientRep(in_channels=channels, channels_list=channels_list, num_repeats=num_repeat)
+
+
         self.in_features = in_features
         self.in_channels = in_channels
         Conv = DWConv if depthwise else BaseConv
 
-        self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
-        self.lateral_conv0 = BaseConv(
-            int(in_channels[2] * width),
-            int(in_channels[1] * width),
-            1,
-            1,
-            act=act)
-        self.C3_p4 = CSPLayer(
-            int(2 * in_channels[1] * width),
-            int(in_channels[1] * width),
-            round(3 * depth),
-            False,
-            depthwise=depthwise,
-            act=act)  # cat
-
-        self.reduce_conv1 = BaseConv(
-            int(in_channels[1] * width),
-            int(in_channels[0] * width),
-            1,
-            1,
-            act=act)
-        self.C3_p3 = CSPLayer(
-            int(2 * in_channels[0] * width),
-            int(in_channels[0] * width),
-            round(3 * depth),
-            False,
-            depthwise=depthwise,
-            act=act)
-
-        # bottom-up conv
-        self.bu_conv2 = Conv(
-            int(in_channels[0] * width),
-            int(in_channels[0] * width),
-            3,
-            2,
-            act=act)
-        self.C3_n3 = CSPLayer(
-            int(2 * in_channels[0] * width),
-            int(in_channels[1] * width),
-            round(3 * depth),
-            False,
-            depthwise=depthwise,
-            act=act)
-
-        # bottom-up conv
-        self.bu_conv1 = Conv(
-            int(in_channels[1] * width),
-            int(in_channels[1] * width),
-            3,
-            2,
-            act=act)
-        self.C3_n4 = CSPLayer(
-            int(2 * in_channels[1] * width),
-            int(in_channels[2] * width),
-            round(3 * depth),
-            False,
-            depthwise=depthwise,
-            act=act)
+        self.neck = neck
+        self.neck_mode = neck_mode
+        if neck == 'yolo':
+            self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
+            self.lateral_conv0 = BaseConv(
+                int(in_channels[2] * width),
+                int(in_channels[1] * width),
+                1,
+                1,
+                act=act)
+            self.C3_p4 = CSPLayer(
+                int(2 * in_channels[1] * width),
+                int(in_channels[1] * width),
+                round(3 * depth),
+                False,
+                depthwise=depthwise,
+                act=act)  # cat
+
+            self.reduce_conv1 = BaseConv(
+                int(in_channels[1] * width),
+                int(in_channels[0] * width),
+                1,
+                1,
+                act=act)
+            self.C3_p3 = CSPLayer(
+                int(2 * in_channels[0] * width),
+                int(in_channels[0] * width),
+                round(3 * depth),
+                False,
+                depthwise=depthwise,
+                act=act)
+
+            # bottom-up conv
+            self.bu_conv2 = Conv(
+                int(in_channels[0] * width),
+                int(in_channels[0] * width),
+                3,
+                2,
+                act=act)
+            self.C3_n3 = CSPLayer(
+                int(2 * in_channels[0] * width),
+                int(in_channels[1] * width),
+                round(3 * depth),
+                False,
+                depthwise=depthwise,
+                act=act)
+
+            # bottom-up conv
+            self.bu_conv1 = Conv(
+                int(in_channels[1] * width),
+                int(in_channels[1] * width),
+                3,
+                2,
+                act=act)
+            self.C3_n4 = CSPLayer(
+                int(2 * in_channels[1] * width),
+                int(in_channels[2] * width),
+                round(3 * depth),
+                False,
+                depthwise=depthwise,
+                act=act)
+        else:
+            # gsconv
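+            # Slim-neck variant: GSConv replaces the lateral/downsample convs, and when
+            # neck_mode == 'all' VoVGSCSP blocks replace the CSPLayer fusion blocks.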
+            self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
+            self.gsconv1 = GSConv(
+                int(in_channels[2] * width),
+                int(in_channels[1] * width),
+                1,
+                1,
+                act=act)
+
+            self.gsconv2 = GSConv(
+                int(in_channels[1] * width),
+                int(in_channels[0] * width),
+                1,
+                1,
+                act=act
+            )
+
+            self.gsconv4 = GSConv(
+                int(in_channels[0] * width),
+                int(in_channels[0] * width),
+                3,
+                2,
+                act=act
+            )
+
+            self.gsconv5 = GSConv(
+                int(in_channels[1] * width),
+                int(in_channels[1] * width),
+                3,
+                2,
+                act=act
+            )
+
+            if self.neck_mode == 'all':
+                self.vovGSCSP1 = VoVGSCSP(
+                    int(2 * in_channels[1] * width),
+                    int(in_channels[1] * width),
+                    round(3 * depth),
+                    False,
+                )
+
+                self.gsconv3 = GSConv(
+                    int(2 * in_channels[0] * width),
+                    int(2 * in_channels[0] * width),
+                    1,
+                    1,
+                    act=act
+                )
+                self.vovGSCSP2 = VoVGSCSP(
+                    int(2 * in_channels[0] * width),
+                    int(in_channels[0] * width),
+                    round(3 * depth),
+                    False,
+                )
+
+                self.vovGSCSP3 = VoVGSCSP(
+                    int(2 * in_channels[0] * width),
+                    int(in_channels[1] * width),
+                    round(3 * depth),
+                    False,
+                )
+
+                self.vovGSCSP4 = VoVGSCSP(
+                    int(2 * in_channels[1] * width),
+                    int(in_channels[2] * width),
+                    round(3 * depth),
+                    False,
+                )
+            else:
+                self.C3_p4 = CSPLayer(
+                    int(2 * in_channels[1] * width),
+                    int(in_channels[1] * width),
+                    round(3 * depth),
+                    False,
+                    depthwise=depthwise,
+                    act=act)  # cat
+
+                self.C3_p3 = CSPLayer(
+                    int(2 * in_channels[0] * width),
+                    int(in_channels[0] * width),
+                    round(3 * depth),
+                    False,
+                    depthwise=depthwise,
+                    act=act)
+
+                self.C3_n3 = CSPLayer(
+                    int(2 * in_channels[0] * width),
+                    int(in_channels[1] * width),
+                    round(3 * depth),
+                    False,
+                    depthwise=depthwise,
+                    act=act)
+
+                self.C3_n4 = CSPLayer(
+                    int(2 * in_channels[1] * width),
+                    int(in_channels[2] * width),
+                    round(3 * depth),
+                    False,
+                    depthwise=depthwise,
+                    act=act)
 
         self.use_att=use_att
 
-        if self.use_att!=None and self.use_att!='ASFF':
+        if self.use_att is not None and self.use_att != 'ASFF' and self.use_att != 'ASFF_sim':
             # add attention layer
             if self.use_att=="CBAM":
                 ATT = CBAM
@@ -106,10 +246,18 @@ def __init__(
             self.att_2 = ATT(int(in_channels[1] * width))  # for the 512-channel dark4 output
             self.att_3 = ATT(int(in_channels[0] * width))  # for the 256-channel dark3 output
 
-        if self.use_att=='ASFF':
-            self.asff_1 = ASFF(level=0, multiplier=width)
-            self.asff_2 = ASFF(level=1, multiplier=width)
-            self.asff_3 = ASFF(level=2, multiplier=width)
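+        # ASFF fuses the three PAFPN outputs with learned spatial weights; 'ASFF_sim'
+        # selects the simplified variant implemented in ASFF_sim.py.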
+        if self.use_att == 'ASFF' or self.use_att == 'ASFF_sim':
+            if self.use_att == 'ASFF':
+                from .ASFF import ASFF
+                self.asff_1 = ASFF(level=0, multiplier=width, asff_channel=asff_channel, act=act)
+                self.asff_2 = ASFF(level=1, multiplier=width, asff_channel=asff_channel, act=act)
+                self.asff_3 = ASFF(level=2, multiplier=width, asff_channel=asff_channel, act=act)
+            else:
+                from .ASFF_sim import ASFF
+                self.asff_1 = ASFF(level=0, multiplier=width, asff_channel=asff_channel, act=act,
+                                   expand_kernel=expand_kernel, down_rate=down_rate,
+                                   use_dconv=use_dconv, use_expand=use_expand)
+                self.asff_2 = ASFF(level=1, multiplier=width, asff_channel=asff_channel, act=act,
+                                   expand_kernel=expand_kernel, down_rate=down_rate,
+                                   use_dconv=use_dconv, use_expand=use_expand)
+                self.asff_3 = ASFF(level=2, multiplier=width, asff_channel=asff_channel, act=act,
+                                   expand_kernel=expand_kernel, down_rate=down_rate,
+                                   use_dconv=use_dconv, use_expand=use_expand)
+
 
     def forward(self, input):
         """
@@ -121,37 +269,78 @@ def forward(self, input):
         """
 
         #  backbone
-        out_features = self.backbone(input)
-        features = [out_features[f] for f in self.in_features]
-        [x2, x1, x0] = features
+        if self.backbone_name == "CSPDarknet":
+            out_features = self.backbone(input)
+            features = [out_features[f] for f in self.in_features]
+            [x2, x1, x0] = features
+        else:
+            features = self.backbone(input)
+            [x2, x1, x0] = features
 
         # add attention
-        if self.use_att!=None and self.use_att!='ASFF':
+        if self.use_att is not None and self.use_att != 'ASFF' and self.use_att != 'ASFF_sim':
             x0 = self.att_1(x0)
             x1 = self.att_2(x1)
             x2 = self.att_3(x2)
 
-        fpn_out0 = self.lateral_conv0(x0)  # 1024->512/32
-        f_out0 = self.upsample(fpn_out0)  # 512/16
-        f_out0 = torch.cat([f_out0, x1], 1)  # 512->1024/16
-        f_out0 = self.C3_p4(f_out0)  # 1024->512/16
+        if self.neck == 'yolo':
+            fpn_out0 = self.lateral_conv0(x0)  # 1024->512/32
+            f_out0 = self.upsample(fpn_out0)  # 512/16
+            f_out0 = torch.cat([f_out0, x1], 1)  # 512->1024/16
+            f_out0 = self.C3_p4(f_out0)  # 1024->512/16
+
+            fpn_out1 = self.reduce_conv1(f_out0)  # 512->256/16
+            f_out1 = self.upsample(fpn_out1)  # 256/8
+            f_out1 = torch.cat([f_out1, x2], 1)  # 256->512/8
+            pan_out2 = self.C3_p3(f_out1)  # 512->256/8
+
+            p_out1 = self.bu_conv2(pan_out2)  # 256->256/16
+            p_out1 = torch.cat([p_out1, fpn_out1], 1)  # 256->512/16
+            pan_out1 = self.C3_n3(p_out1)  # 512->512/16
+
+            p_out0 = self.bu_conv1(pan_out1)  # 512->512/32
+            p_out0 = torch.cat([p_out0, fpn_out0], 1)  # 512->1024/32
+            pan_out0 = self.C3_n4(p_out0)  # 1024->1024/32
+        else:
+            # gsconv
+            fpn_out0 = self.gsconv1(x0)  # 1024->512/32
+            f_out0 = self.upsample(fpn_out0)  # 512/16
+            f_out0 = torch.cat([f_out0, x1], 1)  # 512->1024/16
+            if self.neck_mode == 'all':
+                f_out0 = self.vovGSCSP1(f_out0)  # 1024->512/16
+            else:
+                f_out0 = self.C3_p4(f_out0)
 
-        fpn_out1 = self.reduce_conv1(f_out0)  # 512->256/16
-        f_out1 = self.upsample(fpn_out1)  # 256/8
-        f_out1 = torch.cat([f_out1, x2], 1)  # 256->512/8
-        pan_out2 = self.C3_p3(f_out1)  # 512->256/8
+            fpn_out1 = self.gsconv2(f_out0)  # 512->256/16
+            f_out1 = self.upsample(fpn_out1)  # 256/8
+            f_out1 = torch.cat([f_out1, x2], 1)  # 256->512/8
+            if self.neck_mode == 'all':
+                f_out1 = self.gsconv3(f_out1)
+                pan_out2 = self.vovGSCSP2(f_out1)  # 512->256/8
+            else:
+                pan_out2 = self.C3_p3(f_out1)  # 512->256/8
 
-        p_out1 = self.bu_conv2(pan_out2)  # 256->256/16
-        p_out1 = torch.cat([p_out1, fpn_out1], 1)  # 256->512/16
-        pan_out1 = self.C3_n3(p_out1)  # 512->512/16
+            p_out1 = self.gsconv4(pan_out2)  # 256->256/16
+            p_out1 = torch.cat([p_out1, fpn_out1], 1)  # 256->512/16
+            if self.neck_mode == 'all':
+                pan_out1 = self.vovGSCSP3(p_out1)  # 512->512/16
+            else:
+                pan_out1 = self.C3_n3(p_out1)  # 512->512/16
 
-        p_out0 = self.bu_conv1(pan_out1)  # 512->512/32
-        p_out0 = torch.cat([p_out0, fpn_out0], 1)  # 512->1024/32
-        pan_out0 = self.C3_n4(p_out0)  # 1024->1024/32
+            p_out0 = self.gsconv5(pan_out1)  # 512->512/32
+            p_out0 = torch.cat([p_out0, fpn_out0], 1)  # 512->1024/32
+            if self.neck_mode == 'all':
+                pan_out0 = self.vovGSCSP4(p_out0)  # 1024->1024/32
+            else:
+                pan_out0 = self.C3_n4(p_out0)  # 1024->1024/32
 
         outputs = (pan_out2, pan_out1, pan_out0)
 
-        if self.use_att == 'ASFF':
+        if self.use_att == 'ASFF' or self.use_att == 'ASFF_sim':
             pan_out0 = self.asff_1(outputs)
             pan_out1 = self.asff_2(outputs)
             pan_out2 = self.asff_3(outputs)
diff --git a/easycv/models/detection/yolox/yolox.py b/easycv/models/detection/yolox/yolox.py
index 492f7870..11d6482d 100644
--- a/easycv/models/detection/yolox/yolox.py
+++ b/easycv/models/detection/yolox/yolox.py
@@ -9,6 +9,7 @@
 from easycv.models.base import BaseModel
 from easycv.models.builder import MODELS
 from easycv.models.detection.utils import postprocess
+# from .ppyoloe_head import PPYOLOEHead
 from .yolo_head import YOLOXHead
 from .yolo_pafpn import YOLOPAFPN
 from .tood_head import TOODHead
@@ -20,6 +21,13 @@ def init_yolo(M):
             m.eps = 1e-3
             m.momentum = 0.03
 
+def cxcywh2xyxy(bboxes):
+    # In-place conversion from (cx, cy, w, h) to (x1, y1, x2, y2); the width/height
+    # columns still hold w and h when x2/y2 are computed, so the ordering is safe.
+    bboxes[..., 0] = bboxes[..., 0] - bboxes[..., 2] * 0.5  # x1
+    bboxes[..., 1] = bboxes[..., 1] - bboxes[..., 3] * 0.5  # y1
+    bboxes[..., 2] = bboxes[..., 0] + bboxes[..., 2]  # x2 = x1 + w
+    bboxes[..., 3] = bboxes[..., 1] + bboxes[..., 3]  # y2 = y1 + h
+    return bboxes
+
 
 @MODELS.register_module
 class YOLOX(BaseModel):
@@ -50,6 +58,18 @@ def __init__(self,
                  reg_loss_type: str = 'l1',
                  spp_type: str = 'spp',
                  head_type: str = 'yolox',
+                 neck: str = 'yolo',
+                 neck_mode: str = 'all',
+                 act: str = 'silu',
+                 asff_channel: int = 16,
+                 stacked_convs: int = 6,
+                 la_down_rate: int = 8,
+                 conv_layers: int = 2,
+                 backbone="CSPDarknet",
+                 expand_kernel=3,
+                 down_rate=32,
+                 use_dconv=False,
+                 use_expand=True,
                  pretrained: str = None):
         super(YOLOX, self).__init__()
         assert model_type in self.param_map, f'invalid model_type for yolox {model_type}, valid ones are {list(self.param_map.keys())}'
@@ -58,20 +78,43 @@ def __init__(self,
         depth = self.param_map[model_type][0]
         width = self.param_map[model_type][1]
 
-        self.backbone = YOLOPAFPN(depth, width, in_channels=in_channels, use_att=use_att, spp_type=spp_type)
-
-        if head_type=='yolox':
-            self.head = YOLOXHead(num_classes, width, in_channels=in_channels, obj_loss_type=obj_loss_type, reg_loss_type=reg_loss_type)
-        elif head_type=='tood':
-            self.head = TOODHead(num_classes, width, in_channels=in_channels, obj_loss_type=obj_loss_type, reg_loss_type=reg_loss_type)
+        self.backbone = YOLOPAFPN(
+            depth, width, in_channels=in_channels, asff_channel=asff_channel, act=act,
+            use_att=use_att, spp_type=spp_type, backbone=backbone, neck=neck,
+            neck_mode=neck_mode, expand_kernel=expand_kernel, down_rate=down_rate,
+            use_dconv=use_dconv, use_expand=use_expand)
+
+        self.head_type = head_type
+        if head_type == 'yolox':
+            self.head = YOLOXHead(num_classes, width, in_channels=in_channels, act=act,
+                                  obj_loss_type=obj_loss_type, reg_loss_type=reg_loss_type)
+            self.head.initialize_biases(1e-2)
+        elif head_type == 'tood':
+            self.head = TOODHead(num_classes, width, in_channels=in_channels, act=act,
+                                 obj_loss_type=obj_loss_type, reg_loss_type=reg_loss_type,
+                                 stacked_convs=stacked_convs, la_down_rate=la_down_rate,
+                                 conv_layers=conv_layers)
+            self.head.initialize_biases(1e-2)
+        elif head_type == 'ppyoloe':
+            from .ppyoloe_head import PPYOLOEHead
+            self.head = PPYOLOEHead(
+                in_channels=in_channels,
+                width=width,
+                strides=[8, 16, 32],
+                static_assigner_epoch=4,
+                use_varifocal_loss=True,
+                # eval_input_size=self.test_size,
+                eval_input_size=None,
+                loss_weight={
+                    'class': 1.0,
+                    'iou': 2.5,
+                    'dfl': 0.5
+                },
+                # static_assigner=ATSSAssigner(self.atss_topk, num_classes=self.num_classes),
+                # assigner=TaskAlignedAssigner(topk=self.tal_topk, alpha=1.0, beta=6.0)
+            )
 
         self.apply(init_yolo)  # init_yolo(self)
-        self.head.initialize_biases(1e-2)
+
 
         self.num_classes = num_classes
         self.test_conf = test_conf
         self.nms_thre = nms_thre
         self.test_size = test_size
+        self.epoch_counter = 0
 
     def forward_train(self,
                       img: Tensor,
@@ -94,26 +137,52 @@ def forward_train(self,
         targets = torch.cat([gt_labels, gt_bboxes], dim=2)
 
 
+        if self.head_type != 'ppyoloe':
+            loss, iou_loss, conf_loss, cls_loss, l1_loss, num_fg = self.head(
+                fpn_outs, targets, img)
+
+            outputs = {
+                'total_loss':
+                    loss,
+                'iou_l':
+                    iou_loss,
+                'conf_l':
+                    conf_loss,
+                'cls_l':
+                    cls_loss,
+                'img_h':
+                    torch.tensor(img_metas[0]['img_shape'][0],
+                                 device=loss.device).float(),
+                'img_w':
+                    torch.tensor(img_metas[0]['img_shape'][1],
+                                 device=loss.device).float()
+            }
+
+        else:
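+            # The PPYOLOE head expects xyxy targets, so convert the cxcywh boxes in place
+            # and pass the current epoch so the head can switch its label assigner.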
+            targets[..., 1:] = cxcywh2xyxy(targets[..., 1:])
+            extra_info = {}
+            extra_info['epoch'] = self.epoch_counter
+
+            yolo_losses = self.head(fpn_outs, targets, extra_info)
+
+            outputs = {
+                'total_loss':
+                    yolo_losses['total_loss'],
+                'iou_l':
+                    yolo_losses['loss_iou'],
+                'conf_l':
+                    yolo_losses['loss_dfl'],
+                'cls_l':
+                    yolo_losses['loss_cls'],
+                'img_h':
+                    torch.tensor(img_metas[0]['img_shape'][0],
+                                 device=yolo_losses['total_loss'].device).float(),
+                'img_w':
+                    torch.tensor(img_metas[0]['img_shape'][1],
+                                 device=yolo_losses['total_loss'].device).float()
+            }
 
-        loss, iou_loss, conf_loss, cls_loss, l1_loss, num_fg = self.head(
-            fpn_outs, targets, img)
-
-        outputs = {
-            'total_loss':
-            loss,
-            'iou_l':
-            iou_loss,
-            'conf_l':
-            conf_loss,
-            'cls_l':
-            cls_loss,
-            'img_h':
-            torch.tensor(img_metas[0]['img_shape'][0],
-                         device=loss.device).float(),
-            'img_w':
-            torch.tensor(img_metas[0]['img_shape'][1],
-                         device=loss.device).float()
-        }
         return outputs
 
     def forward_test(self, img: Tensor, img_metas=None) -> Tensor:
diff --git a/easycv/runner/ev_runner.py b/easycv/runner/ev_runner.py
index c1e72f23..7b2a396c 100644
--- a/easycv/runner/ev_runner.py
+++ b/easycv/runner/ev_runner.py
@@ -88,6 +88,7 @@ def train(self, data_loader, **kwargs):
         time.sleep(2)  # Prevent possible deadlock during epoch transition
         for i, data_batch in enumerate(self.data_loader):
             self._inner_iter = i
+
             self.call_hook('before_train_iter')
             # only in amp from pytorch 1.6 or later, we should use amp.autocast
             if self.fp16_enable and LooseVersion(

From 2e872573f6e9f468678fd05ed934d03001a0ed8b Mon Sep 17 00:00:00 2001
From: "yanhaiqiang.yhq" <yanhaiqiang.yhq@alibaba-inc.com>
Date: Wed, 27 Jul 2022 17:37:33 +0800
Subject: [PATCH 08/69] fix config

---
 configs/detection/yolox/yolox_yolo6_gsconv_asff_sim.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/configs/detection/yolox/yolox_yolo6_gsconv_asff_sim.py b/configs/detection/yolox/yolox_yolo6_gsconv_asff_sim.py
index ba98448a..73ed1570 100644
--- a/configs/detection/yolox/yolox_yolo6_gsconv_asff_sim.py
+++ b/configs/detection/yolox/yolox_yolo6_gsconv_asff_sim.py
@@ -49,7 +49,7 @@
 
 # dataset settings
 # data_root = 'data/coco/'
-data_root = '/apsarapangu/disk5/zxy/data/coco/'
+data_root = '/root/database/coco/'
 # data_root = '/mnt/data/nas/data/detection/coco/'
 # data_root = '/cpfs01/shared/public/dataset/coco2017/'
 
@@ -123,7 +123,7 @@
     label_padding=False)
 
 data = dict(
-    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
 
 # additional hooks
 interval = 10
@@ -167,7 +167,7 @@
 
 # optimizer
 optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+    type='SGD', lr=0.005, momentum=0.9, weight_decay=5e-4, nesterov=True)
 optimizer_config = {}
 
 # learning policy

From 5bc6e00b23278939e14d7fde697f9b3c76cf8c15 Mon Sep 17 00:00:00 2001
From: "yanhaiqiang.yhq" <yanhaiqiang.yhq@alibaba-inc.com>
Date: Fri, 5 Aug 2022 04:25:32 +0000
Subject: [PATCH 09/69] fix repvgg_yolox_backbone refer2 repvgg

---
 easycv/models/backbones/efficientrep.py       |  11 +-
 .../models/backbones/repvgg_yolox_backbone.py | 334 ++++++++++++++++++
 easycv/models/backbones/yolo6_blocks.py       |  16 -
 .../detection/detectors/yolox/ASFF_sim.py     |   3 +-
 4 files changed, 343 insertions(+), 21 deletions(-)
 create mode 100644 easycv/models/backbones/repvgg_yolox_backbone.py

diff --git a/easycv/models/backbones/efficientrep.py b/easycv/models/backbones/efficientrep.py
index 51d36e16..c3ae4a1f 100644
--- a/easycv/models/backbones/efficientrep.py
+++ b/easycv/models/backbones/efficientrep.py
@@ -1,13 +1,9 @@
 from torch import nn
 from easycv.models.backbones.yolo6_blocks import RepVGGBlock, RepBlock, SimSPPF
-from torchsummaryX import summary
 import math
 import torch
 
 
-def make_divisible(x, divisor):
-    # Upward revision the value x to make it evenly divisible by the divisor.
-    return math.ceil(x / divisor) * divisor
 
 
 class EfficientRep(nn.Module):
@@ -110,6 +106,9 @@ def forward(self, x):
         return tuple(outputs)
 
 if __name__=='__main__':
+
+    from torchsummaryX import summary
+
     depth_mul = 0.33
     width_mul = 0.5
     num_repeat_backbone = [1, 6, 12, 18, 6]
@@ -122,6 +121,10 @@ def forward(self, x):
     num_repeat = [(max(round(i * depth_mul), 1) if i > 1 else i) for i in
                   (num_repeat_backbone + num_repeat_neck)]
 
+    def make_divisible(x, divisor):
+        # Upward revision the value x to make it evenly divisible by the divisor.
+        return math.ceil(x / divisor) * divisor
+
     channels_list = [make_divisible(i * width_mul, 8) for i in (channels_list_backbone + channels_list_neck)]
     model = EfficientRep(in_channels=channels, channels_list=channels_list, num_repeats=num_repeat)
     for layer in model.modules():
diff --git a/easycv/models/backbones/repvgg_yolox_backbone.py b/easycv/models/backbones/repvgg_yolox_backbone.py
new file mode 100644
index 00000000..263ce6b0
--- /dev/null
+++ b/easycv/models/backbones/repvgg_yolox_backbone.py
@@ -0,0 +1,334 @@
+# Borrows some code from https://github.com/DingXiaoH/RepVGG/repvgg.py (MIT2.0)
+import torch.nn as nn
+import numpy as np
+import torch
+import copy
+import warnings
+
+
+def conv_bn(in_channels, out_channels, kernel_size, stride, padding, groups=1):
+    '''Basic cell for rep-style block, including conv and bn'''
+    result = nn.Sequential()
+    result.add_module('conv', nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
+                                                  kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, bias=False))
+    result.add_module('bn', nn.BatchNorm2d(num_features=out_channels))
+    return result
+
+class RepVGGBlock(nn.Module):
+    def __init__(self, in_channels, out_channels, kernel_size=3,
+                 stride=1, padding=1, dilation=1, groups=1, padding_mode='zeros', deploy=False, use_se=False):
+        super(RepVGGBlock, self).__init__()
+        self.deploy = deploy
+        self.groups = groups
+        self.in_channels = in_channels
+
+        assert kernel_size == 3
+        assert padding == 1
+
+        padding_11 = padding - kernel_size // 2
+
+        self.nonlinearity = nn.ReLU()
+
+        if use_se:
+            self.se = SEBlock(out_channels, internal_neurons=out_channels // 16)
+        else:
+            self.se = nn.Identity()
+
+        if deploy:
+            self.rbr_reparam = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride,
+                                      padding=padding, dilation=dilation, groups=groups, bias=True, padding_mode=padding_mode)
+
+        else:
+            self.rbr_identity = nn.BatchNorm2d(num_features=in_channels) if out_channels == in_channels and stride == 1 else None
+            self.rbr_dense = conv_bn(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups)
+            self.rbr_1x1 = conv_bn(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride, padding=padding_11, groups=groups)
+            print('RepVGG Block, identity = ', self.rbr_identity)
+
+
+    def forward(self, inputs):
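+        # Deploy mode uses the single re-parameterized 3x3 conv; training mode sums the
+        # 3x3 branch, the 1x1 branch and the optional identity (BN-only) branch.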
+        if hasattr(self, 'rbr_reparam'):
+            return self.nonlinearity(self.se(self.rbr_reparam(inputs)))
+
+        if self.rbr_identity is None:
+            id_out = 0
+        else:
+            id_out = self.rbr_identity(inputs)
+
+        return self.nonlinearity(self.se(self.rbr_dense(inputs) + self.rbr_1x1(inputs) + id_out))
+
+
+    #   Optional. This improves the accuracy and facilitates quantization.
+    #   1.  Cancel the original weight decay on rbr_dense.conv.weight and rbr_1x1.conv.weight.
+    #   2.  Use like this.
+    #       loss = criterion(....)
+    #       for every RepVGGBlock blk:
+    #           loss += weight_decay_coefficient * 0.5 * blk.get_cust_L2()
+    #       optimizer.zero_grad()
+    #       loss.backward()
+    def get_custom_L2(self):
+        K3 = self.rbr_dense.conv.weight
+        K1 = self.rbr_1x1.conv.weight
+        t3 = (self.rbr_dense.bn.weight / ((self.rbr_dense.bn.running_var + self.rbr_dense.bn.eps).sqrt())).reshape(-1, 1, 1, 1).detach()
+        t1 = (self.rbr_1x1.bn.weight / ((self.rbr_1x1.bn.running_var + self.rbr_1x1.bn.eps).sqrt())).reshape(-1, 1, 1, 1).detach()
+
+        l2_loss_circle = (K3 ** 2).sum() - (K3[:, :, 1:2, 1:2] ** 2).sum()      # The L2 loss of the "circle" of weights in 3x3 kernel. Use regular L2 on them.
+        eq_kernel = K3[:, :, 1:2, 1:2] * t3 + K1 * t1                           # The equivalent resultant central point of 3x3 kernel.
+        l2_loss_eq_kernel = (eq_kernel ** 2 / (t3 ** 2 + t1 ** 2)).sum()        # Normalize for an L2 coefficient comparable to regular L2.
+        return l2_loss_eq_kernel + l2_loss_circle
+
+
+
+    #   This func derives the equivalent kernel and bias in a DIFFERENTIABLE way.
+    #   You can get the equivalent kernel and bias at any time and do whatever you want,
+        #   for example, apply some penalties or constraints during training, just like you do to the other models.
+    #   May be useful for quantization or pruning.
+    def get_equivalent_kernel_bias(self):
+        kernel3x3, bias3x3 = self._fuse_bn_tensor(self.rbr_dense)
+        kernel1x1, bias1x1 = self._fuse_bn_tensor(self.rbr_1x1)
+        kernelid, biasid = self._fuse_bn_tensor(self.rbr_identity)
+        return kernel3x3 + self._pad_1x1_to_3x3_tensor(kernel1x1) + kernelid, bias3x3 + bias1x1 + biasid
+
+    def _pad_1x1_to_3x3_tensor(self, kernel1x1):
+        if kernel1x1 is None:
+            return 0
+        else:
+            return torch.nn.functional.pad(kernel1x1, [1,1,1,1])
+
+    def _fuse_bn_tensor(self, branch):
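+        # Fold BN statistics into the preceding conv weights; for the identity branch,
+        # an equivalent 3x3 identity kernel is built first.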
+        if branch is None:
+            return 0, 0
+        if isinstance(branch, nn.Sequential):
+            kernel = branch.conv.weight
+            running_mean = branch.bn.running_mean
+            running_var = branch.bn.running_var
+            gamma = branch.bn.weight
+            beta = branch.bn.bias
+            eps = branch.bn.eps
+        else:
+            assert isinstance(branch, nn.BatchNorm2d)
+            if not hasattr(self, 'id_tensor'):
+                input_dim = self.in_channels // self.groups
+                kernel_value = np.zeros((self.in_channels, input_dim, 3, 3), dtype=np.float32)
+                for i in range(self.in_channels):
+                    kernel_value[i, i % input_dim, 1, 1] = 1
+                self.id_tensor = torch.from_numpy(kernel_value).to(branch.weight.device)
+            kernel = self.id_tensor
+            running_mean = branch.running_mean
+            running_var = branch.running_var
+            gamma = branch.weight
+            beta = branch.bias
+            eps = branch.eps
+        std = (running_var + eps).sqrt()
+        t = (gamma / std).reshape(-1, 1, 1, 1)
+        return kernel * t, beta - running_mean * gamma / std
+
+    def switch_to_deploy(self):
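+        # Fuse the three training-time branches into one 3x3 conv and drop the originals,
+        # so inference runs a plain deploy-mode RepVGG block.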
+        if hasattr(self, 'rbr_reparam'):
+            return
+        kernel, bias = self.get_equivalent_kernel_bias()
+        self.rbr_reparam = nn.Conv2d(in_channels=self.rbr_dense.conv.in_channels, out_channels=self.rbr_dense.conv.out_channels,
+                                     kernel_size=self.rbr_dense.conv.kernel_size, stride=self.rbr_dense.conv.stride,
+                                     padding=self.rbr_dense.conv.padding, dilation=self.rbr_dense.conv.dilation, groups=self.rbr_dense.conv.groups, bias=True)
+        self.rbr_reparam.weight.data = kernel
+        self.rbr_reparam.bias.data = bias
+        for para in self.parameters():
+            para.detach_()
+        self.__delattr__('rbr_dense')
+        self.__delattr__('rbr_1x1')
+        if hasattr(self, 'rbr_identity'):
+            self.__delattr__('rbr_identity')
+        if hasattr(self, 'id_tensor'):
+            self.__delattr__('id_tensor')
+        self.deploy = True
+
+class ConvBNAct(nn.Module):
+    '''Conv + BN with a configurable activation (ReLU or SiLU)'''
+    def __init__(self, in_channels, out_channels, kernel_size, stride, groups=1, bias=False, act='relu'):
+        super().__init__()
+        padding = kernel_size // 2
+        self.conv = nn.Conv2d(
+            in_channels,
+            out_channels,
+            kernel_size=kernel_size,
+            stride=stride,
+            padding=padding,
+            groups=groups,
+            bias=bias,
+        )
+        self.bn = nn.BatchNorm2d(out_channels)
+
+        if act == 'relu':
+            self.act = nn.ReLU()
+        elif act == 'silu':
+            self.act = nn.SiLU()
+        else:
+            raise ValueError(f'Unsupported activation type: {act}')
+
+    def forward(self, x):
+        return self.act(self.bn(self.conv(x)))
+
+    def forward_fuse(self, x):
+        return self.act(self.conv(x))
+
+class ConvBNReLU(ConvBNAct):
+    def __init__(self, in_channels, out_channels, kernel_size, stride, groups=1, bias=False):
+        super().__init__(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, groups=groups, bias=bias, act='relu')
+
+class ConvBNSiLU(ConvBNAct):
+    def __init__(self, in_channels, out_channels, kernel_size, stride, groups=1, bias=False):
+        super().__init__(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, groups=groups, bias=bias, act='silu')
+
+class MT_SPPF(nn.Module):
+    '''Simplified SPPF with ReLU activation'''
+    def __init__(self, in_channels, out_channels, kernel_size=5):
+        super().__init__()
+        c_ = in_channels // 2  # hidden channels
+        self.cv1 = ConvBNReLU(in_channels, c_, 1, 1)
+        self.cv2 = ConvBNReLU(c_ * 4, out_channels, 1, 1)
+        self.maxpool = nn.MaxPool2d(kernel_size=kernel_size, stride=1, padding=kernel_size // 2)
+
+    def forward(self, x):
+        x = self.cv1(x)
+        with warnings.catch_warnings():
+            warnings.simplefilter('ignore')
+            y1 = self.maxpool(x)
+            y2 = self.maxpool(y1)
+            return self.cv2(torch.cat([x, y1, y2, self.maxpool(y2)], 1))
+
+class RepVGGYOLOX(nn.Module):
+    '''RepVGG blocks with an MT_SPPF tail, used to build an efficient YOLOX backbone.'''
+
+    def __init__(
+        self,
+        in_channels=3,
+        channels_list=None,
+        num_repeats=None
+    ):
+        super().__init__()
+
+        assert channels_list is not None
+        assert num_repeats is not None
+
+        self.stage0 = RepVGGBlock(
+            in_channels=in_channels,
+            out_channels=channels_list[0],
+            kernel_size=3,
+            stride=2
+        )
+        self.stage1 = self._make_stage(channels_list[0], channels_list[1], num_repeats[1])
+        self.stage2 = self._make_stage(channels_list[1], channels_list[2], num_repeats[2])
+        self.stage3 = self._make_stage(channels_list[2], channels_list[3], num_repeats[3])
+        self.stage4 = self._make_stage(channels_list[3], channels_list[4], num_repeats[4], add_ppf=True)
+
+
+    def _make_stage(self, in_channels, out_channels, repeat, stride=2, add_ppf=False):
+        blocks = []
+        blocks.append(RepVGGBlock(in_channels, out_channels, kernel_size=3, stride=stride))
+        for _ in range(repeat):
+            blocks.append(RepVGGBlock(out_channels, out_channels))
+        if add_ppf:
+            blocks.append(MT_SPPF(out_channels, out_channels, kernel_size=5))
+
+        return nn.Sequential(*blocks)
+
+    def forward(self, x):
+        outputs = []
+        x = self.stage0(x)
+        x = self.stage1(x)
+        x = self.stage2(x)
+        outputs.append(x)
+        x = self.stage3(x)
+        outputs.append(x)
+        x = self.stage4(x)
+        outputs.append(x)
+        return tuple(outputs)
+
+if __name__=='__main__':
+
+    from torchsummaryX import summary
+    import math
+
+    depth_mul = 0.33
+    width_mul = 0.5
+    num_repeat_backbone = [1, 6, 12, 18, 6]
+    channels_list_backbone = [64, 128, 256, 512, 1024]
+    num_repeat_neck = [12, 12, 12, 12]
+    channels_list_neck = [256, 128, 128, 256, 256, 512]
+    channels = 3
+    num_repeat = [(max(round(i * depth_mul), 1) if i > 1 else i) for i in
+                  (num_repeat_backbone + num_repeat_neck)]
+
+    def make_divisible(x, divisor):
+        # Round x up to the nearest value that is evenly divisible by the divisor.
+        return math.ceil(x / divisor) * divisor
+
+    channels_list = [make_divisible(i * width_mul, 8) for i in (channels_list_backbone + channels_list_neck)]
+    # from easycv.models.backbones.efficientrep import EfficientRep
+    # model = EfficientRep(in_channels=channels, channels_list=channels_list, num_repeats=num_repeat)
+    model = RepVGGYOLOX(in_channels=channels, channels_list=channels_list, num_repeats=num_repeat)
+    for layer in model.modules():
+        if isinstance(layer, RepVGGBlock):
+            layer.switch_to_deploy()
+
+    model = model.cuda()
+
+    a = torch.randn(1,3,640,640).cuda()
+    summary(model,a)
diff --git a/easycv/models/backbones/yolo6_blocks.py b/easycv/models/backbones/yolo6_blocks.py
index 7cb9d757..0688ab8d 100644
--- a/easycv/models/backbones/yolo6_blocks.py
+++ b/easycv/models/backbones/yolo6_blocks.py
@@ -251,19 +251,3 @@ def switch_to_deploy(self):
             self.__delattr__('id_tensor')
         self.deploy = True
 
-
-# class DetectBackend(nn.Module):
-#     def __init__(self, weights='yolov6s.pt', device=None, dnn=True):
-#
-#         super().__init__()
-#         assert isinstance(weights, str) and Path(weights).suffix == '.pt', f'{Path(weights).suffix} format is not supported.'
-#         # from yolov6.utils.checkpoint import load_checkpoint
-#         # model = load_checkpoint(weights, map_location=device)
-#         stride = int(model.stride.max())
-#         self.__dict__.update(locals())  # assign all variables to self
-#
-#     def forward(self, im, val=False):
-#         y = self.model(im)
-#         if isinstance(y, np.ndarray):
-#             y = torch.tensor(y, device=self.device)
-#         return y
diff --git a/easycv/models/detection/detectors/yolox/ASFF_sim.py b/easycv/models/detection/detectors/yolox/ASFF_sim.py
index fa19152a..33902f4a 100644
--- a/easycv/models/detection/detectors/yolox/ASFF_sim.py
+++ b/easycv/models/detection/detectors/yolox/ASFF_sim.py
@@ -3,7 +3,6 @@
 import torch.nn.functional as F
 from easycv.models.backbones.network_blocks import SiLU
 from easycv.models.backbones.network_blocks import DWConv
-from torchsummaryX import summary
 
 
 def autopad(k, p=None):  # kernel, padding
@@ -285,6 +284,8 @@ def forward(self, x):  # l,m,s
     # input = torch.randn(1, 3, 640, 640).cuda()
     # flops, params = profile(asff_1, inputs=(input,))
     # print('flops: {}, params: {}'.format(flops, params))
+    
+    from torchsummaryX import summary
 
     summary(asff_1, input)
     summary(asff_2, input)
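
For reference, a minimal sketch of how the depth/width multipliers used in the
backbone's __main__ block earlier in this patch expand into per-stage repeat counts
and channel widths. The helper and list values mirror the backbone file; the printed
results are just the expected outputs for the YOLOX-s style settings shown there:

    import math

    def make_divisible(x, divisor):
        # round x up to the nearest multiple of divisor
        return math.ceil(x / divisor) * divisor

    depth_mul, width_mul = 0.33, 0.5  # YOLOX-s style scaling
    num_repeat_backbone = [1, 6, 12, 18, 6]
    channels_list_backbone = [64, 128, 256, 512, 1024]
    num_repeats = [(max(round(n * depth_mul), 1) if n > 1 else n) for n in num_repeat_backbone]
    channels_list = [make_divisible(c * width_mul, 8) for c in channels_list_backbone]
    print(num_repeats)    # [1, 2, 4, 6, 2]
    print(channels_list)  # [32, 64, 128, 256, 512]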

From e81208978fefa537a0214f931dd8d2ebbd9014c2 Mon Sep 17 00:00:00 2001
From: "yanhaiqiang.yhq" <yanhaiqiang.yhq@alibaba-inc.com>
Date: Fri, 5 Aug 2022 07:25:55 +0000
Subject: [PATCH 10/69] fix asff tood code

---
 easycv/models/backbones/__init__.py           |  2 +-
 .../models/backbones/repvgg_yolox_backbone.py | 17 +++-
 .../models/detection/detectors/yolox/ASFF.py  | 11 +--
 .../detection/detectors/yolox/ASFF_sim.py     | 72 +--------------
 .../detection/detectors/yolox/attention.py    | 89 -------------------
 .../detection/detectors/yolox/tood_head.py    | 17 +++-
 .../detection/detectors/yolox/yolo_pafpn.py   | 70 ++++-----------
 7 files changed, 53 insertions(+), 225 deletions(-)
 delete mode 100644 easycv/models/detection/detectors/yolox/attention.py

diff --git a/easycv/models/backbones/__init__.py b/easycv/models/backbones/__init__.py
index 291c16a7..3aa12261 100644
--- a/easycv/models/backbones/__init__.py
+++ b/easycv/models/backbones/__init__.py
@@ -18,5 +18,5 @@
 from .resnext import ResNeXt
 from .shuffle_transformer import ShuffleTransformer
 from .swin_transformer_dynamic import SwinTransformer
-from .efficientrep import EfficientRep
+from .repvgg_yolox_backbone import RepVGGYOLOX
 from .vitdet import ViTDet
diff --git a/easycv/models/backbones/repvgg_yolox_backbone.py b/easycv/models/backbones/repvgg_yolox_backbone.py
index 263ce6b0..2ec82049 100644
--- a/easycv/models/backbones/repvgg_yolox_backbone.py
+++ b/easycv/models/backbones/repvgg_yolox_backbone.py
@@ -200,11 +200,20 @@ class RepVGGYOLOX(nn.Module):
     def __init__(
         self,
         in_channels=3,
-        channels_list=None,
-        num_repeats=None
+        depth=1.0,
+        width=1.0,
     ):
         super().__init__()
 
+        num_repeat_backbone = [1, 6, 12, 18, 6]
+        channels_list_backbone = [64, 128, 256, 512, 1024]
+        num_repeat_neck = [12, 12, 12, 12]
+        channels_list_neck = [256, 128, 128, 256, 256, 512]
+        num_repeats = [(max(round(i * depth), 1) if i > 1 else i) for i in
+                        (num_repeat_backbone + num_repeat_neck)]
+
+        channels_list = [make_divisible(i * width, 8) for i in (channels_list_backbone + channels_list_neck)]
+
         assert channels_list is not None
         assert num_repeats is not None
 
@@ -301,6 +310,7 @@ def forward(self, x):
         outputs.append(x)
         return tuple(outputs)
 
+
 if __name__=='__main__':
 
     from torchsummaryX import summary
@@ -323,7 +333,8 @@ def make_divisible(x, divisor):
     channels_list = [make_divisible(i * width_mul, 8) for i in (channels_list_backbone + channels_list_neck)]
     # from easycv.models.backbones.efficientrep import EfficientRep
     # model = EfficientRep(in_channels=channels, channels_list=channels_list, num_repeats=num_repeat)
-    model = RepVGGYOLOX(in_channels=channels, channels_list=channels_list, num_repeats=num_repeat)
+    # model = RepVGGYOLOX(in_channels=channels, channels_list=channels_list, num_repeats=num_repeat)
+    model = RepVGGYOLOX(in_channels=channels, depth=depth_mul, width=width_mul)
     for layer in model.modules():
         if isinstance(layer, RepVGGBlock):
             layer.switch_to_deploy()
diff --git a/easycv/models/detection/detectors/yolox/ASFF.py b/easycv/models/detection/detectors/yolox/ASFF.py
index 018aace9..c163c4fb 100644
--- a/easycv/models/detection/detectors/yolox/ASFF.py
+++ b/easycv/models/detection/detectors/yolox/ASFF.py
@@ -11,8 +11,6 @@ def autopad(k, p=None):  # kernel, padding
 
 def get_activation(name='silu', inplace=True):
     if name == 'silu':
-        # @ to do nn.SiLU 1.7.0
-        # module = nn.SiLU(inplace=inplace)
         module = SiLU(inplace=inplace)
     elif name == 'relu':
         module = nn.ReLU(inplace=inplace)
@@ -29,7 +27,6 @@ def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act='silu'):  # ch_in, ch_out,
         super(Conv, self).__init__()
         self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
         self.bn = nn.BatchNorm2d(c2)
-        # self.act = SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())
         self.act = get_activation(act, inplace=True)
 
     def forward(self, x):
@@ -52,9 +49,7 @@ def __init__(self, level, multiplier=1, asff_channel=16, rfb=False, vis=False, a
         self.level = level
         self.dim = [int(1024 * multiplier), int(512 * multiplier),
                     int(256 * multiplier)]
-        # print(self.dim)
 
-        # print(act, asff_channel)
 
         self.inter_dim = self.dim[self.level]
         if level == 0:
@@ -99,9 +94,9 @@ def forward(self, x):  # l,m,s
         256, 512, 1024
         from small -> large
         """
-        x_level_0 = x[2]  # 最大特征层 [512,20,20]
-        x_level_1 = x[1]  # 中间特征层 [256,40,40]
-        x_level_2 = x[0]  # 最小特征层 [128,80,80]
+        x_level_0 = x[2]  # max feature level [512,20,20]
+        x_level_1 = x[1]  # mid feature level [256,40,40]
+        x_level_2 = x[0]  # min feature level [128,80,80]
 
         if self.level == 0:
             level_0_resized = x_level_0
diff --git a/easycv/models/detection/detectors/yolox/ASFF_sim.py b/easycv/models/detection/detectors/yolox/ASFF_sim.py
index 33902f4a..9fa0cef4 100644
--- a/easycv/models/detection/detectors/yolox/ASFF_sim.py
+++ b/easycv/models/detection/detectors/yolox/ASFF_sim.py
@@ -24,14 +24,12 @@ def get_activation(name='silu', inplace=True):
         raise AttributeError('Unsupported act type: {}'.format(name))
     return module
 
-
 class Conv(nn.Module):
     # Standard convolution
     def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act='silu'):  # ch_in, ch_out, kernel, stride, padding, groups
         super(Conv, self).__init__()
         self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
         self.bn = nn.BatchNorm2d(c2)
-        # self.act = SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())
         self.act = get_activation(act, inplace=True)
 
     def forward(self, x):
@@ -40,41 +38,6 @@ def forward(self, x):
     def forward_fuse(self, x):
         return self.act(self.conv(x))
 
-
-# class expandChannel(nn.Module):
-#     def __init__(self,
-#                  in_channels,
-#                  out_channels,
-#                  ksize=1,
-#                  stride=1,
-#                  act='silu',
-#                  use_conv = True):
-#         super().__init__()
-#         self.use_conv = use_conv
-#         self.conv = Conv(
-#             in_channels * 4, out_channels, ksize, stride, act=act)
-#
-#     def forward(self, x):
-#         # shape of x (b,c,w,h) -> y(b,4c,w/2,h/2)
-#         patch_top_left = x[..., ::2, ::2]
-#         patch_top_right = x[..., ::2, 1::2]
-#         patch_bot_left = x[..., 1::2, ::2]
-#         patch_bot_right = x[..., 1::2, 1::2]
-#         x = torch.cat(
-#             (
-#                 patch_top_left,
-#                 patch_bot_left,
-#                 patch_top_right,
-#                 patch_bot_right,
-#             ),
-#             dim=1,
-#         )
-#         if self.use_conv:
-#             return self.conv(x)
-#         else:
-#             return x
-
-
 class ASFF(nn.Module):
     def __init__(self, level, multiplier=1, asff_channel=2, expand_kernel=3, down_rate = None, use_dconv = False, use_expand = True, rfb=False, vis=False, act='silu'):
         """
@@ -94,10 +57,6 @@ def __init__(self, level, multiplier=1, asff_channel=2, expand_kernel=3, down_ra
         self.use_expand = use_expand
 
         if level == 0:
-            # self.stride_level_1 = Conv(int(512 * multiplier), self.inter_dim, 3, 2,act=act)
-            # self.stride_level_2 = Conv(int(256 * multiplier), self.inter_dim, 3, 2,act=act)
-            # self.expand_channel_1 = expandChannel(int(512 * multiplier), self.inter_dim*2, use_conv=use_conv)
-            # self.expand_channel_2 = expandChannel(int(256 * multiplier),self.inter_dim,use_conv=use_conv)
             if down_rate == None:
                 self.expand = Conv(self.inter_dim, int(
                     1024 * multiplier), expand_kernel, 1, act=act)
@@ -112,11 +71,6 @@ def __init__(self, level, multiplier=1, asff_channel=2, expand_kernel=3, down_ra
                     )
 
         elif level == 1:
-            # self.compress_level_0 = Conv(
-            #     int(1024 * multiplier), self.inter_dim, 1, 1,act=act)
-            # self.stride_level_2 = Conv(
-            #     int(256 * multiplier), self.inter_dim, 3, 2,act=act)
-            # self.expand = Conv(self.inter_dim, int(512 * multiplier), 3, 1,act=act)
             if down_rate == None:
                 self.expand = Conv(self.inter_dim, int(
                     512 * multiplier), expand_kernel, 1, act=act)
@@ -132,12 +86,6 @@ def __init__(self, level, multiplier=1, asff_channel=2, expand_kernel=3, down_ra
                     )
 
         elif level == 2:
-            # self.compress_level_0 = Conv(
-            #     int(1024 * multiplier), self.inter_dim, 1, 1,act=act)
-            # self.compress_level_1 = Conv(
-            #     int(512 * multiplier), self.inter_dim, 1, 1,act=act)
-            # self.expand = Conv(self.inter_dim, int(
-            #     256 * multiplier), 3, 1,act=act)
             if down_rate == None:
                 self.expand = Conv(self.inter_dim, int(
                     256 * multiplier), expand_kernel, 1, act=act)
@@ -184,34 +132,21 @@ def expand_channel(self, x):
         )
         return x
 
-    # def expand_fmap(self, x):
-    #     # [b,c,h,w]-> [b,c/4,h*2,w*2]
-    #     b,c,h,w = x.shape[1]
-    #     res = torch.zeros(b,int(c/4),h*2,w*2)
-    #     res[..., ::2, ::2] = x[:,:int(c/4),:,:]
-    #     res[..., ::2, 1::2] = x[:,int(c/4):int(c/2),:,:]
-    #     res[..., 1::2, ::2] = x[:,int(c/2):3*int(c/4),:,:]
-    #     res[..., 1::2, 1::2] = x[:,:int(c/4),:,:]
-    #
-    #     return res
-
-
     def mean_channel(self, x):
         # [b,c,h,w] -> [b,c/2,h,w]
         x1 = x[:,::2,:,:]
         x2 = x[:,1::2,:,:]
         return (x1+x2)/2
 
-
     def forward(self, x):  # l,m,s
         """
         #
         256, 512, 1024
         from small -> large
         """
-        x_level_0 = x[2]  # 最大特征层 [512,20,20]
-        x_level_1 = x[1]  # 中间特征层 [256,40,40]
-        x_level_2 = x[0]  # 最小特征层 [128,80,80]
+        x_level_0 = x[2]  # max feature [512,20,20]
+        x_level_1 = x[1]  # mid feature [256,40,40]
+        x_level_2 = x[0]  # min feature [128,80,80]
 
         if self.level == 0:
             level_0_resized = x_level_0
@@ -266,7 +201,6 @@ def forward(self, x):  # l,m,s
     in_channels = [256, 512, 1024]
 
     asff_channel = 2
-    print(asff_channel)
     act = 'relu'
 
     asff_1 = ASFF(level=0, multiplier=width, asff_channel=asff_channel, act=act).cuda()
diff --git a/easycv/models/detection/detectors/yolox/attention.py b/easycv/models/detection/detectors/yolox/attention.py
deleted file mode 100644
index ad6889d0..00000000
--- a/easycv/models/detection/detectors/yolox/attention.py
+++ /dev/null
@@ -1,89 +0,0 @@
-import torch
-import torch.nn as nn
-import math
-
-
-# SE
-class SE(nn.Module):
-    def __init__(self, channel, ratio=16):
-        super(SE, self).__init__()
-        self.avg_pool = nn.AdaptiveAvgPool2d(1)
-        self.fc = nn.Sequential(
-            nn.Linear(channel, channel // ratio, bias=False),
-            nn.ReLU(inplace=True),
-            nn.Linear(channel // ratio, channel, bias=False),
-            nn.Sigmoid()
-        )
-
-    def forward(self, x):
-        b, c, _, _ = x.size()
-        y = self.avg_pool(x).view(b, c)
-        y = self.fc(y).view(b, c, 1, 1)
-        return x * y
-
-
-class ChannelAttention(nn.Module):
-    def __init__(self, in_planes, ratio=8):
-        super(ChannelAttention, self).__init__()
-        self.avg_pool = nn.AdaptiveAvgPool2d(1)
-        self.max_pool = nn.AdaptiveMaxPool2d(1)
-
-        self.fc1 = nn.Conv2d(in_planes, in_planes // ratio, 1, bias=False)
-        self.relu1 = nn.ReLU()
-        self.fc2 = nn.Conv2d(in_planes // ratio, in_planes, 1, bias=False)
-
-        self.sigmoid = nn.Sigmoid()
-
-    def forward(self, x):
-        avg_out = self.fc2(self.relu1(self.fc1(self.avg_pool(x))))
-        max_out = self.fc2(self.relu1(self.fc1(self.max_pool(x))))
-        out = avg_out + max_out
-        return self.sigmoid(out)
-
-
-class SpatialAttention(nn.Module):
-    def __init__(self, kernel_size=7):
-        super(SpatialAttention, self).__init__()
-
-        assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
-        padding = 3 if kernel_size == 7 else 1
-        self.conv1 = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
-        self.sigmoid = nn.Sigmoid()
-
-    def forward(self, x):
-        avg_out = torch.mean(x, dim=1, keepdim=True)
-        max_out, _ = torch.max(x, dim=1, keepdim=True)
-        x = torch.cat([avg_out, max_out], dim=1)
-        x = self.conv1(x)
-        return self.sigmoid(x)
-
-
-# CBAM
-class CBAM(nn.Module):
-    def __init__(self, channel, ratio=8, kernel_size=7):
-        super(CBAM, self).__init__()
-        self.channelattention = ChannelAttention(channel, ratio=ratio)
-        self.spatialattention = SpatialAttention(kernel_size=kernel_size)
-
-    def forward(self, x):
-        x = x * self.channelattention(x)
-        x = x * self.spatialattention(x)
-        return x
-
-
-class ECA(nn.Module):
-    def __init__(self, channel, b=1, gamma=2):
-        super(ECA, self).__init__()
-        kernel_size = int(abs((math.log(channel, 2) + b) / gamma))
-        kernel_size = kernel_size if kernel_size % 2 else kernel_size + 1
-
-        self.avg_pool = nn.AdaptiveAvgPool2d(1)
-        self.conv = nn.Conv1d(1, 1, kernel_size=kernel_size, padding=(kernel_size - 1) // 2, bias=False)
-        self.sigmoid = nn.Sigmoid()
-
-    def forward(self, x):
-        y = self.avg_pool(x)
-        y = self.conv(y.squeeze(-1).transpose(-1, -2)).transpose(-1, -2).unsqueeze(-1)
-        y = self.sigmoid(y)
-        return x * y.expand_as(x)
-
diff --git a/easycv/models/detection/detectors/yolox/tood_head.py b/easycv/models/detection/detectors/yolox/tood_head.py
index 4c7deae0..088626c7 100644
--- a/easycv/models/detection/detectors/yolox/tood_head.py
+++ b/easycv/models/detection/detectors/yolox/tood_head.py
@@ -7,6 +7,7 @@
 import torch.nn as nn
 import torch.nn.functional as F
 
+from easycv.models.backbone.repvgg_yolox_backbone import RepVGGBlock
 from easycv.models.backbones.network_blocks import BaseConv, DWConv
 from easycv.models.detection.utils import bboxes_iou
 from easycv.models.loss import IOUloss
@@ -93,8 +94,8 @@ def __init__(self,
                  width=1.0,
                  strides=[8, 16, 32],
                  in_channels=[256, 512, 1024],
+                 conv_type='conv',
                  act='silu',
-                 depthwise=False,
                  stage='CLOUD',
                  obj_loss_type='l1',
                  reg_loss_type='iou',
@@ -139,7 +140,17 @@ def __init__(self,
 
         self.inter_convs = nn.ModuleList()
 
-        Conv = DWConv if depthwise else BaseConv
+        default_conv_type_list = ['conv', 'dwconv', 'repconv']
+        # Conv = DWConv if depthwise else BaseConv
+        if conv_type not in default_conv_type_list:
+            logging.warning('YOLOX-PAI TOOD head conv_type must be one of [conv, dwconv, repconv]; falling back to repconv')
+            conv_type = 'repconv'
+        if conv_type == 'conv':
+            Conv = BaseConv
+        elif conv_type == 'dwconv':
+            Conv = DWConv
+        elif conv_type == 'repconv':
+            Conv = RepVGGBlock
 
         for i in range(len(in_channels)):
             self.stems.append(
@@ -150,7 +161,6 @@ def __init__(self,
                     stride=1,
                     act=act,
                 ))
-
             if conv_layers==2:
                 self.cls_convs.append(
                     nn.Sequential(*[
@@ -269,7 +279,6 @@ def __init__(self,
             self.obj_loss = nn.BCEWithLogitsLoss(reduction='none')
         elif obj_loss_type == 'focal':
             self.obj_loss = FocalLoss(reduction='none')
-
         elif obj_loss_type == 'v_focal':
             self.obj_loss = VarifocalLoss(reduction='none')
         else:
diff --git a/easycv/models/detection/detectors/yolox/yolo_pafpn.py b/easycv/models/detection/detectors/yolox/yolo_pafpn.py
index 145d88f5..a6f0ce73 100644
--- a/easycv/models/detection/detectors/yolox/yolo_pafpn.py
+++ b/easycv/models/detection/detectors/yolox/yolo_pafpn.py
@@ -1,13 +1,11 @@
 # Copyright (c) 2014-2021 Megvii Inc And Alibaba PAI-Teams. All rights reserved.
-
+import logging
 import torch
 import torch.nn as nn
 
 from easycv.models.backbones.darknet import CSPDarknet
-from easycv.models.backbones.efficientrep import EfficientRep
+from easycv.models.backbones.repvgg_yolox_backbone import RepVGGYOLOX
 from easycv.models.backbones.network_blocks import BaseConv, CSPLayer, DWConv, GSConv, VoVGSCSP
-from .attention import SE, CBAM, ECA
-# from .ASFF import ASFF
 import math
 
 
@@ -35,40 +33,32 @@ def __init__(
         down_rate=32,
         use_dconv=False,
         use_expand=True,
-        spp_type='spp',
         backbone = "CSPDarknet",
         neck = 'yolo',
         neck_mode = 'all'
     ):
         super().__init__()
-        # self.backbone = CSPDarknet(depth, width, depthwise=depthwise, act=act,spp_type=spp_type)
-        self.backbone_name = backbone
+        # build backbone
         if backbone == "CSPDarknet":
             self.backbone = CSPDarknet(depth, width, depthwise=depthwise, act=act)
+        elif backbone == "RepVGGYOLOX":
+            self.backbone = RepVGGYOLOX(in_channels=channels,depth=depth, width=width)
         else:
-            depth_mul = depth
-            width_mul = width
-            num_repeat_backbone = [1, 6, 12, 18, 6]
-            channels_list_backbone = [64, 128, 256, 512, 1024]
-            num_repeat_neck = [12, 12, 12, 12]
-            channels_list_neck = [256, 128, 128, 256, 256, 512]
-
-            channels = 3
-
-            num_repeat = [(max(round(i * depth_mul), 1) if i > 1 else i) for i in
-                          (num_repeat_backbone + num_repeat_neck)]
-
-            channels_list = [make_divisible(i * width_mul, 8) for i in (channels_list_backbone + channels_list_neck)]
-            self.backbone = EfficientRep(in_channels=channels, channels_list=channels_list, num_repeats=num_repeat)
-
+            logging.warning('YOLOX-PAI backbone must be one of [CSPDarknet, RepVGGYOLOX]; falling back to RepVGGYOLOX')
+            self.backbone = RepVGGYOLOX(in_channels=channels,depth=depth, width=width)
+        self.backbone_name = backbone
 
+        # build neck
         self.in_features = in_features
         self.in_channels = in_channels
         Conv = DWConv if depthwise else BaseConv
-
         self.neck = neck
         self.neck_mode = neck_mode
-        if neck =='yolo':
+        if neck = 'gsconv':
+            if neck != 'yolo':
+                logging.warning('YOLOX-PAI neck must be one of [yolo, gsconv]; falling back to yolo')
+            self.neck = 'yolo'
+            
             self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
             self.lateral_conv0 = BaseConv(
                 int(in_channels[2] * width),
@@ -128,7 +118,6 @@ def __init__(
                 depthwise=depthwise,
                 act=act)
         else:
-            # gsconv
             self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
             self.gsconv1 = GSConv(
                 int(in_channels[2] * width),
@@ -229,22 +218,11 @@ def __init__(
                     depthwise=depthwise,
                     act=act)
 
+        # build attention after PAN
         self.use_att=use_att
-
-        if self.use_att!=None and self.use_att!='ASFF' and self.use_att!='ASFF_sim':
-            # add attention layer
-            if self.use_att=="CBAM":
-                ATT = CBAM
-            elif self.use_att=="SE":
-                ATT = SE
-            elif self.use_att=="ECA":
-                ATT = ECA
-            else:
-                assert "Unknown Attention Layer!"
-
-            self.att_1 = ATT(int(in_channels[2] * width))  # 对应dark5输出的1024维度通道
-            self.att_2 = ATT(int(in_channels[1] * width))  # 对应dark4输出的512维度通道
-            self.att_3 = ATT(int(in_channels[0] * width))  # 对应dark3输出的256维度通道
+        default_attention_list = ['ASFF', 'ASFF_sim']
+        if use_att is not None and use_att not in default_attention_list:
+            logging.warning('YOLOX-PAI use_att must be one of [ASFF, ASFF_sim]; no attention layer will be used')
 
         if self.use_att=='ASFF' or self.use_att=='ASFF_sim':
             if self.use_att=='ASFF':
@@ -268,11 +246,6 @@ def forward(self, input):
             Tuple[Tensor]: FPN feature.
         """
 
-        #  backbone
-        # out_features = self.backbone(input)
-        # features = [out_features[f] for f in self.in_features]
-        # [x2, x1, x0] = features
-        #  backbone
         if self.backbone_name == "CSPDarknet":
             out_features = self.backbone(input)
             features = [out_features[f] for f in self.in_features]
@@ -281,12 +254,6 @@ def forward(self, input):
             features = self.backbone(input)
             [x2, x1, x0] = features
 
-        # add attention
-        if self.use_att!=None and self.use_att!='ASFF' and self.use_att!='ASFF_sim':
-            x0 = self.att_1(x0)
-            x1 = self.att_2(x1)
-            x2 = self.att_3(x2)
-
         if self.neck =='yolo':
             fpn_out0 = self.lateral_conv0(x0)  # 1024->512/32
             f_out0 = self.upsample(fpn_out0)  # 512/16
@@ -340,6 +307,7 @@ def forward(self, input):
 
         outputs = (pan_out2, pan_out1, pan_out0)
 
+        # forward for attention
         if self.use_att == 'ASFF' or self.use_att=='ASFF_sim':
             pan_out0 = self.asff_1(outputs)
             pan_out1 = self.asff_2(outputs)

From 6a9241e90cd5353fce7100f248ef43bbd371268a Mon Sep 17 00:00:00 2001
From: "yanhaiqiang.yhq" <yanhaiqiang.yhq@alibaba-inc.com>
Date: Sat, 6 Aug 2022 00:23:14 +0000
Subject: [PATCH 11/69] fix bug

---
 easycv/models/backbones/repvgg_yolox_backbone.py      | 4 ++++
 easycv/models/detection/detectors/yolox/tood_head.py  | 2 +-
 easycv/models/detection/detectors/yolox/yolo_pafpn.py | 6 +++---
 easycv/models/detection/detectors/yolox/yolox.py      | 2 +-
 4 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/easycv/models/backbones/repvgg_yolox_backbone.py b/easycv/models/backbones/repvgg_yolox_backbone.py
index 2ec82049..792eb870 100644
--- a/easycv/models/backbones/repvgg_yolox_backbone.py
+++ b/easycv/models/backbones/repvgg_yolox_backbone.py
@@ -4,7 +4,11 @@
 import torch
 import copy
 import warnings
+import math
 
+def make_divisible(x, divisor):
+    # Round x up to the nearest value that is evenly divisible by the divisor.
+    return math.ceil(x / divisor) * divisor
 
 def conv_bn(in_channels, out_channels, kernel_size, stride, padding, groups=1):
     '''Basic cell for rep-style block, including conv and bn'''
diff --git a/easycv/models/detection/detectors/yolox/tood_head.py b/easycv/models/detection/detectors/yolox/tood_head.py
index 088626c7..70c4077d 100644
--- a/easycv/models/detection/detectors/yolox/tood_head.py
+++ b/easycv/models/detection/detectors/yolox/tood_head.py
@@ -7,7 +7,7 @@
 import torch.nn as nn
 import torch.nn.functional as F
 
-from easycv.models.backbone.repvgg_yolox_backbone import RepVGGBlock
+from easycv.models.backbones.repvgg_yolox_backbone import RepVGGBlock
 from easycv.models.backbones.network_blocks import BaseConv, DWConv
 from easycv.models.detection.utils import bboxes_iou
 from easycv.models.loss import IOUloss
diff --git a/easycv/models/detection/detectors/yolox/yolo_pafpn.py b/easycv/models/detection/detectors/yolox/yolo_pafpn.py
index a6f0ce73..c827c41f 100644
--- a/easycv/models/detection/detectors/yolox/yolo_pafpn.py
+++ b/easycv/models/detection/detectors/yolox/yolo_pafpn.py
@@ -42,10 +42,10 @@ def __init__(
         if backbone == "CSPDarknet":
             self.backbone = CSPDarknet(depth, width, depthwise=depthwise, act=act)
         elif backbone == "RepVGGYOLOX":
-            self.backbone = RepVGGYOLOX(in_channels=channels,depth=depth, width=width)
+            self.backbone = RepVGGYOLOX(in_channels=3,depth=depth, width=width)
         else:
             logging.warning('YOLOX-PAI backbone must be one of [CSPDarknet, RepVGGYOLOX]; falling back to RepVGGYOLOX')
-            self.backbone = RepVGGYOLOX(in_channels=channels,depth=depth, width=width)
+            self.backbone = RepVGGYOLOX(in_channels=3,depth=depth, width=width)
         self.backbone_name = backbone
 
         # build neck
@@ -54,7 +54,7 @@ def __init__(
         Conv = DWConv if depthwise else BaseConv
         self.neck = neck
         self.neck_mode = neck_mode
-        if neck = 'gsconv':
+        if neck == 'gsconv':
             if neck != 'yolo':
                 logging.warning('YOLOX-PAI neck must be one of [yolo, gsconv]; falling back to yolo')
             self.neck = 'yolo'
diff --git a/easycv/models/detection/detectors/yolox/yolox.py b/easycv/models/detection/detectors/yolox/yolox.py
index 61d81a3a..bcc30e87 100644
--- a/easycv/models/detection/detectors/yolox/yolox.py
+++ b/easycv/models/detection/detectors/yolox/yolox.py
@@ -78,7 +78,7 @@ def __init__(self,
         depth = self.param_map[model_type][0]
         width = self.param_map[model_type][1]
 
-        self.backbone = YOLOPAFPN(depth, width, in_channels=in_channels, asff_channel=asff_channel, act=act, use_att=use_att, spp_type=spp_type, backbone = backbone, neck = neck, neck_mode=neck_mode, expand_kernel=expand_kernel, down_rate = down_rate, use_dconv = use_dconv, use_expand = use_expand)
+        self.backbone = YOLOPAFPN(depth, width, in_channels=in_channels, asff_channel=asff_channel, act=act, use_att=use_att, backbone = backbone, neck = neck, neck_mode=neck_mode, expand_kernel=expand_kernel, down_rate = down_rate, use_dconv = use_dconv, use_expand = use_expand)
 
         self.head_type = head_type
         if head_type == 'yolox':
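
For reference, a minimal sketch of how the refactored backbone can now be built
directly from depth/width multipliers (module path and constructor signature as in
the diffs above; the multiplier values and input size are illustrative):

    import torch
    from easycv.models.backbones.repvgg_yolox_backbone import RepVGGYOLOX

    backbone = RepVGGYOLOX(in_channels=3, depth=0.33, width=0.5)  # YOLOX-s style scaling
    feats = backbone(torch.randn(1, 3, 640, 640))
    # forward returns a tuple of three feature maps (strides 8, 16, 32)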

From bc2b238193453429980d0c7f1251be8bb84b155b Mon Sep 17 00:00:00 2001
From: "yanhaiqiang.yhq" <yanhaiqiang.yhq@alibaba-inc.com>
Date: Sat, 6 Aug 2022 00:28:55 +0000
Subject: [PATCH 12/69] fix bug

---
 easycv/models/detection/detectors/yolox/yolo_pafpn.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/easycv/models/detection/detectors/yolox/yolo_pafpn.py b/easycv/models/detection/detectors/yolox/yolo_pafpn.py
index c827c41f..1ba25ccf 100644
--- a/easycv/models/detection/detectors/yolox/yolo_pafpn.py
+++ b/easycv/models/detection/detectors/yolox/yolo_pafpn.py
@@ -54,7 +54,7 @@ def __init__(
         Conv = DWConv if depthwise else BaseConv
         self.neck = neck
         self.neck_mode = neck_mode
-        if neck == 'gsconv':
+        if neck != 'gsconv':
             if neck != 'yolo':
                 logging.warning('YOLOX-PAI neck must be one of [yolo, gsconv]; falling back to yolo')
             self.neck = 'yolo'

From e9e0dfcecc299690fa8ca74369958b140140040d Mon Sep 17 00:00:00 2001
From: "yanhaiqiang.yhq" <yanhaiqiang.yhq@alibaba-inc.com>
Date: Sat, 6 Aug 2022 00:38:08 +0000
Subject: [PATCH 13/69] fix tood act bug

---
 .../models/backbones/repvgg_yolox_backbone.py |  2 +-
 .../detection/detectors/yolox/tood_head.py    | 21 +++++++++++--------
 2 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/easycv/models/backbones/repvgg_yolox_backbone.py b/easycv/models/backbones/repvgg_yolox_backbone.py
index 792eb870..a5edd941 100644
--- a/easycv/models/backbones/repvgg_yolox_backbone.py
+++ b/easycv/models/backbones/repvgg_yolox_backbone.py
@@ -20,7 +20,7 @@ def conv_bn(in_channels, out_channels, kernel_size, stride, padding, groups=1):
 
 class RepVGGBlock(nn.Module):
     def __init__(self, in_channels, out_channels, kernel_size=3,
-                 stride=1, padding=1, dilation=1, groups=1, padding_mode='zeros', deploy=False, use_se=False):
+                 stride=1, padding=1, dilation=1, groups=1, padding_mode='zeros', deploy=False, use_se=False, act=None):
         super(RepVGGBlock, self).__init__()
         self.deploy = deploy
         self.groups = groups
diff --git a/easycv/models/detection/detectors/yolox/tood_head.py b/easycv/models/detection/detectors/yolox/tood_head.py
index 70c4077d..3361286e 100644
--- a/easycv/models/detection/detectors/yolox/tood_head.py
+++ b/easycv/models/detection/detectors/yolox/tood_head.py
@@ -94,7 +94,7 @@ def __init__(self,
                  width=1.0,
                  strides=[8, 16, 32],
                  in_channels=[256, 512, 1024],
-                 conv_type='conv',
+                 conv_type='repconv',
                  act='silu',
                  stage='CLOUD',
                  obj_loss_type='l1',
@@ -167,16 +167,17 @@ def __init__(self,
                         Conv(
                             in_channels=int(256 * width),
                             out_channels=int(256 * width),
-                            ksize=3,
-                            stride=1,
                             act=act,
+                            # ksize=3,
+                            # stride=1,
                         ),
                         Conv(
                             in_channels=int(256 * width),
                             out_channels=int(256 * width),
-                            ksize=3,
-                            stride=1,
                             act=act,
+                            # ksize=3,
+                            # stride=1,
+                            # act=act,
                         ),
                     ]))
                 self.reg_convs.append(
@@ -184,16 +185,18 @@ def __init__(self,
                         Conv(
                             in_channels=int(256 * width),
                             out_channels=int(256 * width),
-                            ksize=3,
-                            stride=1,
                             act=act,
+                            # ksize=3,
+                            # stride=1,
+                            # act=act,
                         ),
                         Conv(
                             in_channels=int(256 * width),
                             out_channels=int(256 * width),
-                            ksize=3,
-                            stride=1,
                             act=act,
+                            # ksize=3,
+                            # stride=1,
+                            # act=act,
                         ),
                     ]))
             elif conv_layers==1:

From bb16a3fdfd63fcf8be9ce1029f6f8ee21ac07213 Mon Sep 17 00:00:00 2001
From: "yanhaiqiang.yhq" <yanhaiqiang.yhq@alibaba-inc.com>
Date: Mon, 8 Aug 2022 05:58:18 +0000
Subject: [PATCH 14/69] blade utils fit faster

---
 easycv/toolkit/blade/cv_blade_utils.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/easycv/toolkit/blade/cv_blade_utils.py b/easycv/toolkit/blade/cv_blade_utils.py
index 4cdc8e3b..fae51cae 100644
--- a/easycv/toolkit/blade/cv_blade_utils.py
+++ b/easycv/toolkit/blade/cv_blade_utils.py
@@ -74,9 +74,9 @@ def opt_trt_config(input_config=dict(enable_fp16=True)):
         optimization_pipeline='TensorRT',
         enable_fp16=True,
         customize_op_black_list=[
-            'aten::select', 'aten::index', 'aten::slice', 'aten::view'
+            # 'aten::select', 'aten::index', 'aten::slice', 'aten::view'
         ],
-        fp16_fallback_op_ratio=0.3,
+        fp16_fallback_op_ratio=0.1,
     )
     BLADE_CONFIG_KEYS = list(BLADE_CONFIG_DEFAULT.keys())
 

From bbdfb9f3585aadc263add1b0699619d818c01be4 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=91=A8=E6=A5=BC?= <zhoulou.wzh@alibaba-inc.com>
Date: Mon, 8 Aug 2022 14:51:49 +0800
Subject: [PATCH 15/69] blade optimize for yolox static & fp16

---
 easycv/toolkit/blade/cv_blade_utils.py | 75 ++++++++++++++++++++------
 1 file changed, 58 insertions(+), 17 deletions(-)

diff --git a/easycv/toolkit/blade/cv_blade_utils.py b/easycv/toolkit/blade/cv_blade_utils.py
index fae51cae..47ea5802 100644
--- a/easycv/toolkit/blade/cv_blade_utils.py
+++ b/easycv/toolkit/blade/cv_blade_utils.py
@@ -74,7 +74,7 @@ def opt_trt_config(input_config=dict(enable_fp16=True)):
         optimization_pipeline='TensorRT',
         enable_fp16=True,
         customize_op_black_list=[
-            # 'aten::select', 'aten::index', 'aten::slice', 'aten::view'
+             #'aten::select', 'aten::index', 'aten::slice', 'aten::view', 'aten::upsample'
         ],
         fp16_fallback_op_ratio=0.1,
     )
@@ -114,7 +114,8 @@ def cu_prof_stop():
 @contextmanager
 def opt_blade_mixprec():
     try:
-        dummy = torch.classes.torch_blade.MixPrecision(True)
+        #dummy = torch.classes.torch_blade.MixPrecision(True)
+        dummy = torch.cuda.amp.autocast(True)
         yield
     finally:
         pass
@@ -235,21 +236,31 @@ def check_results(results0, results1):
     except Exception as err:
         logging.error(err)
 
-
 def blade_optimize(script_model,
                    model,
                    inputs,
                    blade_config=dict(enable_fp16=True),
                    backend='TensorRT',
                    batch=1,
-                   compute_cost=False):
-
-    with opt_trt_config(blade_config):
-        opt_model = optimize(
-            model,
-            allow_tracing=True,
-            model_inputs=tuple(inputs),
-        )
+                   compute_cost=True,
+                   static_opt=True):
+
+    if not static_opt:
+        with opt_trt_config(blade_config):
+            opt_model = optimize(
+                model,
+                allow_tracing=True,
+                model_inputs=tuple(inputs),
+            )
+    else:
+        print("GTY: use static shape optimization")
+        from torch_blade.optimization import _static_optimize
+        with opt_trt_config(blade_config):
+            opt_model = _static_optimize(
+                model,
+                allow_tracing=True,
+                model_inputs=tuple(inputs),
+            )
 
     if compute_cost:
         results = []
@@ -269,14 +280,44 @@ def blade_optimize(script_model,
         summary = pd.DataFrame(results)
         logging.warning(summary.to_markdown())
 
-    output = model(*inputs)
+    print(opt_model.forward.code)
+    print(opt_model.forward.graph)
+    torch.cuda.empty_cache()
+    # warm-up
+    for k in range(10):
+        test_result = opt_model(*inputs)
+        torch.cuda.synchronize()
+
+    # output = model(*inputs)
+    # if blade_config.get('enable_fp16', True):
+    #     with opt_blade_mixprec():
+    #         test_result = model(*inputs)
+    # else:
+    # test_result = opt_model(*inputs)
+    # test_result = opt_model(*inputs)
+    print("GTY: do nv profiling")
+    torch.cuda.synchronize()
     cu_prof_start()
-    if blade_config.get('enable_fp16', True):
-        with opt_blade_mixprec():
-            test_result = model(*inputs)
-    else:
+    for k in range(10):
         test_result = opt_model(*inputs)
+        torch.cuda.synchronize()
     cu_prof_stop()
-    check_results(output, test_result)
+
+    print("GTY: do torch profiling")
+    import torch.autograd.profiler as profiler
+    with profiler.profile(use_cuda=True) as prof:
+        for k in range(10):
+            test_result = opt_model(*inputs)
+            torch.cuda.synchronize()
+
+    with profiler.profile(use_cuda=True) as prof:
+        for k in range(10):
+            test_result = opt_model(*inputs)
+            torch.cuda.synchronize()
+ 
+    prof_str = prof.key_averages().table(sort_by="cuda_time_total")
+    print(f"{prof_str}")
+
+    # check_results(output, test_result)
 
     return opt_model

From 2f5c6c4ef4122d0a8f9fb0416721050bc3c3fcde Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=91=A8=E6=A5=BC?= <zhoulou.wzh@alibaba-inc.com>
Date: Mon, 8 Aug 2022 15:22:04 +0800
Subject: [PATCH 16/69] decode output for yolox control by cfg

---
 easycv/apis/export.py                             |  1 +
 .../models/detection/detectors/yolox/tood_head.py |  3 ++-
 easycv/models/detection/detectors/yolox/yolox.py  | 15 +++++++++++----
 3 files changed, 14 insertions(+), 5 deletions(-)

diff --git a/easycv/apis/export.py b/easycv/apis/export.py
index 019e2986..02e69ebb 100644
--- a/easycv/apis/export.py
+++ b/easycv/apis/export.py
@@ -170,6 +170,7 @@ def _export_yolox(model, cfg, filename):
         if LooseVersion(torch.__version__) < LooseVersion('1.7.0') and end2end:
             raise ValueError('`end2end` only support torch1.7.0 and later!')
 
+        #batch_size = cfg.export.get('batch_size', 32)
         batch_size = cfg.export.get('batch_size', 1)
         img_scale = cfg.get('img_scale', (640, 640))
         assert (
diff --git a/easycv/models/detection/detectors/yolox/tood_head.py b/easycv/models/detection/detectors/yolox/tood_head.py
index 3361286e..d8a0de8b 100644
--- a/easycv/models/detection/detectors/yolox/tood_head.py
+++ b/easycv/models/detection/detectors/yolox/tood_head.py
@@ -102,6 +102,7 @@ def __init__(self,
                  stacked_convs=6,
                  la_down_rate=8,
                  conv_layers=2,
+                 decode_in_inference=True,
                  conv_cfg=None,
                  norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
                  ):
@@ -122,7 +123,7 @@ def __init__(self,
         self.n_anchors = 1
         self.num_classes = num_classes
         self.stage = stage
-        self.decode_in_inference = True  # for deploy, set to False
+        self.decode_in_inference = decode_in_inference  # for deploy, set to False
 
         self.stacked_convs = stacked_convs
         self.conv_cfg = conv_cfg
diff --git a/easycv/models/detection/detectors/yolox/yolox.py b/easycv/models/detection/detectors/yolox/yolox.py
index bcc30e87..1e3f084d 100644
--- a/easycv/models/detection/detectors/yolox/yolox.py
+++ b/easycv/models/detection/detectors/yolox/yolox.py
@@ -65,6 +65,7 @@ def __init__(self,
                  stacked_convs: int = 6,
                  la_down_rate: int = 8,
                  conv_layers: int = 2,
+                 decode_in_inference: bool = True,
                  backbone="CSPDarknet",
                  expand_kernel=3,
                  down_rate=32,
@@ -87,7 +88,8 @@ def __init__(self,
         elif head_type == 'tood':
             self.head = TOODHead(num_classes, width, in_channels=in_channels, act=act, obj_loss_type=obj_loss_type, reg_loss_type=reg_loss_type, stacked_convs=stacked_convs,
                  la_down_rate=la_down_rate,
-                 conv_layers=conv_layers)
+                 conv_layers=conv_layers,
+                 decode_in_inference=decode_in_inference)
             self.head.initialize_biases(1e-2)
         elif head_type == 'ppyoloe':
             self.head = PPYOLOEHead(
@@ -107,9 +109,13 @@ def __init__(self,
                 # assigner=TaskAlignedAssigner(topk=self.tal_topk, alpha=1.0, beta=6.0)
             )
 
-        self.apply(init_yolo)  # init_yolo(self)
-
 
+        self.decode_in_inference = decode_in_inference
+            
+        if not self.decode_in_inference:
+            logging.warning('YOLOX-PAI head decode_in_inference is disabled for speed test; post processing will be disabled as well!')
+        
+        self.apply(init_yolo)  # init_yolo(self)
         self.num_classes = num_classes
         self.test_conf = test_conf
         self.nms_thre = nms_thre
@@ -251,7 +257,8 @@ def forward_export(self, img):
             fpn_outs = self.backbone(img)
             outputs = self.head(fpn_outs)
 
-            outputs = postprocess(outputs, self.num_classes, self.test_conf,
+            if self.decode_in_inference:
+                outputs = postprocess(outputs, self.num_classes, self.test_conf,
                                   self.nms_thre)
 
         return outputs
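
For reference, a minimal config sketch of the new switch; only decode_in_inference
comes from the diff above, the registered model type string and the surrounding keys
are assumptions based on the existing YOLOX configs:

    # hedged sketch: decode_in_inference is forwarded to the YOLOX constructor;
    # when set to False, forward_export skips decoding and post processing (speed test mode)
    model = dict(
        type='YOLOX',
        decode_in_inference=False,
        # ... keep the remaining model settings unchanged
    )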

From 71c6ea20f08d34ec66e47fdbeb84189652ab4289 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=91=A8=E6=A5=BC?= <zhoulou.wzh@alibaba-inc.com>
Date: Mon, 8 Aug 2022 15:48:08 +0800
Subject: [PATCH 17/69] fix some bug

---
 easycv/models/detection/detectors/yolox/yolox.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/easycv/models/detection/detectors/yolox/yolox.py b/easycv/models/detection/detectors/yolox/yolox.py
index 1e3f084d..f1f600e0 100644
--- a/easycv/models/detection/detectors/yolox/yolox.py
+++ b/easycv/models/detection/detectors/yolox/yolox.py
@@ -1,5 +1,6 @@
 # Copyright (c) 2014-2021 Megvii Inc And Alibaba PAI-Teams. All rights reserved.
 from typing import Dict
+import logging
 
 import numpy as np
 import torch

From 6fcb2934478bae978df519fac9346ea9906db735 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=91=A8=E6=A5=BC?= <zhoulou.wzh@alibaba-inc.com>
Date: Tue, 9 Aug 2022 13:54:10 +0800
Subject: [PATCH 18/69] fix tood stem to repconv

---
 easycv/apis/export.py                         | 33 +++++++++++++++----
 .../models/backbones/repvgg_yolox_backbone.py |  1 -
 .../detection/detectors/yolox/tood_head.py    | 16 +++------
 .../models/detection/detectors/yolox/yolox.py |  9 +++--
 easycv/toolkit/blade/cv_blade_utils.py        |  9 +++--
 5 files changed, 41 insertions(+), 27 deletions(-)

diff --git a/easycv/apis/export.py b/easycv/apis/export.py
index 02e69ebb..058ddb9b 100644
--- a/easycv/apis/export.py
+++ b/easycv/apis/export.py
@@ -13,6 +13,8 @@
 from easycv.file import io
 from easycv.models import (DINO, MOCO, SWAV, YOLOX, Classification, MoBY,
                            build_model)
+from easycv.models.backbones.repvgg_yolox_backbone import RepVGGBlock
+
 from easycv.utils.bbox_util import scale_coords
 from easycv.utils.checkpoint import load_checkpoint
 
@@ -35,6 +37,14 @@ def export(cfg, ckpt_path, filename):
     else:
         cfg.model.backbone.pretrained = False
 
+    num=0
+    for layer in model.modules():
+        num+=1
+        if isinstance(layer, RepVGGBlock):
+            print('switch to deploy')
+            layer.switch_to_deploy()
+    logging.info('export : PAI-RepVGGBlock switch to deploy with {} blocks'.format(num))
+
     if isinstance(model, MOCO) or isinstance(model, DINO):
         _export_moco(model, cfg, filename)
     elif isinstance(model, MoBY):
@@ -170,8 +180,9 @@ def _export_yolox(model, cfg, filename):
         if LooseVersion(torch.__version__) < LooseVersion('1.7.0') and end2end:
             raise ValueError('`end2end` only support torch1.7.0 and later!')
 
-        #batch_size = cfg.export.get('batch_size', 32)
-        batch_size = cfg.export.get('batch_size', 1)
+        batch_size = cfg.export.get('batch_size', 32)
+        # batch_size = cfg.export.get('batch_size', 1)
+        static_opt = cfg.export.get('static_opt', True)
         img_scale = cfg.get('img_scale', (640, 640))
         assert (
             len(img_scale) == 2
@@ -195,7 +206,6 @@ def _export_yolox(model, cfg, filename):
         # use trace is a litter bit faster than script. But it is not supported in an end2end model.
         if end2end:
             yolox_trace = torch.jit.script(model_export)
-
         else:
             yolox_trace = torch.jit.trace(model_export, input.to(device))
 
@@ -208,13 +218,18 @@ def _export_yolox(model, cfg, filename):
             assert blade_env_assert()
 
             if end2end:
-                input = 255 * torch.rand(img_scale + (3, ))
+                if batch_size == 1:
+                    input = 255 * torch.rand(img_scale + (3, ))
+                else:
+                    input = 255 * torch.rand(img_scale + (3, batch_size))
+
 
             yolox_blade = blade_optimize(
                 script_model=model,
                 model=yolox_trace,
                 inputs=(input.to(device), ),
-                blade_config=blade_config)
+                blade_config=blade_config,
+                static_opt=static_opt)
 
             with io.open(filename + '.blade', 'wb') as ofile:
                 torch.jit.save(yolox_blade, ofile)
@@ -645,7 +660,12 @@ def __init__(self,
 
         self.example_inputs = example_inputs
         self.preprocess_fn = preprocess_fn
-        self.postprocess_fn = postprocess_fn
+        self.ignore_postprocess = getattr(self.model, 'ignore_postprocess', False) 
+        if not self.ignore_postprocess:
+            self.postprocess_fn = postprocess_fn
+        else:
+            self.postprocess_fn = None
+        logging.warning("Model {} ignore_postprocess set to {} during export!".format(type(model), self.ignore_postprocess))
         self.trace_model = trace_model
         if self.trace_model:
             self.trace_module()
@@ -670,7 +690,6 @@ def forward(self, image):
                     image = output
 
             model_output = self.model.forward_export(image)
-
             if self.postprocess_fn is not None:
                 model_output = self.postprocess_fn(model_output,
                                                    *preprocess_outputs)
diff --git a/easycv/models/backbones/repvgg_yolox_backbone.py b/easycv/models/backbones/repvgg_yolox_backbone.py
index a5edd941..d7e2055e 100644
--- a/easycv/models/backbones/repvgg_yolox_backbone.py
+++ b/easycv/models/backbones/repvgg_yolox_backbone.py
@@ -208,7 +208,6 @@ def __init__(
         width=1.0,
     ):
         super().__init__()
-
         num_repeat_backbone = [1, 6, 12, 18, 6]
         channels_list_backbone = [64, 128, 256, 512, 1024]
         num_repeat_neck = [12, 12, 12, 12]
diff --git a/easycv/models/detection/detectors/yolox/tood_head.py b/easycv/models/detection/detectors/yolox/tood_head.py
index d8a0de8b..88e4d250 100644
--- a/easycv/models/detection/detectors/yolox/tood_head.py
+++ b/easycv/models/detection/detectors/yolox/tood_head.py
@@ -155,13 +155,15 @@ def __init__(self,
 
         for i in range(len(in_channels)):
             self.stems.append(
-                BaseConv(
+                # BaseConv(
+                Conv(
                     in_channels=int(in_channels[i] * width),
                     out_channels=int(256 * width),
                     ksize=1,
                     stride=1,
                     act=act,
-                ))
+                )
+            )
             if conv_layers==2:
                 self.cls_convs.append(
                     nn.Sequential(*[
@@ -187,17 +189,11 @@ def __init__(self,
                             in_channels=int(256 * width),
                             out_channels=int(256 * width),
                             act=act,
-                            # ksize=3,
-                            # stride=1,
-                            # act=act,
                         ),
                         Conv(
                             in_channels=int(256 * width),
                             out_channels=int(256 * width),
                             act=act,
-                            # ksize=3,
-                            # stride=1,
-                            # act=act,
                         ),
                     ]))
             elif conv_layers==1:
@@ -206,8 +202,6 @@ def __init__(self,
                         Conv(
                             in_channels=int(256 * width),
                             out_channels=int(256 * width),
-                            ksize=3,
-                            stride=1,
                             act=act,
                         )
                     ]))
@@ -216,8 +210,6 @@ def __init__(self,
                         Conv(
                             in_channels=int(256 * width),
                             out_channels=int(256 * width),
-                            ksize=3,
-                            stride=1,
                             act=act,
                         )
                     ]))
diff --git a/easycv/models/detection/detectors/yolox/yolox.py b/easycv/models/detection/detectors/yolox/yolox.py
index f1f600e0..f89b368d 100644
--- a/easycv/models/detection/detectors/yolox/yolox.py
+++ b/easycv/models/detection/detectors/yolox/yolox.py
@@ -112,10 +112,15 @@ def __init__(self,
 
 
         self.decode_in_inference = decode_in_inference
-            
+        # when decode_in_inference is enabled, post processing is applied as usual
         if not self.decode_in_inference:
             logging.warning('YOLOX-PAI head decode_in_inference is disabled for speed test; post processing will be disabled as well!')
-        
+            self.ignore_postprocess = True
+            logging.warning('YOLOX-PAI ignore_postprocess set to be True')
+        else:
+            self.ignore_postprocess = False
+
+
         self.apply(init_yolo)  # init_yolo(self)
         self.num_classes = num_classes
         self.test_conf = test_conf
diff --git a/easycv/toolkit/blade/cv_blade_utils.py b/easycv/toolkit/blade/cv_blade_utils.py
index 47ea5802..1e71b103 100644
--- a/easycv/toolkit/blade/cv_blade_utils.py
+++ b/easycv/toolkit/blade/cv_blade_utils.py
@@ -76,7 +76,7 @@ def opt_trt_config(input_config=dict(enable_fp16=True)):
         customize_op_black_list=[
              #'aten::select', 'aten::index', 'aten::slice', 'aten::view', 'aten::upsample'
         ],
-        fp16_fallback_op_ratio=0.1,
+        fp16_fallback_op_ratio=0.05,
     )
     BLADE_CONFIG_KEYS = list(BLADE_CONFIG_DEFAULT.keys())
 
@@ -246,6 +246,7 @@ def blade_optimize(script_model,
                    static_opt=True):
 
     if not static_opt:
+        logging.info('PAI-Blade uses dynamic optimization; the exported model supports dynamic input shapes')
         with opt_trt_config(blade_config):
             opt_model = optimize(
                 model,
@@ -253,7 +254,7 @@ def blade_optimize(script_model,
                 model_inputs=tuple(inputs),
             )
     else:
-        print("GTY: use static shape optimization")
+        logging.info('PAI-Blade uses static optimization; the exported model must be run with a static input shape')
         from torch_blade.optimization import _static_optimize
         with opt_trt_config(blade_config):
             opt_model = _static_optimize(
@@ -295,15 +296,13 @@ def blade_optimize(script_model,
     # else:
     # test_result = opt_model(*inputs)
     # test_result = opt_model(*inputs)
-    print("GTY: do nv profiling")
+    
     torch.cuda.synchronize()
     cu_prof_start()
     for k in range(10):
         test_result = opt_model(*inputs)
         torch.cuda.synchronize()
     cu_prof_stop()
-
-    print("GTY: do torch profiling")
     import torch.autograd.profiler as profiler
     with profiler.profile(use_cuda=True) as prof:
         for k in range(10):

From 5ecb695da09d77dac902eb1047a1d8e4cb82833e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=91=A8=E6=A5=BC?= <zhoulou.wzh@alibaba-inc.com>
Date: Tue, 9 Aug 2022 14:49:28 +0800
Subject: [PATCH 19/69] fix tood interconv with repconv

---
 .../detection/detectors/yolox/tood_head.py    | 26 ++++++++++++-------
 1 file changed, 16 insertions(+), 10 deletions(-)

diff --git a/easycv/models/detection/detectors/yolox/tood_head.py b/easycv/models/detection/detectors/yolox/tood_head.py
index 88e4d250..06044e78 100644
--- a/easycv/models/detection/detectors/yolox/tood_head.py
+++ b/easycv/models/detection/detectors/yolox/tood_head.py
@@ -155,8 +155,7 @@ def __init__(self,
 
         for i in range(len(in_channels)):
             self.stems.append(
-                # BaseConv(
-                Conv(
+                BaseConv(
                     in_channels=int(in_channels[i] * width),
                     out_channels=int(256 * width),
                     ksize=1,
@@ -254,14 +253,21 @@ def __init__(self,
             conv_cfg = self.conv_cfg
             chn = self.feat_channels
             self.inter_convs.append(
-                ConvModule(
-                    chn,
-                    self.feat_channels,
-                    3,
-                    stride=1,
-                    padding=1,
-                    conv_cfg=conv_cfg,
-                    norm_cfg=self.norm_cfg))
+                Conv(
+                    in_channels=chn,
+                    out_channels=chn,
+                    act=act,
+                )
+            )
+            # self.inter_convs.append(
+            #     ConvModule(
+            #         chn,
+            #         self.feat_channels,
+            #         3,
+            #         stride=1,
+            #         padding=1,
+            #         conv_cfg=conv_cfg,
+            #         norm_cfg=self.norm_cfg))
 
         self.bcewithlog_loss = nn.BCEWithLogitsLoss(reduction='none')
 

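The hunk above swaps the mmcv-style ConvModule tower in the TOOD head for the repo's rep-style Conv block. The stacked inter-conv pattern itself looks like the plain-PyTorch stand-in below (not the actual Conv/RepConv implementation; channel and stack sizes are illustrative):

import torch
import torch.nn as nn

stacked_convs, feat_channels = 6, 128
inter_convs = nn.ModuleList([
    nn.Sequential(
        nn.Conv2d(feat_channels, feat_channels, 3, stride=1, padding=1, bias=False),
        nn.BatchNorm2d(feat_channels),
        nn.SiLU(),
    ) for _ in range(stacked_convs)
])

# Both the cls and reg branches of the head read from this shared feature tower.
feat = torch.randn(1, feat_channels, 80, 80)
for conv in inter_convs:
    feat = conv(feat)
print(feat.shape)  # torch.Size([1, 128, 80, 80])
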
From 2346aaf569e95edda51a3828c4f7139432825daa Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=91=A8=E6=A5=BC?= <zhoulou.wzh@alibaba-inc.com>
Date: Tue, 9 Aug 2022 14:55:22 +0800
Subject: [PATCH 20/69] add reparameterize_models for export

---
 easycv/apis/export.py | 23 +++++++++++++++--------
 1 file changed, 15 insertions(+), 8 deletions(-)

diff --git a/easycv/apis/export.py b/easycv/apis/export.py
index 058ddb9b..2af904a8 100644
--- a/easycv/apis/export.py
+++ b/easycv/apis/export.py
@@ -22,6 +22,20 @@
     'export', 'PreProcess', 'DetPostProcess', 'End2endModelExportWrapper'
 ]
 
+def reparameterize_models(model):
+    """ reparameterize model for inference, especially for 
+            1. rep conv block : merge 3x3 and 1x1 weights
+        calls each module's switch_to_deploy recursively
+    Args:
+        model: nn.Module
+    """
+    reparameterize_count=0
+    for layer in model.modules():
+        reparameterize_count+=1
+        if isinstance(layer, RepVGGBlock):
+            layer.switch_to_deploy()
+    logging.info('export : PAI-export reparameterize_count(RepVGGBlock, ) switch to deploy with {} blocks'.format(reparameterize_count))
+    return model
 
 def export(cfg, ckpt_path, filename):
     """ export model for inference
@@ -36,14 +50,7 @@ def export(cfg, ckpt_path, filename):
         load_checkpoint(model, ckpt_path, map_location='cpu')
     else:
         cfg.model.backbone.pretrained = False
-
-    num=0
-    for layer in model.modules():
-        num+=1
-        if isinstance(layer, RepVGGBlock):
-            print('switch to deploy')
-            layer.switch_to_deploy()
-    logging.info('export : PAI-RepVGGBlock switch to deploy with {} blocks'.format(num))
+    model = reparameterize_models(model)
 
     if isinstance(model, MOCO) or isinstance(model, DINO):
         _export_moco(model, cfg, filename)

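reparameterize_models() only walks the module tree and calls switch_to_deploy() on every RepVGGBlock. A small sketch of the underlying idea, with BN folding and the identity branch omitted for brevity (shapes and values are illustrative):

import torch
import torch.nn.functional as F

out_c, in_c = 64, 64
k3 = torch.randn(out_c, in_c, 3, 3)   # 3x3 branch weight
k1 = torch.randn(out_c, in_c, 1, 1)   # 1x1 branch weight
fused = k3 + F.pad(k1, [1, 1, 1, 1])  # zero-pad the 1x1 kernel to 3x3 and add

x = torch.randn(1, in_c, 32, 32)
two_branch = F.conv2d(x, k3, padding=1) + F.conv2d(x, k1)
one_branch = F.conv2d(x, fused, padding=1)
print(torch.allclose(two_branch, one_branch, atol=1e-5))  # True: a single conv at deploy time
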
From 437342c0bd6c56c9442393db427ee6124ceb4e2f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=91=A8=E6=A5=BC?= <zhoulou.wzh@alibaba-inc.com>
Date: Tue, 9 Aug 2022 15:18:55 +0800
Subject: [PATCH 21/69] pre-commit fix

---
 compute_model_params.py                       |  73 +++---
 configs/detection/yolox/yolox_best_asff_1.py  |   2 +-
 configs/detection/yolox/yolox_best_asff_2.py  |   3 +-
 configs/detection/yolox/yolox_best_asff_4.py  |   2 +-
 configs/detection/yolox/yolox_best_asff_8.py  |   2 +-
 configs/detection/yolox/yolox_best_conv1.py   |   3 +-
 configs/detection/yolox/yolox_best_la_16.py   |   3 +-
 configs/detection/yolox/yolox_best_la_32.py   |   4 +-
 .../yolox/yolox_best_la_32_sconv_2.py         |   3 +-
 .../yolox/yolox_best_la_32_sconv_4.py         |   3 +-
 configs/detection/yolox/yolox_best_lrelu.py   |   4 +-
 configs/detection/yolox/yolox_best_ori.py     |   2 +-
 configs/detection/yolox/yolox_best_relu.py    |   4 +-
 configs/detection/yolox/yolox_best_stack_1.py |   3 +-
 configs/detection/yolox/yolox_best_stack_2.py |   3 +-
 configs/detection/yolox/yolox_best_stack_3.py |   3 +-
 configs/detection/yolox/yolox_best_stack_4.py |   3 +-
 configs/detection/yolox/yolox_best_stack_5.py |   3 +-
 .../yolox/yolox_s_8xb16_300e_coco.py          |   3 +-
 .../yolox/yolox_s_8xb16_300e_coco_asff.py     |   3 +-
 .../yolox/yolox_s_8xb16_300e_coco_base.py     |  20 +-
 .../yolox/yolox_s_8xb16_300e_coco_cbam.py     |   3 +-
 .../yolox/yolox_s_8xb16_300e_coco_ciou.py     |   3 +-
 .../yolox/yolox_s_8xb16_300e_coco_diou.py     |   3 +-
 .../yolox/yolox_s_8xb16_300e_coco_eca.py      |   3 +-
 .../yolox/yolox_s_8xb16_300e_coco_eiou.py     |   3 +-
 .../yolox/yolox_s_8xb16_300e_coco_focal.py    |   3 +-
 .../yolox/yolox_s_8xb16_300e_coco_giou.py     |   3 +-
 .../yolox/yolox_s_8xb16_300e_coco_se.py       |   3 +-
 .../yolox/yolox_s_8xb16_300e_coco_siou.py     |   3 +-
 .../yolox/yolox_s_8xb16_300e_coco_siou2.py    |   3 +-
 .../yolox/yolox_s_8xb16_300e_coco_sppf.py     |   3 +-
 .../yolox/yolox_s_8xb16_300e_coco_vfocal.py   |   3 +-
 .../detection/yolox/yolox_s_8xb16_300e_tal.py |   2 +-
 .../yolox/yolox_s_8xb16_300e_tal_asff_giou.py |   2 +-
 .../yolox_s_8xb16_300e_tal_asff_sppf_giou.py  |   6 +-
 .../yolox/yolox_s_8xb16_300e_wonorm.py        |   2 +-
 .../yolox/yolox_tiny_8xb16_300e_coco.py       |   1 -
 configs/detection/yolox/yolox_yolo6.py        |   2 +-
 .../detection/yolox/yolox_yolo6_asff_sim.py   |   2 +-
 configs/detection/yolox/yolox_yolo6_att.py    |   3 +-
 .../detection/yolox/yolox_yolo6_att_relu.py   |   4 +-
 .../detection/yolox/yolox_yolo6_att_sim.py    |   3 +-
 .../detection/yolox/yolox_yolo6_att_sim_1.py  |   3 +-
 .../detection/yolox/yolox_yolo6_att_sim_16.py |   3 +-
 .../detection/yolox/yolox_yolo6_att_sim_32.py |   3 +-
 .../detection/yolox/yolox_yolo6_att_sim_8.py  |   3 +-
 .../detection/yolox/yolox_yolo6_att_sim_d.py  |   3 +-
 .../yolox/yolox_yolo6_att_sim_no_expand.py    |   3 +-
 configs/detection/yolox/yolox_yolo6_gsconv.py |   3 +-
 .../yolox/yolox_yolo6_gsconv_asff_sim.py      |   7 +-
 .../yolox/yolox_yolo6_gsconv_part.py          |   5 +-
 .../detection/yolox/yolox_yolo6_head_ori.py   |   3 +-
 .../detection/yolox/yolox_yolo6_head_tood.py  |   3 +-
 .../detection/yolox/yolox_yolo6_yoloe_head.py |   3 +-
 easycv/apis/export.py                         |  21 +-
 easycv/apis/train.py                          |   2 +-
 easycv/hooks/yolox_mode_switch_hook.py        |   2 +-
 easycv/models/backbones/__init__.py           |   2 +-
 easycv/models/backbones/darknet.py            |  43 ++--
 easycv/models/backbones/efficientrep.py       |  68 +++--
 easycv/models/backbones/network_blocks.py     |  12 +-
 .../models/backbones/repvgg_yolox_backbone.py | 236 +++++++++++++-----
 easycv/models/backbones/yolo6_blocks.py       | 118 +++++++--
 .../models/detection/detectors/yolox/ASFF.py  |  71 ++++--
 .../detection/detectors/yolox/ASFF_sim.py     | 175 +++++++++----
 .../detection/detectors/yolox/__init__.py     |   2 +-
 .../detection/detectors/yolox/tood_head.py    | 212 ++++++++--------
 .../detection/detectors/yolox/yolo_head.py    |  23 +-
 .../detection/detectors/yolox/yolo_pafpn.py   | 140 +++++++----
 .../models/detection/detectors/yolox/yolox.py |  94 ++++---
 easycv/models/loss/__init__.py                |   3 +-
 easycv/models/loss/focal_loss.py              |   1 +
 easycv/models/loss/iou_loss.py                | 204 ++++++++-------
 easycv/toolkit/blade/cv_blade_utils.py        |  19 +-
 easycv/utils/checkpoint.py                    |   1 -
 show_predict.py                               |  25 +-
 tools/eval.py                                 |   3 +-
 78 files changed, 1017 insertions(+), 714 deletions(-)

diff --git a/compute_model_params.py b/compute_model_params.py
index fda0000c..c50f8ebb 100644
--- a/compute_model_params.py
+++ b/compute_model_params.py
@@ -1,13 +1,15 @@
 # Copyright (c) 2014-2021 Megvii Inc And Alibaba PAI-Teams. All rights reserved.
 
+import math
+
 import torch
 import torch.nn as nn
+from torchsummaryX import summary
 
 from easycv.models.backbones.darknet import CSPDarknet
 from easycv.models.backbones.efficientrep import EfficientRep
-from easycv.models.backbones.network_blocks import BaseConv, CSPLayer, DWConv, GSConv, VoVGSCSP
-from torchsummaryX import summary
-import math
+from easycv.models.backbones.network_blocks import (BaseConv, CSPLayer, DWConv,
+                                                    GSConv, VoVGSCSP)
 
 
 def make_divisible(x, divisor):
@@ -28,22 +30,23 @@ def __init__(
         in_channels=[256, 512, 1024],
         depthwise=False,
         act='silu',
-        asff_channel = 16,
+        asff_channel=16,
         use_att=None,
         expand_kernel=3,
         down_rate=32,
         use_dconv=False,
         use_expand=True,
         spp_type='spp',
-        backbone = "CSPDarknet",
-        neck = 'gsconv',
-        neck_mode = 'part',
+        backbone='CSPDarknet',
+        neck='gsconv',
+        neck_mode='part',
     ):
         super().__init__()
         # self.backbone = CSPDarknet(depth, width, depthwise=depthwise, act=act,spp_type=spp_type)
         self.backbone_name = backbone
-        if backbone == "CSPDarknet":
-            self.backbone = CSPDarknet(depth, width, depthwise=depthwise, act=act)
+        if backbone == 'CSPDarknet':
+            self.backbone = CSPDarknet(
+                depth, width, depthwise=depthwise, act=act)
         else:
             depth_mul = depth
             width_mul = width
@@ -54,12 +57,17 @@ def __init__(
 
             channels = 3
 
-            num_repeat = [(max(round(i * depth_mul), 1) if i > 1 else i) for i in
-                          (num_repeat_backbone + num_repeat_neck)]
-
-            channels_list = [make_divisible(i * width_mul, 8) for i in (channels_list_backbone + channels_list_neck)]
-            self.backbone = EfficientRep(in_channels=channels, channels_list=channels_list, num_repeats=num_repeat)
+            num_repeat = [(max(round(i * depth_mul), 1) if i > 1 else i)
+                          for i in (num_repeat_backbone + num_repeat_neck)]
 
+            channels_list = [
+                make_divisible(i * width_mul, 8)
+                for i in (channels_list_backbone + channels_list_neck)
+            ]
+            self.backbone = EfficientRep(
+                in_channels=channels,
+                channels_list=channels_list,
+                num_repeats=num_repeat)
 
         self.in_features = in_features
         self.in_channels = in_channels
@@ -68,7 +76,7 @@ def __init__(
         self.neck = neck
         self.neck_mode = neck_mode
 
-        if neck =='yolo':
+        if neck == 'yolo':
             self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
             self.lateral_conv0 = BaseConv(
                 int(in_channels[2] * width),
@@ -142,24 +150,21 @@ def __init__(
                 int(in_channels[0] * width),
                 1,
                 1,
-                act=act
-            )
+                act=act)
 
             self.gsconv4 = GSConv(
                 int(in_channels[0] * width),
                 int(in_channels[0] * width),
                 3,
                 2,
-                act=act
-            )
+                act=act)
 
             self.gsconv5 = GSConv(
                 int(in_channels[1] * width),
                 int(in_channels[1] * width),
                 3,
                 2,
-                act=act
-            )
+                act=act)
 
             if self.neck_mode == 'all':
                 self.vovGSCSP1 = VoVGSCSP(
@@ -174,16 +179,14 @@ def __init__(
                     int(2 * in_channels[0] * width),
                     1,
                     1,
-                    act=act
-                )
+                    act=act)
                 self.vovGSCSP2 = VoVGSCSP(
-                    int(2*in_channels[0] * width),
+                    int(2 * in_channels[0] * width),
                     int(in_channels[0] * width),
                     round(3 * depth),
                     False,
                 )
 
-
                 self.vovGSCSP3 = VoVGSCSP(
                     int(2 * in_channels[0] * width),
                     int(in_channels[1] * width),
@@ -230,8 +233,6 @@ def __init__(
                     depthwise=depthwise,
                     act=act)
 
-
-
     def forward(self, input):
         """
         Args:
@@ -246,8 +247,8 @@ def forward(self, input):
         # features = [out_features[f] for f in self.in_features]
         # [x2, x1, x0] = features
         #  backbone
-        x2,x1,x0 = x
-        if self.neck =='yolo':
+        x2, x1, x0 = x
+        if self.neck == 'yolo':
             fpn_out0 = self.lateral_conv0(x0)  # 1024->512/32
             f_out0 = self.upsample(fpn_out0)  # 512/16
             f_out0 = torch.cat([f_out0, x1], 1)  # 512->1024/16
@@ -271,7 +272,7 @@ def forward(self, input):
             fpn_out0 = self.gsconv1(x0)  # 1024->512/32
             f_out0 = self.upsample(fpn_out0)  # 512/16
             f_out0 = torch.cat([f_out0, x1], 1)  # 512->1024/16
-            if self.neck_mode =='all':
+            if self.neck_mode == 'all':
                 f_out0 = self.vovGSCSP1(f_out0)  # 1024->512/16
             else:
                 f_out0 = self.C3_p4(f_out0)
@@ -279,13 +280,12 @@ def forward(self, input):
             fpn_out1 = self.gsconv2(f_out0)  # 512->256/16
             f_out1 = self.upsample(fpn_out1)  # 256/8
             f_out1 = torch.cat([f_out1, x2], 1)  # 256->512/8
-            if self.neck_mode =='all':
+            if self.neck_mode == 'all':
                 f_out1 = self.gsconv3(f_out1)
                 pan_out2 = self.vovGSCSP2(f_out1)  # 512->256/8
             else:
                 pan_out2 = self.C3_p3(f_out1)  # 512->256/8
 
-
             p_out1 = self.gsconv4(pan_out2)  # 256->256/16
             p_out1 = torch.cat([p_out1, fpn_out1], 1)  # 256->512/16
             if self.neck_mode == 'all':
@@ -304,7 +304,10 @@ def forward(self, input):
 
         return outputs
 
-if __name__=='__main__':
-    x = (torch.randn(1,128,80,80).cuda(),torch.randn(1,256,40,40).cuda(),torch.randn(1,512,20,20).cuda())
+
+if __name__ == '__main__':
+    x = (torch.randn(1, 128, 80, 80).cuda(), torch.randn(1, 256, 40,
+                                                         40).cuda(),
+         torch.randn(1, 512, 20, 20).cuda())
     model = YOLOPAFPN(depth=0.33, width=0.5).cuda()
-    summary(model,x)
\ No newline at end of file
+    summary(model, x)
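The EfficientRep channel list above comes from scaling the backbone/neck defaults by width_mul and rounding up to a multiple of 8. A quick numeric check with assumed default backbone channels (illustrative only; width_mul=0.5 roughly corresponds to the "s" model):

import math

def make_divisible(x, divisor):
    # Round x up to the nearest multiple of divisor.
    return math.ceil(x / divisor) * divisor

width_mul = 0.5
channels_list_backbone = [64, 128, 256, 512, 1024]  # assumed defaults
print([make_divisible(c * width_mul, 8) for c in channels_list_backbone])
# [32, 64, 128, 256, 512]
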
diff --git a/configs/detection/yolox/yolox_best_asff_1.py b/configs/detection/yolox/yolox_best_asff_1.py
index c5af9388..ebdd02b1 100644
--- a/configs/detection/yolox/yolox_best_asff_1.py
+++ b/configs/detection/yolox/yolox_best_asff_1.py
@@ -11,7 +11,7 @@
     asff_channel=1,
     obj_loss_type='BCE',
     reg_loss_type='giou',
-    head_type ='tood' # yolox
+    head_type='tood'  # yolox
 )
 
 # s m l x
diff --git a/configs/detection/yolox/yolox_best_asff_2.py b/configs/detection/yolox/yolox_best_asff_2.py
index cb58358b..48e05d49 100644
--- a/configs/detection/yolox/yolox_best_asff_2.py
+++ b/configs/detection/yolox/yolox_best_asff_2.py
@@ -11,7 +11,7 @@
     asff_channel=2,
     obj_loss_type='BCE',
     reg_loss_type='giou',
-    head_type ='tood' # yolox
+    head_type='tood'  # yolox
 )
 
 # s m l x
@@ -45,7 +45,6 @@
 # data_root = '/mnt/data/nas/data/detection/coco/'
 data_root = '/cpfs01/shared/public/dataset/coco2017/'
 
-
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 
diff --git a/configs/detection/yolox/yolox_best_asff_4.py b/configs/detection/yolox/yolox_best_asff_4.py
index db60710a..83c53948 100644
--- a/configs/detection/yolox/yolox_best_asff_4.py
+++ b/configs/detection/yolox/yolox_best_asff_4.py
@@ -11,7 +11,7 @@
     asff_channel=4,
     obj_loss_type='BCE',
     reg_loss_type='giou',
-    head_type ='tood' # yolox
+    head_type='tood'  # yolox
 )
 
 # s m l x
diff --git a/configs/detection/yolox/yolox_best_asff_8.py b/configs/detection/yolox/yolox_best_asff_8.py
index 2c59d3ad..af2a24fe 100644
--- a/configs/detection/yolox/yolox_best_asff_8.py
+++ b/configs/detection/yolox/yolox_best_asff_8.py
@@ -11,7 +11,7 @@
     asff_channel=8,
     obj_loss_type='BCE',
     reg_loss_type='giou',
-    head_type ='tood' # yolox
+    head_type='tood'  # yolox
 )
 
 # s m l x
diff --git a/configs/detection/yolox/yolox_best_conv1.py b/configs/detection/yolox/yolox_best_conv1.py
index c07ff11d..4520f4a9 100644
--- a/configs/detection/yolox/yolox_best_conv1.py
+++ b/configs/detection/yolox/yolox_best_conv1.py
@@ -15,8 +15,7 @@
     asff_channel=16,
     stacked_convs=6,
     la_down_rate=8,
-    conv_layers=1
-)
+    conv_layers=1)
 
 # s m l x
 img_scale = (640, 640)
diff --git a/configs/detection/yolox/yolox_best_la_16.py b/configs/detection/yolox/yolox_best_la_16.py
index 6f7564cd..9f11105d 100644
--- a/configs/detection/yolox/yolox_best_la_16.py
+++ b/configs/detection/yolox/yolox_best_la_16.py
@@ -15,8 +15,7 @@
     asff_channel=16,
     stacked_convs=6,
     la_down_rate=16,
-    conv_layers=2
-)
+    conv_layers=2)
 
 # s m l x
 img_scale = (640, 640)
diff --git a/configs/detection/yolox/yolox_best_la_32.py b/configs/detection/yolox/yolox_best_la_32.py
index 6a18d9bc..c807c539 100644
--- a/configs/detection/yolox/yolox_best_la_32.py
+++ b/configs/detection/yolox/yolox_best_la_32.py
@@ -15,8 +15,7 @@
     asff_channel=16,
     stacked_convs=6,
     la_down_rate=32,
-    conv_layers=2
-)
+    conv_layers=2)
 
 # s m l x
 img_scale = (640, 640)
@@ -49,7 +48,6 @@
 # data_root = '/mnt/data/nas/data/detection/coco/'
 data_root = '/cpfs01/shared/public/dataset/coco2017/'
 
-
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 
diff --git a/configs/detection/yolox/yolox_best_la_32_sconv_2.py b/configs/detection/yolox/yolox_best_la_32_sconv_2.py
index 79bfe269..90eb6034 100644
--- a/configs/detection/yolox/yolox_best_la_32_sconv_2.py
+++ b/configs/detection/yolox/yolox_best_la_32_sconv_2.py
@@ -15,8 +15,7 @@
     asff_channel=16,
     stacked_convs=2,
     la_down_rate=32,
-    conv_layers=2
-)
+    conv_layers=2)
 
 # s m l x
 img_scale = (640, 640)
diff --git a/configs/detection/yolox/yolox_best_la_32_sconv_4.py b/configs/detection/yolox/yolox_best_la_32_sconv_4.py
index f6d5f153..e2ce80bc 100644
--- a/configs/detection/yolox/yolox_best_la_32_sconv_4.py
+++ b/configs/detection/yolox/yolox_best_la_32_sconv_4.py
@@ -15,8 +15,7 @@
     asff_channel=16,
     stacked_convs=4,
     la_down_rate=32,
-    conv_layers=2
-)
+    conv_layers=2)
 
 # s m l x
 img_scale = (640, 640)
diff --git a/configs/detection/yolox/yolox_best_lrelu.py b/configs/detection/yolox/yolox_best_lrelu.py
index 5ea1fa27..db43347c 100644
--- a/configs/detection/yolox/yolox_best_lrelu.py
+++ b/configs/detection/yolox/yolox_best_lrelu.py
@@ -7,11 +7,11 @@
     model_type='s',  # s m l x tiny nano
     test_conf=0.01,
     nms_thre=0.65,
-    act = 'lrelu',
+    act='lrelu',
     use_att='ASFF',
     obj_loss_type='BCE',
     reg_loss_type='giou',
-    head_type ='tood' # yolox
+    head_type='tood'  # yolox
 )
 
 # s m l x
diff --git a/configs/detection/yolox/yolox_best_ori.py b/configs/detection/yolox/yolox_best_ori.py
index 52469b17..e984efed 100644
--- a/configs/detection/yolox/yolox_best_ori.py
+++ b/configs/detection/yolox/yolox_best_ori.py
@@ -10,7 +10,7 @@
     use_att='ASFF',
     obj_loss_type='BCE',
     reg_loss_type='giou',
-    head_type ='tood' # yolox
+    head_type='tood'  # yolox
 )
 
 # s m l x
diff --git a/configs/detection/yolox/yolox_best_relu.py b/configs/detection/yolox/yolox_best_relu.py
index ef4a5a77..87dc91c9 100644
--- a/configs/detection/yolox/yolox_best_relu.py
+++ b/configs/detection/yolox/yolox_best_relu.py
@@ -7,11 +7,11 @@
     model_type='s',  # s m l x tiny nano
     test_conf=0.01,
     nms_thre=0.65,
-    act = 'relu',
+    act='relu',
     use_att='ASFF',
     obj_loss_type='BCE',
     reg_loss_type='giou',
-    head_type ='tood' # yolox
+    head_type='tood'  # yolox
 )
 
 # s m l x
diff --git a/configs/detection/yolox/yolox_best_stack_1.py b/configs/detection/yolox/yolox_best_stack_1.py
index 0e3241ef..53a1d8e3 100644
--- a/configs/detection/yolox/yolox_best_stack_1.py
+++ b/configs/detection/yolox/yolox_best_stack_1.py
@@ -15,8 +15,7 @@
     asff_channel=16,
     stacked_convs=1,
     la_down_rate=8,
-    conv_layers=2
-)
+    conv_layers=2)
 
 # s m l x
 img_scale = (640, 640)
diff --git a/configs/detection/yolox/yolox_best_stack_2.py b/configs/detection/yolox/yolox_best_stack_2.py
index 5dbbc05b..53d865be 100644
--- a/configs/detection/yolox/yolox_best_stack_2.py
+++ b/configs/detection/yolox/yolox_best_stack_2.py
@@ -15,8 +15,7 @@
     asff_channel=16,
     stacked_convs=2,
     la_down_rate=8,
-    conv_layers=2
-)
+    conv_layers=2)
 
 # s m l x
 img_scale = (640, 640)
diff --git a/configs/detection/yolox/yolox_best_stack_3.py b/configs/detection/yolox/yolox_best_stack_3.py
index 0695c3ef..4987e0d3 100644
--- a/configs/detection/yolox/yolox_best_stack_3.py
+++ b/configs/detection/yolox/yolox_best_stack_3.py
@@ -15,8 +15,7 @@
     asff_channel=16,
     stacked_convs=3,
     la_down_rate=8,
-    conv_layers=2
-)
+    conv_layers=2)
 
 # s m l x
 img_scale = (640, 640)
diff --git a/configs/detection/yolox/yolox_best_stack_4.py b/configs/detection/yolox/yolox_best_stack_4.py
index 5a30aaa1..8d7cf0ca 100644
--- a/configs/detection/yolox/yolox_best_stack_4.py
+++ b/configs/detection/yolox/yolox_best_stack_4.py
@@ -15,8 +15,7 @@
     asff_channel=16,
     stacked_convs=4,
     la_down_rate=8,
-    conv_layers=2
-)
+    conv_layers=2)
 
 # s m l x
 img_scale = (640, 640)
diff --git a/configs/detection/yolox/yolox_best_stack_5.py b/configs/detection/yolox/yolox_best_stack_5.py
index d5a13881..07f36a7b 100644
--- a/configs/detection/yolox/yolox_best_stack_5.py
+++ b/configs/detection/yolox/yolox_best_stack_5.py
@@ -15,8 +15,7 @@
     asff_channel=16,
     stacked_convs=5,
     la_down_rate=8,
-    conv_layers=2
-)
+    conv_layers=2)
 
 # s m l x
 img_scale = (640, 640)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
index 967bb2fc..65af25b9 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
@@ -9,8 +9,7 @@
     nms_thre=0.65,
     use_att=None,
     obj_loss_type='BCE',
-    reg_loss_type='iou'
-)
+    reg_loss_type='iou')
 
 # s m l x
 img_scale = (640, 640)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff.py
index afff65fa..35d4654f 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff.py
@@ -9,8 +9,7 @@
     nms_thre=0.65,
     use_att='ASFF',
     obj_loss_type='BCE',
-    reg_loss_type='iou'
-)
+    reg_loss_type='iou')
 
 # s m l x
 img_scale = (640, 640)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_base.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_base.py
index d61dbabe..8407160f 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_base.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_base.py
@@ -65,7 +65,6 @@
     dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
 ]
 
-
 test_pipeline = [
     dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
     dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
@@ -109,10 +108,7 @@
     label_padding=False)
 
 data = dict(
-    imgs_per_gpu=16,
-    workers_per_gpu=4,
-    train=train_dataset,
-    val=val_dataset)
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
 
 # additional hooks
 interval = 10
@@ -149,12 +145,12 @@
 # optimizer
 # basic_lr_per_img = 0.01 / 64.0
 optimizer = dict(
-  type='SGD',
-  # lr=0.01,
-  lr=0.02,
-  momentum=0.9,
-  weight_decay=5e-4,
-  nesterov=True)
+    type='SGD',
+    # lr=0.01,
+    lr=0.02,
+    momentum=0.9,
+    weight_decay=5e-4,
+    nesterov=True)
 optimizer_config = {}
 
 # learning policy
@@ -191,4 +187,4 @@
 resume_from = None
 workflow = [('train', 1)]
 
-export = dict(use_jit=False)
\ No newline at end of file
+export = dict(use_jit=False)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_cbam.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_cbam.py
index d89cc8eb..d0542c2e 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_cbam.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_cbam.py
@@ -9,8 +9,7 @@
     nms_thre=0.65,
     use_att='CBAM',
     obj_loss_type='BCE',
-    reg_loss_type='iou'
-)
+    reg_loss_type='iou')
 
 # s m l x
 img_scale = (640, 640)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_ciou.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_ciou.py
index 0a3e50a2..bc272d05 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_ciou.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_ciou.py
@@ -9,8 +9,7 @@
     nms_thre=0.65,
     use_att=None,
     obj_loss_type='BCE',
-    reg_loss_type='ciou'
-)
+    reg_loss_type='ciou')
 
 # s m l x
 img_scale = (640, 640)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_diou.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_diou.py
index 7c143e28..93ae73c4 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_diou.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_diou.py
@@ -9,8 +9,7 @@
     nms_thre=0.65,
     use_att=None,
     obj_loss_type='BCE',
-    reg_loss_type='diou'
-)
+    reg_loss_type='diou')
 
 # s m l x
 img_scale = (640, 640)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_eca.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_eca.py
index 499287e4..8c8857e5 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_eca.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_eca.py
@@ -9,8 +9,7 @@
     nms_thre=0.65,
     use_att='ECA',
     obj_loss_type='BCE',
-    reg_loss_type='iou'
-)
+    reg_loss_type='iou')
 
 # s m l x
 img_scale = (640, 640)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_eiou.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_eiou.py
index 77939e15..04de5ccd 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_eiou.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_eiou.py
@@ -9,8 +9,7 @@
     nms_thre=0.65,
     use_att=None,
     obj_loss_type='BCE',
-    reg_loss_type='eiou'
-)
+    reg_loss_type='eiou')
 
 # s m l x
 img_scale = (640, 640)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_focal.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_focal.py
index 820ee1ad..abc16b56 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_focal.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_focal.py
@@ -9,8 +9,7 @@
     nms_thre=0.65,
     use_att=None,
     obj_loss_type='focal',
-    reg_loss_type='iou'
-)
+    reg_loss_type='iou')
 
 # s m l x
 img_scale = (640, 640)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_giou.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_giou.py
index e2964f11..fadf8928 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_giou.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_giou.py
@@ -9,8 +9,7 @@
     nms_thre=0.65,
     use_att=None,
     obj_loss_type='BCE',
-    reg_loss_type='giou'
-)
+    reg_loss_type='giou')
 
 # s m l x
 img_scale = (640, 640)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_se.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_se.py
index 0fa960ce..ddd54917 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_se.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_se.py
@@ -9,8 +9,7 @@
     nms_thre=0.65,
     use_att='SE',
     obj_loss_type='BCE',
-    reg_loss_type='iou'
-)
+    reg_loss_type='iou')
 
 # s m l x
 img_scale = (640, 640)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_siou.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_siou.py
index 27021d31..068ca777 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_siou.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_siou.py
@@ -9,8 +9,7 @@
     nms_thre=0.65,
     use_att=None,
     obj_loss_type='BCE',
-    reg_loss_type='siou'
-)
+    reg_loss_type='siou')
 
 # s m l x
 img_scale = (640, 640)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_siou2.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_siou2.py
index ea81125d..23291c81 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_siou2.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_siou2.py
@@ -9,8 +9,7 @@
     nms_thre=0.65,
     use_att=None,
     obj_loss_type='BCE',
-    reg_loss_type='siou2'
-)
+    reg_loss_type='siou2')
 
 # s m l x
 img_scale = (640, 640)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_sppf.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_sppf.py
index 40aee4fa..f8db6528 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_sppf.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_sppf.py
@@ -10,8 +10,7 @@
     use_att=None,
     obj_loss_type='BCE',
     reg_loss_type='iou',
-    spp_type='sppf'
-)
+    spp_type='sppf')
 
 # s m l x
 img_scale = (640, 640)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_vfocal.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_vfocal.py
index d0909cdb..0a7fa108 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_vfocal.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_vfocal.py
@@ -9,8 +9,7 @@
     nms_thre=0.65,
     use_att=None,
     obj_loss_type='v_focal',
-    reg_loss_type='iou'
-)
+    reg_loss_type='iou')
 
 # s m l x
 img_scale = (640, 640)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_tal.py b/configs/detection/yolox/yolox_s_8xb16_300e_tal.py
index ec01654a..dc25fe50 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_tal.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_tal.py
@@ -10,7 +10,7 @@
     use_att=None,
     obj_loss_type='BCE',
     reg_loss_type='iou',
-    head_type ='tood' # yolox
+    head_type='tood'  # yolox
 )
 
 # s m l x
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_giou.py b/configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_giou.py
index 7feccc12..403a7771 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_giou.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_giou.py
@@ -10,7 +10,7 @@
     use_att='ASFF',
     obj_loss_type='BCE',
     reg_loss_type='giou',
-    head_type ='tood' # yolox
+    head_type='tood'  # yolox
 )
 
 # s m l x
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_sppf_giou.py b/configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_sppf_giou.py
index 50496148..960826ea 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_sppf_giou.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_sppf_giou.py
@@ -10,9 +10,8 @@
     use_att='ASFF',
     obj_loss_type='BCE',
     reg_loss_type='giou',
-    head_type ='tood', # yolox
-    spp_type='sppf'
-)
+    head_type='tood',  # yolox
+    spp_type='sppf')
 
 # s m l x
 img_scale = (640, 640)
@@ -45,7 +44,6 @@
 # data_root = '/mnt/data/nas/data/detection/coco/'
 data_root = '/cpfs01/shared/public/dataset/coco2017/'
 
-
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_wonorm.py b/configs/detection/yolox/yolox_s_8xb16_300e_wonorm.py
index 782e5a41..b0c9895b 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_wonorm.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_wonorm.py
@@ -183,4 +183,4 @@
     ])
 
 export = dict(use_jit=False)
-mp_start_method = 'fork'
\ No newline at end of file
+mp_start_method = 'fork'
diff --git a/configs/detection/yolox/yolox_tiny_8xb16_300e_coco.py b/configs/detection/yolox/yolox_tiny_8xb16_300e_coco.py
index 24a36389..5c3dda76 100644
--- a/configs/detection/yolox/yolox_tiny_8xb16_300e_coco.py
+++ b/configs/detection/yolox/yolox_tiny_8xb16_300e_coco.py
@@ -63,7 +63,6 @@
 # data_root = 'data/coco/'
 data_root = '/apsarapangu/disk5/zxy/data/coco/'
 
-
 train_dataset = dict(
     type='DetImagesMixDataset',
     data_source=dict(
diff --git a/configs/detection/yolox/yolox_yolo6.py b/configs/detection/yolox/yolox_yolo6.py
index c96e979e..a32b6461 100644
--- a/configs/detection/yolox/yolox_yolo6.py
+++ b/configs/detection/yolox/yolox_yolo6.py
@@ -10,7 +10,7 @@
     nms_thre=0.65,
     obj_loss_type='BCE',
     reg_loss_type='giou',
-    )
+)
 
 # s m l x
 img_scale = (640, 640)
diff --git a/configs/detection/yolox/yolox_yolo6_asff_sim.py b/configs/detection/yolox/yolox_yolo6_asff_sim.py
index 9881ab69..b3bc4f3d 100644
--- a/configs/detection/yolox/yolox_yolo6_asff_sim.py
+++ b/configs/detection/yolox/yolox_yolo6_asff_sim.py
@@ -17,7 +17,7 @@
     down_rate=None,
     use_dconv=False,
     use_expand=True,
-    )
+)
 
 # s m l x
 img_scale = (640, 640)
diff --git a/configs/detection/yolox/yolox_yolo6_att.py b/configs/detection/yolox/yolox_yolo6_att.py
index 4da952ab..b77dffc1 100644
--- a/configs/detection/yolox/yolox_yolo6_att.py
+++ b/configs/detection/yolox/yolox_yolo6_att.py
@@ -14,7 +14,7 @@
     reg_loss_type='giou',
     head_type='tood',
     la_down_rate=32,
-    )
+)
 
 # s m l x
 img_scale = (640, 640)
@@ -48,7 +48,6 @@
 # data_root = '/mnt/data/nas/data/detection/coco/'
 # data_root = '/cpfs01/shared/public/dataset/coco2017/'
 
-
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 
diff --git a/configs/detection/yolox/yolox_yolo6_att_relu.py b/configs/detection/yolox/yolox_yolo6_att_relu.py
index a2f51299..a9b418ac 100644
--- a/configs/detection/yolox/yolox_yolo6_att_relu.py
+++ b/configs/detection/yolox/yolox_yolo6_att_relu.py
@@ -14,8 +14,7 @@
     reg_loss_type='giou',
     head_type='tood',
     la_down_rate=32,
-    act='relu'
-    )
+    act='relu')
 
 # s m l x
 img_scale = (640, 640)
@@ -49,7 +48,6 @@
 # data_root = '/mnt/data/nas/data/detection/coco/'
 data_root = '/cpfs01/shared/public/dataset/coco2017/'
 
-
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 
diff --git a/configs/detection/yolox/yolox_yolo6_att_sim.py b/configs/detection/yolox/yolox_yolo6_att_sim.py
index 26d51a2a..9418db65 100644
--- a/configs/detection/yolox/yolox_yolo6_att_sim.py
+++ b/configs/detection/yolox/yolox_yolo6_att_sim.py
@@ -18,7 +18,7 @@
     down_rate=None,
     use_dconv=False,
     use_expand=True,
-    )
+)
 
 # s m l x
 img_scale = (640, 640)
@@ -52,7 +52,6 @@
 data_root = '/mnt/data/nas/data/detection/coco/'
 # data_root = '/cpfs01/shared/public/dataset/coco2017/'
 
-
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 
diff --git a/configs/detection/yolox/yolox_yolo6_att_sim_1.py b/configs/detection/yolox/yolox_yolo6_att_sim_1.py
index e6fe76e2..e00eb7c1 100644
--- a/configs/detection/yolox/yolox_yolo6_att_sim_1.py
+++ b/configs/detection/yolox/yolox_yolo6_att_sim_1.py
@@ -19,7 +19,7 @@
     use_dconv=False,
     use_expand=True,
     # norm_cfg = 'SyncBN'
-    )
+)
 
 sync_bn = True
 
@@ -55,7 +55,6 @@
 # data_root = '/mnt/data/nas/data/detection/coco/'
 # data_root = '/cpfs01/shared/public/dataset/coco2017/'
 
-
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 
diff --git a/configs/detection/yolox/yolox_yolo6_att_sim_16.py b/configs/detection/yolox/yolox_yolo6_att_sim_16.py
index b94c2b4b..ae45620a 100644
--- a/configs/detection/yolox/yolox_yolo6_att_sim_16.py
+++ b/configs/detection/yolox/yolox_yolo6_att_sim_16.py
@@ -18,7 +18,7 @@
     down_rate=16,
     use_dconv=False,
     use_expand=True,
-    )
+)
 
 # s m l x
 img_scale = (640, 640)
@@ -52,7 +52,6 @@
 data_root = '/mnt/data/nas/data/detection/coco/'
 # data_root = '/cpfs01/shared/public/dataset/coco2017/'
 
-
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 
diff --git a/configs/detection/yolox/yolox_yolo6_att_sim_32.py b/configs/detection/yolox/yolox_yolo6_att_sim_32.py
index 273b6e7f..ab77c87b 100644
--- a/configs/detection/yolox/yolox_yolo6_att_sim_32.py
+++ b/configs/detection/yolox/yolox_yolo6_att_sim_32.py
@@ -18,7 +18,7 @@
     down_rate=32,
     use_dconv=False,
     use_expand=True,
-    )
+)
 
 # s m l x
 img_scale = (640, 640)
@@ -52,7 +52,6 @@
 data_root = '/mnt/data/nas/data/detection/coco/'
 # data_root = '/cpfs01/shared/public/dataset/coco2017/'
 
-
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 
diff --git a/configs/detection/yolox/yolox_yolo6_att_sim_8.py b/configs/detection/yolox/yolox_yolo6_att_sim_8.py
index b1b6b1c7..05f0d9ac 100644
--- a/configs/detection/yolox/yolox_yolo6_att_sim_8.py
+++ b/configs/detection/yolox/yolox_yolo6_att_sim_8.py
@@ -18,7 +18,7 @@
     down_rate=8,
     use_dconv=False,
     use_expand=True,
-    )
+)
 
 # s m l x
 img_scale = (640, 640)
@@ -52,7 +52,6 @@
 data_root = '/mnt/data/nas/data/detection/coco/'
 # data_root = '/cpfs01/shared/public/dataset/coco2017/'
 
-
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 
diff --git a/configs/detection/yolox/yolox_yolo6_att_sim_d.py b/configs/detection/yolox/yolox_yolo6_att_sim_d.py
index 75270d10..773a24c9 100644
--- a/configs/detection/yolox/yolox_yolo6_att_sim_d.py
+++ b/configs/detection/yolox/yolox_yolo6_att_sim_d.py
@@ -18,7 +18,7 @@
     down_rate=None,
     use_dconv=True,
     use_expand=True,
-    )
+)
 
 # s m l x
 img_scale = (640, 640)
@@ -52,7 +52,6 @@
 data_root = '/mnt/data/nas/data/detection/coco/'
 # data_root = '/cpfs01/shared/public/dataset/coco2017/'
 
-
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 
diff --git a/configs/detection/yolox/yolox_yolo6_att_sim_no_expand.py b/configs/detection/yolox/yolox_yolo6_att_sim_no_expand.py
index 737c9fed..2f94459f 100644
--- a/configs/detection/yolox/yolox_yolo6_att_sim_no_expand.py
+++ b/configs/detection/yolox/yolox_yolo6_att_sim_no_expand.py
@@ -18,7 +18,7 @@
     down_rate=None,
     use_dconv=False,
     use_expand=False,
-    )
+)
 
 # s m l x
 img_scale = (640, 640)
@@ -52,7 +52,6 @@
 data_root = '/mnt/data/nas/data/detection/coco/'
 # data_root = '/cpfs01/shared/public/dataset/coco2017/'
 
-
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 
diff --git a/configs/detection/yolox/yolox_yolo6_gsconv.py b/configs/detection/yolox/yolox_yolo6_gsconv.py
index dfaaf403..75a6cb25 100644
--- a/configs/detection/yolox/yolox_yolo6_gsconv.py
+++ b/configs/detection/yolox/yolox_yolo6_gsconv.py
@@ -10,8 +10,7 @@
     nms_thre=0.65,
     obj_loss_type='BCE',
     reg_loss_type='giou',
-    neck = 'gsconv'
-    )
+    neck='gsconv')
 
 # s m l x
 img_scale = (640, 640)
diff --git a/configs/detection/yolox/yolox_yolo6_gsconv_asff_sim.py b/configs/detection/yolox/yolox_yolo6_gsconv_asff_sim.py
index a90f13c2..5573c136 100644
--- a/configs/detection/yolox/yolox_yolo6_gsconv_asff_sim.py
+++ b/configs/detection/yolox/yolox_yolo6_gsconv_asff_sim.py
@@ -10,7 +10,7 @@
     nms_thre=0.65,
     obj_loss_type='BCE',
     reg_loss_type='giou',
-    neck = 'gsconv',
+    neck='gsconv',
     use_att='ASFF_sim',
     asff_channel=2,
     la_down_rate=32,
@@ -18,10 +18,9 @@
     down_rate=None,
     use_dconv=False,
     use_expand=True,
+)
 
-    )
-
-find_unused_parameters=True
+find_unused_parameters = True
 # s m l x
 img_scale = (640, 640)
 random_size = (14, 26)
diff --git a/configs/detection/yolox/yolox_yolo6_gsconv_part.py b/configs/detection/yolox/yolox_yolo6_gsconv_part.py
index d023d427..cbb0dc78 100644
--- a/configs/detection/yolox/yolox_yolo6_gsconv_part.py
+++ b/configs/detection/yolox/yolox_yolo6_gsconv_part.py
@@ -10,9 +10,8 @@
     nms_thre=0.65,
     obj_loss_type='BCE',
     reg_loss_type='giou',
-    neck = 'gsconv',
-    neck_mode = 'part'
-    )
+    neck='gsconv',
+    neck_mode='part')
 
 # s m l x
 img_scale = (640, 640)
diff --git a/configs/detection/yolox/yolox_yolo6_head_ori.py b/configs/detection/yolox/yolox_yolo6_head_ori.py
index ffeb109a..05914423 100644
--- a/configs/detection/yolox/yolox_yolo6_head_ori.py
+++ b/configs/detection/yolox/yolox_yolo6_head_ori.py
@@ -19,7 +19,7 @@
     use_dconv=False,
     use_expand=True,
     # norm_cfg = 'SyncBN'
-    )
+)
 
 sync_bn = True
 
@@ -55,7 +55,6 @@
 # data_root = '/mnt/data/nas/data/detection/coco/'
 # data_root = '/cpfs01/shared/public/dataset/coco2017/'
 
-
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 
diff --git a/configs/detection/yolox/yolox_yolo6_head_tood.py b/configs/detection/yolox/yolox_yolo6_head_tood.py
index c349c4d9..b8142c12 100644
--- a/configs/detection/yolox/yolox_yolo6_head_tood.py
+++ b/configs/detection/yolox/yolox_yolo6_head_tood.py
@@ -19,7 +19,7 @@
     use_dconv=False,
     use_expand=True,
     # norm_cfg = 'SyncBN'
-    )
+)
 
 # sync_bn = True
 
@@ -55,7 +55,6 @@
 # data_root = '/mnt/data/nas/data/detection/coco/'
 # data_root = '/cpfs01/shared/public/dataset/coco2017/'
 
-
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 
diff --git a/configs/detection/yolox/yolox_yolo6_yoloe_head.py b/configs/detection/yolox/yolox_yolo6_yoloe_head.py
index 4d49a570..be34b9a8 100644
--- a/configs/detection/yolox/yolox_yolo6_yoloe_head.py
+++ b/configs/detection/yolox/yolox_yolo6_yoloe_head.py
@@ -14,7 +14,7 @@
     reg_loss_type='giou',
     head_type='ppyoloe',
     la_down_rate=32,
-    )
+)
 
 # s m l x
 img_scale = (640, 640)
@@ -48,7 +48,6 @@
 # data_root = '/mnt/data/nas/data/detection/coco/'
 # data_root = '/cpfs01/shared/public/dataset/coco2017/'
 
-
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 
diff --git a/easycv/apis/export.py b/easycv/apis/export.py
index 2af904a8..060743a5 100644
--- a/easycv/apis/export.py
+++ b/easycv/apis/export.py
@@ -14,7 +14,6 @@
 from easycv.models import (DINO, MOCO, SWAV, YOLOX, Classification, MoBY,
                            build_model)
 from easycv.models.backbones.repvgg_yolox_backbone import RepVGGBlock
-
 from easycv.utils.bbox_util import scale_coords
 from easycv.utils.checkpoint import load_checkpoint
 
@@ -22,21 +21,25 @@
     'export', 'PreProcess', 'DetPostProcess', 'End2endModelExportWrapper'
 ]
 
+
 def reparameterize_models(model):
-    """ reparameterize model for inference, especially for 
+    """ reparameterize model for inference, especially for
             1. rep conv block : merge 3x3 and 1x1 weights
         calls each module's switch_to_deploy recursively
     Args:
         model: nn.Module
     """
-    reparameterize_count=0
+    reparameterize_count = 0
     for layer in model.modules():
-        reparameterize_count+=1
+        reparameterize_count += 1
         if isinstance(layer, RepVGGBlock):
             layer.switch_to_deploy()
-    logging.info('export : PAI-export reparameterize_count(RepVGGBlock, ) switch to deploy with {} blocks'.format(reparameterize_count))
+    logging.info(
+        'export : PAI-export reparameterize_count(RepVGGBlock, ) switch to deploy with {} blocks'
+        .format(reparameterize_count))
     return model
 
+
 def export(cfg, ckpt_path, filename):
     """ export model for inference
 
@@ -230,7 +233,6 @@ def _export_yolox(model, cfg, filename):
                 else:
                     input = 255 * torch.rand(img_scale + (3, batch_size))
 
-
             yolox_blade = blade_optimize(
                 script_model=model,
                 model=yolox_trace,
@@ -667,12 +669,15 @@ def __init__(self,
 
         self.example_inputs = example_inputs
         self.preprocess_fn = preprocess_fn
-        self.ignore_postprocess = getattr(self.model, 'ignore_postprocess', False) 
+        self.ignore_postprocess = getattr(self.model, 'ignore_postprocess',
+                                          False)
         if not self.ignore_postprocess:
             self.postprocess_fn = postprocess_fn
         else:
             self.postprocess_fn = None
-        logging.warning("Model {} ignore_postprocess set to be {} during export !".format(type(model), self.ignore_postprocess))
+        logging.warning(
+            'Model {} ignore_postprocess set to be {} during export !'.format(
+                type(model), self.ignore_postprocess))
         self.trace_model = trace_model
         if self.trace_model:
             self.trace_module()
diff --git a/easycv/apis/train.py b/easycv/apis/train.py
index fbdb99f9..ff8b2715 100644
--- a/easycv/apis/train.py
+++ b/easycv/apis/train.py
@@ -89,7 +89,7 @@ def train_model(model,
 
     # SyncBatchNorm
     open_sync_bn = cfg.get('sync_bn', False)
-    print("!!Sync_bn",open_sync_bn)
+    print('!!Sync_bn', open_sync_bn)
 
     if open_sync_bn:
         model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
diff --git a/easycv/hooks/yolox_mode_switch_hook.py b/easycv/hooks/yolox_mode_switch_hook.py
index 7b2afddc..c723cab9 100644
--- a/easycv/hooks/yolox_mode_switch_hook.py
+++ b/easycv/hooks/yolox_mode_switch_hook.py
@@ -41,5 +41,5 @@ def before_train_epoch(self, runner):
             runner.logger.info('Add additional L1 loss now!')
             model.head.use_l1 = True
 
-        if hasattr(runner.model.module,'epoch_counter'):
+        if hasattr(runner.model.module, 'epoch_counter'):
             runner.model.module.epoch_counter = epoch
diff --git a/easycv/models/backbones/__init__.py b/easycv/models/backbones/__init__.py
index 3aa12261..9e5241a9 100644
--- a/easycv/models/backbones/__init__.py
+++ b/easycv/models/backbones/__init__.py
@@ -12,11 +12,11 @@
 from .mnasnet import MNASNet
 from .mobilenetv2 import MobileNetV2
 from .pytorch_image_models_wrapper import *
+from .repvgg_yolox_backbone import RepVGGYOLOX
 from .resnest import ResNeSt
 from .resnet import ResNet
 from .resnet_jit import ResNetJIT
 from .resnext import ResNeXt
 from .shuffle_transformer import ShuffleTransformer
 from .swin_transformer_dynamic import SwinTransformer
-from .repvgg_yolox_backbone import RepVGGYOLOX
 from .vitdet import ViTDet
diff --git a/easycv/models/backbones/darknet.py b/easycv/models/backbones/darknet.py
index ebd9025a..49c1dc2b 100644
--- a/easycv/models/backbones/darknet.py
+++ b/easycv/models/backbones/darknet.py
@@ -1,7 +1,8 @@
 # Copyright (c) Alibaba, Inc. and its affiliates.
 
-from torch import nn
 import torch
+from torch import nn
+
 from .network_blocks import (BaseConv, CSPLayer, DWConv, Focus, ResLayer,
                              SPPBottleneck, SPPFBottleneck)
 
@@ -10,14 +11,12 @@ class Darknet(nn.Module):
     # number of blocks from dark2 to dark5.
     depth2blocks = {21: [1, 2, 2, 1], 53: [2, 8, 8, 4]}
 
-    def __init__(
-            self,
-            depth,
-            in_channels=3,
-            stem_out_channels=32,
-            out_features=('dark3', 'dark4', 'dark5'),
-            spp_type = 'spp'
-    ):
+    def __init__(self,
+                 depth,
+                 in_channels=3,
+                 stem_out_channels=32,
+                 out_features=('dark3', 'dark4', 'dark5'),
+                 spp_type='spp'):
         """
         Args:
             depth (int): depth of darknet used in model, usually use [21, 53] for this param.
@@ -50,17 +49,17 @@ def __init__(
             *self.make_group_layer(in_channels, num_blocks[2], stride=2))
         in_channels *= 2  # 512
 
-        if spp_type=='spp':
+        if spp_type == 'spp':
             self.dark5 = nn.Sequential(
                 *self.make_group_layer(in_channels, num_blocks[3], stride=2),
                 *self.make_spp_block([in_channels, in_channels * 2],
                                      in_channels * 2),
             )
-        elif spp_type=='sppf':
+        elif spp_type == 'sppf':
             self.dark5 = nn.Sequential(
                 *self.make_group_layer(in_channels, num_blocks[3], stride=2),
                 *self.make_sppf_block([in_channels, in_channels * 2],
-                                       in_channels * 2),
+                                      in_channels * 2),
             )
 
     def make_group_layer(self,
@@ -129,15 +128,13 @@ def forward(self, x):
 
 class CSPDarknet(nn.Module):
 
-    def __init__(
-        self,
-        dep_mul,
-        wid_mul,
-        out_features=('dark3', 'dark4', 'dark5'),
-        depthwise=False,
-        act='silu',
-        spp_type='spp'
-    ):
+    def __init__(self,
+                 dep_mul,
+                 wid_mul,
+                 out_features=('dark3', 'dark4', 'dark5'),
+                 depthwise=False,
+                 act='silu',
+                 spp_type='spp'):
         super().__init__()
         assert out_features, 'please provide output features of Darknet'
         self.out_features = out_features
@@ -186,7 +183,7 @@ def __init__(
         )
 
         # dark5
-        if spp_type=='spp':
+        if spp_type == 'spp':
             self.dark5 = nn.Sequential(
                 Conv(base_channels * 8, base_channels * 16, 3, 2, act=act),
                 SPPBottleneck(
@@ -201,7 +198,7 @@ def __init__(
                 ),
             )
 
-        elif spp_type=='sppf':
+        elif spp_type == 'sppf':
             self.dark5 = nn.Sequential(
                 Conv(base_channels * 8, base_channels * 16, 3, 2, act=act),
                 SPPFBottleneck(
diff --git a/easycv/models/backbones/efficientrep.py b/easycv/models/backbones/efficientrep.py
index c3ae4a1f..9ddc6268 100644
--- a/easycv/models/backbones/efficientrep.py
+++ b/easycv/models/backbones/efficientrep.py
@@ -1,9 +1,9 @@
-from torch import nn
-from easycv.models.backbones.yolo6_blocks import RepVGGBlock, RepBlock, SimSPPF
 import math
-import torch
 
+import torch
+from torch import nn
 
+from easycv.models.backbones.yolo6_blocks import RepBlock, RepVGGBlock, SimSPPF
 
 
 class EfficientRep(nn.Module):
@@ -12,12 +12,7 @@ class EfficientRep(nn.Module):
     With rep-style struct, EfficientRep is friendly to high-computation hardware(e.g. GPU).
     '''
 
-    def __init__(
-        self,
-        in_channels=3,
-        channels_list=None,
-        num_repeats=None
-    ):
+    def __init__(self, in_channels=3, channels_list=None, num_repeats=None):
         super().__init__()
 
         assert channels_list is not None
@@ -27,69 +22,56 @@ def __init__(
             in_channels=in_channels,
             out_channels=channels_list[0],
             kernel_size=3,
-            stride=2
-        )
+            stride=2)
 
         self.ERBlock_2 = nn.Sequential(
             RepVGGBlock(
                 in_channels=channels_list[0],
                 out_channels=channels_list[1],
                 kernel_size=3,
-                stride=2
-            ),
+                stride=2),
             RepBlock(
                 in_channels=channels_list[1],
                 out_channels=channels_list[1],
-                n=num_repeats[1]
-            )
-        )
+                n=num_repeats[1]))
 
         self.ERBlock_3 = nn.Sequential(
             RepVGGBlock(
                 in_channels=channels_list[1],
                 out_channels=channels_list[2],
                 kernel_size=3,
-                stride=2
-            ),
+                stride=2),
             RepBlock(
                 in_channels=channels_list[2],
                 out_channels=channels_list[2],
                 n=num_repeats[2],
-            )
-        )
+            ))
 
         self.ERBlock_4 = nn.Sequential(
             RepVGGBlock(
                 in_channels=channels_list[2],
                 out_channels=channels_list[3],
                 kernel_size=3,
-                stride=2
-            ),
+                stride=2),
             RepBlock(
                 in_channels=channels_list[3],
                 out_channels=channels_list[3],
-                n=num_repeats[3]
-            )
-        )
+                n=num_repeats[3]))
 
         self.ERBlock_5 = nn.Sequential(
             RepVGGBlock(
                 in_channels=channels_list[3],
                 out_channels=channels_list[4],
                 kernel_size=3,
-                stride=2
-            ),
+                stride=2),
             RepBlock(
                 in_channels=channels_list[4],
                 out_channels=channels_list[4],
-                n=num_repeats[4]
-            ),
+                n=num_repeats[4]),
             SimSPPF(
                 in_channels=channels_list[4],
                 out_channels=channels_list[4],
-                kernel_size=5
-            )
-        )
+                kernel_size=5))
 
     def forward(self, x):
 
@@ -105,7 +87,8 @@ def forward(self, x):
 
         return tuple(outputs)
 
-if __name__=='__main__':
+
+if __name__ == '__main__':
 
     from torchsummaryX import summary
 
@@ -118,21 +101,26 @@ def forward(self, x):
 
     channels = 3
 
-    num_repeat = [(max(round(i * depth_mul), 1) if i > 1 else i) for i in
-                  (num_repeat_backbone + num_repeat_neck)]
+    num_repeat = [(max(round(i * depth_mul), 1) if i > 1 else i)
+                  for i in (num_repeat_backbone + num_repeat_neck)]
 
     def make_divisible(x, divisor):
         # Upward revision the value x to make it evenly divisible by the divisor.
         return math.ceil(x / divisor) * divisor
 
-    channels_list = [make_divisible(i * width_mul, 8) for i in (channels_list_backbone + channels_list_neck)]
-    model = EfficientRep(in_channels=channels, channels_list=channels_list, num_repeats=num_repeat)
+    channels_list = [
+        make_divisible(i * width_mul, 8)
+        for i in (channels_list_backbone + channels_list_neck)
+    ]
+    model = EfficientRep(
+        in_channels=channels,
+        channels_list=channels_list,
+        num_repeats=num_repeat)
     for layer in model.modules():
         if isinstance(layer, RepVGGBlock):
             layer.switch_to_deploy()
 
     model = model.cuda()
 
-    a = torch.randn(1,3,640,640).cuda()
-    summary(model,a)
-
+    a = torch.randn(1, 3, 640, 640).cuda()
+    summary(model, a)
diff --git a/easycv/models/backbones/network_blocks.py b/easycv/models/backbones/network_blocks.py
index 59c9a582..5bbfaec8 100644
--- a/easycv/models/backbones/network_blocks.py
+++ b/easycv/models/backbones/network_blocks.py
@@ -165,7 +165,8 @@ def __init__(self,
         #     nn.MaxPool2d(kernel_size=ks, stride=1, padding=ks // 2)
         #     for ks in kernel_sizes
         # ])
-        self.m = nn.MaxPool2d(kernel_size=kernel_size, stride=1, padding=kernel_size // 2)
+        self.m = nn.MaxPool2d(
+            kernel_size=kernel_size, stride=1, padding=kernel_size // 2)
 
         conv2_channels = hidden_channels * 4
         self.conv2 = BaseConv(
@@ -285,6 +286,7 @@ def forward(self, x):
         )
         return self.conv(x)
 
+
 class GSConv(nn.Module):
     # GSConv https://github.com/AlanLi1997/slim-neck-by-gsconv
     def __init__(self, c1, c2, k=1, s=1, g=1, act='silu'):
@@ -313,12 +315,10 @@ def __init__(self, c1, c2, k=3, s=1):
         c_ = c2 // 2
         # for lighting
         self.conv_lighting = nn.Sequential(
-            GSConv(c1, c_, 1, 1),
-            GSConv(c_, c2, 1, 1, act='identity'))
+            GSConv(c1, c_, 1, 1), GSConv(c_, c2, 1, 1, act='identity'))
         # for receptive field
         self.conv = nn.Sequential(
-            GSConv(c1, c_, 3, 1),
-            GSConv(c_, c2, 3, 1, act='identity'))
+            GSConv(c1, c_, 3, 1), GSConv(c_, c2, 3, 1, act='identity'))
         self.shortcut = nn.Identity()
 
     def forward(self, x):
@@ -331,7 +331,7 @@ def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
         super().__init__()
         c_ = int(c2 * e)
         self.cv1 = BaseConv(c1, c_, 1, 1)
-        self.cv2 = BaseConv(2 * c_, c2, 1,1)
+        self.cv2 = BaseConv(2 * c_, c2, 1, 1)
         self.m = nn.Sequential(*(GSBottleneck(c_, c_) for _ in range(n)))
 
     def forward(self, x):
diff --git a/easycv/models/backbones/repvgg_yolox_backbone.py b/easycv/models/backbones/repvgg_yolox_backbone.py
index d7e2055e..df76d5ea 100644
--- a/easycv/models/backbones/repvgg_yolox_backbone.py
+++ b/easycv/models/backbones/repvgg_yolox_backbone.py
@@ -1,26 +1,49 @@
 # borrow some code from https://github.com/DingXiaoH/RepVGG/repvgg.py MIT2.0
-import torch.nn as nn
-import numpy as np
-import torch
 import copy
-import warnings
 import math
+import warnings
+
+import numpy as np
+import torch
+import torch.nn as nn
+
 
 def make_divisible(x, divisor):
     # Round x up to the nearest multiple of the divisor.
     return math.ceil(x / divisor) * divisor
 
+
 def conv_bn(in_channels, out_channels, kernel_size, stride, padding, groups=1):
     '''Basic cell for rep-style block, including conv and bn'''
     result = nn.Sequential()
-    result.add_module('conv', nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
-                                                  kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, bias=False))
+    result.add_module(
+        'conv',
+        nn.Conv2d(
+            in_channels=in_channels,
+            out_channels=out_channels,
+            kernel_size=kernel_size,
+            stride=stride,
+            padding=padding,
+            groups=groups,
+            bias=False))
     result.add_module('bn', nn.BatchNorm2d(num_features=out_channels))
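     # Assumed rationale: the conv is built with bias=False because the BN
     # added just above supplies the bias; both are later folded into a single
     # biased conv by RepVGGBlock.get_equivalent_kernel_bias() when switching
     # to deploy mode.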
     return result
 
+
 class RepVGGBlock(nn.Module):
-    def __init__(self, in_channels, out_channels, kernel_size=3,
-                 stride=1, padding=1, dilation=1, groups=1, padding_mode='zeros', deploy=False, use_se=False, act=None):
+
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 kernel_size=3,
+                 stride=1,
+                 padding=1,
+                 dilation=1,
+                 groups=1,
+                 padding_mode='zeros',
+                 deploy=False,
+                 use_se=False,
+                 act=None):
         super(RepVGGBlock, self).__init__()
         self.deploy = deploy
         self.groups = groups
@@ -34,21 +57,43 @@ def __init__(self, in_channels, out_channels, kernel_size=3,
         self.nonlinearity = nn.ReLU()
 
         if use_se:
-            self.se = SEBlock(out_channels, internal_neurons=out_channels // 16)
+            self.se = SEBlock(
+                out_channels, internal_neurons=out_channels // 16)
         else:
             self.se = nn.Identity()
 
         if deploy:
-            self.rbr_reparam = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride,
-                                      padding=padding, dilation=dilation, groups=groups, bias=True, padding_mode=padding_mode)
+            self.rbr_reparam = nn.Conv2d(
+                in_channels=in_channels,
+                out_channels=out_channels,
+                kernel_size=kernel_size,
+                stride=stride,
+                padding=padding,
+                dilation=dilation,
+                groups=groups,
+                bias=True,
+                padding_mode=padding_mode)
 
         else:
-            self.rbr_identity = nn.BatchNorm2d(num_features=in_channels) if out_channels == in_channels and stride == 1 else None
-            self.rbr_dense = conv_bn(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups)
-            self.rbr_1x1 = conv_bn(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride, padding=padding_11, groups=groups)
+            self.rbr_identity = nn.BatchNorm2d(
+                num_features=in_channels
+            ) if out_channels == in_channels and stride == 1 else None
+            self.rbr_dense = conv_bn(
+                in_channels=in_channels,
+                out_channels=out_channels,
+                kernel_size=kernel_size,
+                stride=stride,
+                padding=padding,
+                groups=groups)
+            self.rbr_1x1 = conv_bn(
+                in_channels=in_channels,
+                out_channels=out_channels,
+                kernel_size=1,
+                stride=stride,
+                padding=padding_11,
+                groups=groups)
             print('RepVGG Block, identity = ', self.rbr_identity)
 
-
     def forward(self, inputs):
         if hasattr(self, 'rbr_reparam'):
             return self.nonlinearity(self.se(self.rbr_reparam(inputs)))
@@ -58,8 +103,8 @@ def forward(self, inputs):
         else:
             id_out = self.rbr_identity(inputs)
 
-        return self.nonlinearity(self.se(self.rbr_dense(inputs) + self.rbr_1x1(inputs) + id_out))
-
+        return self.nonlinearity(
+            self.se(self.rbr_dense(inputs) + self.rbr_1x1(inputs) + id_out))
 
     #   Optional. This improves the accuracy and facilitates quantization.
     #   1.  Cancel the original weight decay on rbr_dense.conv.weight and rbr_1x1.conv.weight.
@@ -72,31 +117,37 @@ def forward(self, inputs):
     def get_custom_L2(self):
         K3 = self.rbr_dense.conv.weight
         K1 = self.rbr_1x1.conv.weight
-        t3 = (self.rbr_dense.bn.weight / ((self.rbr_dense.bn.running_var + self.rbr_dense.bn.eps).sqrt())).reshape(-1, 1, 1, 1).detach()
-        t1 = (self.rbr_1x1.bn.weight / ((self.rbr_1x1.bn.running_var + self.rbr_1x1.bn.eps).sqrt())).reshape(-1, 1, 1, 1).detach()
-
-        l2_loss_circle = (K3 ** 2).sum() - (K3[:, :, 1:2, 1:2] ** 2).sum()      # The L2 loss of the "circle" of weights in 3x3 kernel. Use regular L2 on them.
-        eq_kernel = K3[:, :, 1:2, 1:2] * t3 + K1 * t1                           # The equivalent resultant central point of 3x3 kernel.
-        l2_loss_eq_kernel = (eq_kernel ** 2 / (t3 ** 2 + t1 ** 2)).sum()        # Normalize for an L2 coefficient comparable to regular L2.
+        t3 = (self.rbr_dense.bn.weight /
+              ((self.rbr_dense.bn.running_var +
+                self.rbr_dense.bn.eps).sqrt())).reshape(-1, 1, 1, 1).detach()
+        t1 = (self.rbr_1x1.bn.weight / ((self.rbr_1x1.bn.running_var +
+                                         self.rbr_1x1.bn.eps).sqrt())).reshape(
+                                             -1, 1, 1, 1).detach()
+
+        # The L2 loss of the "circle" of weights in the 3x3 kernel; use
+        # regular L2 on them.
+        l2_loss_circle = (K3**2).sum() - (K3[:, :, 1:2, 1:2]**2).sum()
+        # The equivalent resultant central point of the 3x3 kernel.
+        eq_kernel = K3[:, :, 1:2, 1:2] * t3 + K1 * t1
+        # Normalize for an L2 coefficient comparable to regular L2.
+        l2_loss_eq_kernel = (eq_kernel**2 / (t3**2 + t1**2)).sum()
         return l2_loss_eq_kernel + l2_loss_circle
 
-
-
     #   This func derives the equivalent kernel and bias in a DIFFERENTIABLE way.
     #   You can get the equivalent kernel and bias at any time and do whatever you want,
-        #   for example, apply some penalties or constraints during training, just like you do to the other models.
+    #   for example, apply some penalties or constraints during training, just like you do to the other models.
     #   May be useful for quantization or pruning.
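     #   A sketch of the fusion assumed below: _fuse_bn_tensor folds each
     #   conv+BN branch into W * gamma / sqrt(running_var + eps) with bias
     #   beta - running_mean * gamma / sqrt(running_var + eps); the 1x1 branch
     #   is zero-padded to 3x3 so all three branches sum into one 3x3 conv.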
     def get_equivalent_kernel_bias(self):
         kernel3x3, bias3x3 = self._fuse_bn_tensor(self.rbr_dense)
         kernel1x1, bias1x1 = self._fuse_bn_tensor(self.rbr_1x1)
         kernelid, biasid = self._fuse_bn_tensor(self.rbr_identity)
-        return kernel3x3 + self._pad_1x1_to_3x3_tensor(kernel1x1) + kernelid, bias3x3 + bias1x1 + biasid
+        return kernel3x3 + self._pad_1x1_to_3x3_tensor(
+            kernel1x1) + kernelid, bias3x3 + bias1x1 + biasid
 
     def _pad_1x1_to_3x3_tensor(self, kernel1x1):
         if kernel1x1 is None:
             return 0
         else:
-            return torch.nn.functional.pad(kernel1x1, [1,1,1,1])
+            return torch.nn.functional.pad(kernel1x1, [1, 1, 1, 1])
 
     def _fuse_bn_tensor(self, branch):
         if branch is None:
@@ -112,10 +163,12 @@ def _fuse_bn_tensor(self, branch):
             assert isinstance(branch, nn.BatchNorm2d)
             if not hasattr(self, 'id_tensor'):
                 input_dim = self.in_channels // self.groups
-                kernel_value = np.zeros((self.in_channels, input_dim, 3, 3), dtype=np.float32)
+                kernel_value = np.zeros((self.in_channels, input_dim, 3, 3),
+                                        dtype=np.float32)
                 for i in range(self.in_channels):
                     kernel_value[i, i % input_dim, 1, 1] = 1
-                self.id_tensor = torch.from_numpy(kernel_value).to(branch.weight.device)
+                self.id_tensor = torch.from_numpy(kernel_value).to(
+                    branch.weight.device)
             kernel = self.id_tensor
             running_mean = branch.running_mean
             running_var = branch.running_var
@@ -130,9 +183,15 @@ def switch_to_deploy(self):
         if hasattr(self, 'rbr_reparam'):
             return
         kernel, bias = self.get_equivalent_kernel_bias()
-        self.rbr_reparam = nn.Conv2d(in_channels=self.rbr_dense.conv.in_channels, out_channels=self.rbr_dense.conv.out_channels,
-                                     kernel_size=self.rbr_dense.conv.kernel_size, stride=self.rbr_dense.conv.stride,
-                                     padding=self.rbr_dense.conv.padding, dilation=self.rbr_dense.conv.dilation, groups=self.rbr_dense.conv.groups, bias=True)
+        self.rbr_reparam = nn.Conv2d(
+            in_channels=self.rbr_dense.conv.in_channels,
+            out_channels=self.rbr_dense.conv.out_channels,
+            kernel_size=self.rbr_dense.conv.kernel_size,
+            stride=self.rbr_dense.conv.stride,
+            padding=self.rbr_dense.conv.padding,
+            dilation=self.rbr_dense.conv.dilation,
+            groups=self.rbr_dense.conv.groups,
+            bias=True)
         self.rbr_reparam.weight.data = kernel
         self.rbr_reparam.bias.data = bias
         for para in self.parameters():
@@ -145,9 +204,18 @@ def switch_to_deploy(self):
             self.__delattr__('id_tensor')
         self.deploy = True
 
+
 class ConvBNAct(nn.Module):
     '''Conv + BN with a configurable activation (ReLU or SiLU)'''
-    def __init__(self, in_channels, out_channels, kernel_size, stride, groups=1, bias=False, act='relu'):
+
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 kernel_size,
+                 stride,
+                 groups=1,
+                 bias=False,
+                 act='relu'):
         super().__init__()
         padding = kernel_size // 2
         self.conv = nn.Conv2d(
@@ -161,8 +229,8 @@ def __init__(self, in_channels, out_channels, kernel_size, stride, groups=1, bia
         )
         self.bn = nn.BatchNorm2d(out_channels)
 
-        if act =='relu':
-            self.act=nn.ReLU()
+        if act == 'relu':
+            self.act = nn.ReLU()
         if act == 'silu':
             self.act = nn.SiLU()
 
@@ -172,22 +240,55 @@ def forward(self, x):
     def forward_fuse(self, x):
         return self.act(self.conv(x))
 
+
 class ConvBNReLU(ConvBNAct):
-    def __init__(self, in_channels, out_channels, kernel_size, stride, groups=1, bias=False):
-        super().__init__(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, groups=groups, bias=bias, act='relu')
+
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 kernel_size,
+                 stride,
+                 groups=1,
+                 bias=False):
+        super().__init__(
+            in_channels=in_channels,
+            out_channels=out_channels,
+            kernel_size=kernel_size,
+            stride=stride,
+            groups=groups,
+            bias=bias,
+            act='relu')
+
 
 class ConvBNSiLU(ConvBNAct):
-    def __init__(self, in_channels, out_channels, kernel_size, stride, groups=1, bias=False):
-        super().__init__(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, groups=groups, bias=bias, act='silu')
+
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 kernel_size,
+                 stride,
+                 groups=1,
+                 bias=False):
+        super().__init__(
+            in_channels=in_channels,
+            out_channels=out_channels,
+            kernel_size=kernel_size,
+            stride=stride,
+            groups=groups,
+            bias=bias,
+            act='silu')
+
 
 class MT_SPPF(nn.Module):
     '''Simplified SPPF with ReLU activation'''
+
     def __init__(self, in_channels, out_channels, kernel_size=5):
         super().__init__()
         c_ = in_channels // 2  # hidden channels
         self.cv1 = ConvBNReLU(in_channels, c_, 1, 1)
         self.cv2 = ConvBNReLU(c_ * 4, out_channels, 1, 1)
-        self.maxpool = nn.MaxPool2d(kernel_size=kernel_size, stride=1, padding=kernel_size // 2)
+        self.maxpool = nn.MaxPool2d(
+            kernel_size=kernel_size, stride=1, padding=kernel_size // 2)
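         # Assumed intent: forward() chains this single 5x5 pool three times,
         # giving 5/9/13 receptive fields and approximating the original SPP
         # block at lower cost (the usual SPPF simplification).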
 
     def forward(self, x):
         x = self.cv1(x)
@@ -197,6 +298,7 @@ def forward(self, x):
             y2 = self.maxpool(y1)
             return self.cv2(torch.cat([x, y1, y2, self.maxpool(y2)], 1))
 
+
 class RepVGGYOLOX(nn.Module):
     ''' RepVGG with MT_SPPF to build an efficient YOLOX backbone
     '''
@@ -212,10 +314,13 @@ def __init__(
         channels_list_backbone = [64, 128, 256, 512, 1024]
         num_repeat_neck = [12, 12, 12, 12]
         channels_list_neck = [256, 128, 128, 256, 256, 512]
-        num_repeats = [(max(round(i * depth), 1) if i > 1 else i) for i in
-                        (num_repeat_backbone + num_repeat_neck)]
+        num_repeats = [(max(round(i * depth), 1) if i > 1 else i)
+                       for i in (num_repeat_backbone + num_repeat_neck)]
 
-        channels_list = [make_divisible(i * width, 8) for i in (channels_list_backbone + channels_list_neck)]
+        channels_list = [
+            make_divisible(i * width, 8)
+            for i in (channels_list_backbone + channels_list_neck)
+        ]
 
         assert channels_list is not None
         assert num_repeats is not None
@@ -224,12 +329,15 @@ def __init__(
             in_channels=in_channels,
             out_channels=channels_list[0],
             kernel_size=3,
-            stride=2
-        )
-        self.stage1 = self._make_stage(channels_list[0], channels_list[1], num_repeats[1])
-        self.stage2 = self._make_stage(channels_list[1], channels_list[2], num_repeats[2])
-        self.stage3 = self._make_stage(channels_list[2], channels_list[3], num_repeats[3])
-        self.stage4 = self._make_stage(channels_list[3], channels_list[4], num_repeats[4], add_ppf=True)
+            stride=2)
+        self.stage1 = self._make_stage(channels_list[0], channels_list[1],
+                                       num_repeats[1])
+        self.stage2 = self._make_stage(channels_list[1], channels_list[2],
+                                       num_repeats[2])
+        self.stage3 = self._make_stage(channels_list[2], channels_list[3],
+                                       num_repeats[3])
+        self.stage4 = self._make_stage(
+            channels_list[3], channels_list[4], num_repeats[4], add_ppf=True)
 
         # self.ERBlock_2 = nn.Sequential(
         #     RepVGGBlock(
@@ -291,9 +399,16 @@ def __init__(
         #     )
         # )
 
-    def _make_stage(self, in_channels, out_channels, repeat, stride=2,  add_ppf=False):
-        blocks= []
-        blocks.append(RepVGGBlock(in_channels, out_channels, kernel_size=3, stride=stride))
+    def _make_stage(self,
+                    in_channels,
+                    out_channels,
+                    repeat,
+                    stride=2,
+                    add_ppf=False):
+        blocks = []
+        blocks.append(
+            RepVGGBlock(
+                in_channels, out_channels, kernel_size=3, stride=stride))
         for i in range(repeat):
             blocks.append(RepVGGBlock(out_channels, out_channels))
         if add_ppf:
@@ -314,7 +429,7 @@ def forward(self, x):
         return tuple(outputs)
 
 
-if __name__=='__main__':
+if __name__ == '__main__':
 
     from torchsummaryX import summary
     import math
@@ -326,14 +441,17 @@ def forward(self, x):
     num_repeat_neck = [12, 12, 12, 12]
     channels_list_neck = [256, 128, 128, 256, 256, 512]
     channels = 3
-    num_repeat = [(max(round(i * depth_mul), 1) if i > 1 else i) for i in
-                  (num_repeat_backbone + num_repeat_neck)]
+    num_repeat = [(max(round(i * depth_mul), 1) if i > 1 else i)
+                  for i in (num_repeat_backbone + num_repeat_neck)]
 
     def make_divisible(x, divisor):
         # Round x up to the nearest multiple of the divisor.
         return math.ceil(x / divisor) * divisor
 
-    channels_list = [make_divisible(i * width_mul, 8) for i in (channels_list_backbone + channels_list_neck)]
+    channels_list = [
+        make_divisible(i * width_mul, 8)
+        for i in (channels_list_backbone + channels_list_neck)
+    ]
     # from easycv.models.backbones.efficientrep import EfficientRep
     # model = EfficientRep(in_channels=channels, channels_list=channels_list, num_repeats=num_repeat)
     # model = RepVGGYOLOX(in_channels=channels, channels_list=channels_list, num_repeats=num_repeat)
@@ -344,5 +462,5 @@ def make_divisible(x, divisor):
 
     model = model.cuda()
 
-    a = torch.randn(1,3,640,640).cuda()
-    summary(model,a)
+    a = torch.randn(1, 3, 640, 640).cuda()
+    summary(model, a)
diff --git a/easycv/models/backbones/yolo6_blocks.py b/easycv/models/backbones/yolo6_blocks.py
index 0688ab8d..d04545f7 100644
--- a/easycv/models/backbones/yolo6_blocks.py
+++ b/easycv/models/backbones/yolo6_blocks.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
-# -*- coding:utf-8 -*-
 
 import warnings
 from pathlib import Path
@@ -11,6 +10,7 @@
 
 class SiLU(nn.Module):
     '''SiLU activation'''
+
     @staticmethod
     def forward(x):
         return x * torch.sigmoid(x)
@@ -18,7 +18,14 @@ def forward(x):
 
 class Conv(nn.Module):
     '''Normal Conv with SiLU activation'''
-    def __init__(self, in_channels, out_channels, kernel_size, stride, groups=1, bias=False):
+
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 kernel_size,
+                 stride,
+                 groups=1,
+                 bias=False):
         super().__init__()
         padding = kernel_size // 2
         self.conv = nn.Conv2d(
@@ -42,7 +49,14 @@ def forward_fuse(self, x):
 
 class SimConv(nn.Module):
     '''Normal Conv with ReLU activation'''
-    def __init__(self, in_channels, out_channels, kernel_size, stride, groups=1, bias=False):
+
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 kernel_size,
+                 stride,
+                 groups=1,
+                 bias=False):
         super().__init__()
         padding = kernel_size // 2
         self.conv = nn.Conv2d(
@@ -66,12 +80,14 @@ def forward_fuse(self, x):
 
 class SimSPPF(nn.Module):
     '''Simplified SPPF with ReLU activation'''
+
     def __init__(self, in_channels, out_channels, kernel_size=5):
         super().__init__()
         c_ = in_channels // 2  # hidden channels
         self.cv1 = SimConv(in_channels, c_, 1, 1)
         self.cv2 = SimConv(c_ * 4, out_channels, 1, 1)
-        self.m = nn.MaxPool2d(kernel_size=kernel_size, stride=1, padding=kernel_size // 2)
+        self.m = nn.MaxPool2d(
+            kernel_size=kernel_size, stride=1, padding=kernel_size // 2)
 
     def forward(self, x):
         x = self.cv1(x)
@@ -84,6 +100,7 @@ def forward(self, x):
 
 class Transpose(nn.Module):
     '''Normal Transpose, default for upsampling'''
+
     def __init__(self, in_channels, out_channels, kernel_size=2, stride=2):
         super().__init__()
         self.upsample_transpose = torch.nn.ConvTranspose2d(
@@ -91,14 +108,14 @@ def __init__(self, in_channels, out_channels, kernel_size=2, stride=2):
             out_channels=out_channels,
             kernel_size=kernel_size,
             stride=stride,
-            bias=True
-        )
+            bias=True)
 
     def forward(self, x):
         return self.upsample_transpose(x)
 
 
 class Concat(nn.Module):
+
     def __init__(self, dimension=1):
         super().__init__()
         self.d = dimension
@@ -110,8 +127,16 @@ def forward(self, x):
 def conv_bn(in_channels, out_channels, kernel_size, stride, padding, groups=1):
     '''Basic cell for rep-style block, including conv and bn'''
     result = nn.Sequential()
-    result.add_module('conv', nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
-                                                  kernel_size=kernel_size, stride=stride, padding=padding, groups=groups, bias=False))
+    result.add_module(
+        'conv',
+        nn.Conv2d(
+            in_channels=in_channels,
+            out_channels=out_channels,
+            kernel_size=kernel_size,
+            stride=stride,
+            padding=padding,
+            groups=groups,
+            bias=False))
     result.add_module('bn', nn.BatchNorm2d(num_features=out_channels))
     return result
 
@@ -120,10 +145,12 @@ class RepBlock(nn.Module):
     '''
         RepBlock is a stage block with rep-style basic block
     '''
+
     def __init__(self, in_channels, out_channels, n=1):
         super().__init__()
         self.conv1 = RepVGGBlock(in_channels, out_channels)
-        self.block = nn.Sequential(*(RepVGGBlock(out_channels, out_channels) for _ in range(n - 1))) if n > 1 else None
+        self.block = nn.Sequential(*(RepVGGBlock(out_channels, out_channels)
+                                     for _ in range(n - 1))) if n > 1 else None
 
     def forward(self, x):
         x = self.conv1(x)
@@ -136,8 +163,18 @@ class RepVGGBlock(nn.Module):
     '''RepVGGBlock is a basic rep-style block, including training and deploy status
     This code is based on https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py
     '''
-    def __init__(self, in_channels, out_channels, kernel_size=3,
-                 stride=1, padding=1, dilation=1, groups=1, padding_mode='zeros', deploy=False, use_se=False):
+
+    def __init__(self,
+                 in_channels,
+                 out_channels,
+                 kernel_size=3,
+                 stride=1,
+                 padding=1,
+                 dilation=1,
+                 groups=1,
+                 padding_mode='zeros',
+                 deploy=False,
+                 use_se=False):
         super(RepVGGBlock, self).__init__()
         """ Intialization of the class.
         Args:
@@ -167,18 +204,40 @@ def __init__(self, in_channels, out_channels, kernel_size=3,
         self.nonlinearity = nn.ReLU()
 
         if use_se:
-            raise NotImplementedError("se block not supported yet")
+            raise NotImplementedError('se block not supported yet')
         else:
             self.se = nn.Identity()
 
         if deploy:
-            self.rbr_reparam = nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride,
-                                         padding=padding, dilation=dilation, groups=groups, bias=True, padding_mode=padding_mode)
+            self.rbr_reparam = nn.Conv2d(
+                in_channels=in_channels,
+                out_channels=out_channels,
+                kernel_size=kernel_size,
+                stride=stride,
+                padding=padding,
+                dilation=dilation,
+                groups=groups,
+                bias=True,
+                padding_mode=padding_mode)
 
         else:
-            self.rbr_identity = nn.BatchNorm2d(num_features=in_channels) if out_channels == in_channels and stride == 1 else None
-            self.rbr_dense = conv_bn(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, groups=groups)
-            self.rbr_1x1 = conv_bn(in_channels=in_channels, out_channels=out_channels, kernel_size=1, stride=stride, padding=padding_11, groups=groups)
+            self.rbr_identity = nn.BatchNorm2d(
+                num_features=in_channels
+            ) if out_channels == in_channels and stride == 1 else None
+            self.rbr_dense = conv_bn(
+                in_channels=in_channels,
+                out_channels=out_channels,
+                kernel_size=kernel_size,
+                stride=stride,
+                padding=padding,
+                groups=groups)
+            self.rbr_1x1 = conv_bn(
+                in_channels=in_channels,
+                out_channels=out_channels,
+                kernel_size=1,
+                stride=stride,
+                padding=padding_11,
+                groups=groups)
 
     def forward(self, inputs):
         '''Forward process'''
@@ -190,13 +249,15 @@ def forward(self, inputs):
         else:
             id_out = self.rbr_identity(inputs)
 
-        return self.nonlinearity(self.se(self.rbr_dense(inputs) + self.rbr_1x1(inputs) + id_out))
+        return self.nonlinearity(
+            self.se(self.rbr_dense(inputs) + self.rbr_1x1(inputs) + id_out))
 
     def get_equivalent_kernel_bias(self):
         kernel3x3, bias3x3 = self._fuse_bn_tensor(self.rbr_dense)
         kernel1x1, bias1x1 = self._fuse_bn_tensor(self.rbr_1x1)
         kernelid, biasid = self._fuse_bn_tensor(self.rbr_identity)
-        return kernel3x3 + self._pad_1x1_to_3x3_tensor(kernel1x1) + kernelid, bias3x3 + bias1x1 + biasid
+        return kernel3x3 + self._pad_1x1_to_3x3_tensor(
+            kernel1x1) + kernelid, bias3x3 + bias1x1 + biasid
 
     def _pad_1x1_to_3x3_tensor(self, kernel1x1):
         if kernel1x1 is None:
@@ -218,10 +279,12 @@ def _fuse_bn_tensor(self, branch):
             assert isinstance(branch, nn.BatchNorm2d)
             if not hasattr(self, 'id_tensor'):
                 input_dim = self.in_channels // self.groups
-                kernel_value = np.zeros((self.in_channels, input_dim, 3, 3), dtype=np.float32)
+                kernel_value = np.zeros((self.in_channels, input_dim, 3, 3),
+                                        dtype=np.float32)
                 for i in range(self.in_channels):
                     kernel_value[i, i % input_dim, 1, 1] = 1
-                self.id_tensor = torch.from_numpy(kernel_value).to(branch.weight.device)
+                self.id_tensor = torch.from_numpy(kernel_value).to(
+                    branch.weight.device)
             kernel = self.id_tensor
             running_mean = branch.running_mean
             running_var = branch.running_var
@@ -236,9 +299,15 @@ def switch_to_deploy(self):
         if hasattr(self, 'rbr_reparam'):
             return
         kernel, bias = self.get_equivalent_kernel_bias()
-        self.rbr_reparam = nn.Conv2d(in_channels=self.rbr_dense.conv.in_channels, out_channels=self.rbr_dense.conv.out_channels,
-                                     kernel_size=self.rbr_dense.conv.kernel_size, stride=self.rbr_dense.conv.stride,
-                                     padding=self.rbr_dense.conv.padding, dilation=self.rbr_dense.conv.dilation, groups=self.rbr_dense.conv.groups, bias=True)
+        self.rbr_reparam = nn.Conv2d(
+            in_channels=self.rbr_dense.conv.in_channels,
+            out_channels=self.rbr_dense.conv.out_channels,
+            kernel_size=self.rbr_dense.conv.kernel_size,
+            stride=self.rbr_dense.conv.stride,
+            padding=self.rbr_dense.conv.padding,
+            dilation=self.rbr_dense.conv.dilation,
+            groups=self.rbr_dense.conv.groups,
+            bias=True)
         self.rbr_reparam.weight.data = kernel
         self.rbr_reparam.bias.data = bias
         for para in self.parameters():
@@ -250,4 +319,3 @@ def switch_to_deploy(self):
         if hasattr(self, 'id_tensor'):
             self.__delattr__('id_tensor')
         self.deploy = True
-
diff --git a/easycv/models/detection/detectors/yolox/ASFF.py b/easycv/models/detection/detectors/yolox/ASFF.py
index c163c4fb..d9236125 100644
--- a/easycv/models/detection/detectors/yolox/ASFF.py
+++ b/easycv/models/detection/detectors/yolox/ASFF.py
@@ -1,14 +1,17 @@
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
+
 from easycv.models.backbones.network_blocks import SiLU
 
+
 def autopad(k, p=None):  # kernel, padding
     # Pad to 'same'
     if p is None:
         p = k // 2 if isinstance(k, int) else [x // 2 for x in k]  # auto-pad
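     # e.g. autopad(3) == 1, autopad(5) == 2 ('same' padding for odd kernels).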
     return p
 
+
 def get_activation(name='silu', inplace=True):
     if name == 'silu':
         module = SiLU(inplace=inplace)
@@ -23,9 +26,17 @@ def get_activation(name='silu', inplace=True):
 
 class Conv(nn.Module):
     # Standard convolution
-    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act='silu'):  # ch_in, ch_out, kernel, stride, padding, groups
+    def __init__(self,
+                 c1,
+                 c2,
+                 k=1,
+                 s=1,
+                 p=None,
+                 g=1,
+                 act='silu'):  # ch_in, ch_out, kernel, stride, padding, groups
         super(Conv, self).__init__()
-        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
+        self.conv = nn.Conv2d(
+            c1, c2, k, s, autopad(k, p), groups=g, bias=False)
         self.bn = nn.BatchNorm2d(c2)
         self.act = get_activation(act, inplace=True)
 
@@ -37,7 +48,14 @@ def forward_fuse(self, x):
 
 
 class ASFF(nn.Module):
-    def __init__(self, level, multiplier=1, asff_channel=16, rfb=False, vis=False, act='silu'):
+
+    def __init__(self,
+                 level,
+                 multiplier=1,
+                 asff_channel=16,
+                 rfb=False,
+                 vis=False,
+                 act='silu'):
         """
         multiplier should be 1, 0.5
         which means, the channel of ASFF can be
@@ -47,45 +65,46 @@ def __init__(self, level, multiplier=1, asff_channel=16, rfb=False, vis=False, a
         """
         super(ASFF, self).__init__()
         self.level = level
-        self.dim = [int(1024 * multiplier), int(512 * multiplier),
-                    int(256 * multiplier)]
-
+        self.dim = [
+            int(1024 * multiplier),
+            int(512 * multiplier),
+            int(256 * multiplier)
+        ]
 
         self.inter_dim = self.dim[self.level]
         if level == 0:
-            self.stride_level_1 = Conv(int(512 * multiplier), self.inter_dim, 3, 2,act=act)
+            self.stride_level_1 = Conv(
+                int(512 * multiplier), self.inter_dim, 3, 2, act=act)
 
-            self.stride_level_2 = Conv(int(256 * multiplier), self.inter_dim, 3, 2,act=act)
+            self.stride_level_2 = Conv(
+                int(256 * multiplier), self.inter_dim, 3, 2, act=act)
 
-            self.expand = Conv(self.inter_dim, int(
-                1024 * multiplier), 3, 1, act=act)
+            self.expand = Conv(
+                self.inter_dim, int(1024 * multiplier), 3, 1, act=act)
         elif level == 1:
             self.compress_level_0 = Conv(
-                int(1024 * multiplier), self.inter_dim, 1, 1,act=act)
+                int(1024 * multiplier), self.inter_dim, 1, 1, act=act)
             self.stride_level_2 = Conv(
-                int(256 * multiplier), self.inter_dim, 3, 2,act=act)
-            self.expand = Conv(self.inter_dim, int(512 * multiplier), 3, 1,act=act)
+                int(256 * multiplier), self.inter_dim, 3, 2, act=act)
+            self.expand = Conv(
+                self.inter_dim, int(512 * multiplier), 3, 1, act=act)
         elif level == 2:
             self.compress_level_0 = Conv(
-                int(1024 * multiplier), self.inter_dim, 1, 1,act=act)
+                int(1024 * multiplier), self.inter_dim, 1, 1, act=act)
             self.compress_level_1 = Conv(
-                int(512 * multiplier), self.inter_dim, 1, 1,act=act)
-            self.expand = Conv(self.inter_dim, int(
-                256 * multiplier), 3, 1,act=act)
+                int(512 * multiplier), self.inter_dim, 1, 1, act=act)
+            self.expand = Conv(
+                self.inter_dim, int(256 * multiplier), 3, 1, act=act)
 
         # when adding rfb, we use half number of channels to save memory
         # compress_c = 8 if rfb else 16
         compress_c = asff_channel
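         # Assumed role of the layers below: each level is compressed to
         # `asff_channel` channels, the three maps are concatenated, and
         # weight_levels yields per-pixel 3-way fusion weights that forward()
         # normalizes (typically via softmax) before summing the levels.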
 
-        self.weight_level_0 = Conv(
-            self.inter_dim, compress_c, 1, 1,act=act)
-        self.weight_level_1 = Conv(
-            self.inter_dim, compress_c, 1, 1,act=act)
-        self.weight_level_2 = Conv(
-            self.inter_dim, compress_c, 1, 1,act=act)
+        self.weight_level_0 = Conv(self.inter_dim, compress_c, 1, 1, act=act)
+        self.weight_level_1 = Conv(self.inter_dim, compress_c, 1, 1, act=act)
+        self.weight_level_2 = Conv(self.inter_dim, compress_c, 1, 1, act=act)
 
-        self.weight_levels = Conv(
-            compress_c * 3, 3, 1, 1,act=act)
+        self.weight_levels = Conv(compress_c * 3, 3, 1, 1, act=act)
         self.vis = vis
 
     def forward(self, x):  # l,m,s
@@ -138,5 +157,3 @@ def forward(self, x):  # l,m,s
             return out, levels_weight, fused_out_reduced.sum(dim=1)
         else:
             return out
-
-
diff --git a/easycv/models/detection/detectors/yolox/ASFF_sim.py b/easycv/models/detection/detectors/yolox/ASFF_sim.py
index 9fa0cef4..107c474e 100644
--- a/easycv/models/detection/detectors/yolox/ASFF_sim.py
+++ b/easycv/models/detection/detectors/yolox/ASFF_sim.py
@@ -1,8 +1,8 @@
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
-from easycv.models.backbones.network_blocks import SiLU
-from easycv.models.backbones.network_blocks import DWConv
+
+from easycv.models.backbones.network_blocks import DWConv, SiLU
 
 
 def autopad(k, p=None):  # kernel, padding
@@ -11,6 +11,7 @@ def autopad(k, p=None):  # kernel, padding
         p = k // 2 if isinstance(k, int) else [x // 2 for x in k]  # auto-pad
     return p
 
+
 def get_activation(name='silu', inplace=True):
     if name == 'silu':
         # @ to do nn.SiLU 1.7.0
@@ -24,11 +25,20 @@ def get_activation(name='silu', inplace=True):
         raise AttributeError('Unsupported act type: {}'.format(name))
     return module
 
+
 class Conv(nn.Module):
     # Standard convolution
-    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act='silu'):  # ch_in, ch_out, kernel, stride, padding, groups
+    def __init__(self,
+                 c1,
+                 c2,
+                 k=1,
+                 s=1,
+                 p=None,
+                 g=1,
+                 act='silu'):  # ch_in, ch_out, kernel, stride, padding, groups
         super(Conv, self).__init__()
-        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
+        self.conv = nn.Conv2d(
+            c1, c2, k, s, autopad(k, p), groups=g, bias=False)
         self.bn = nn.BatchNorm2d(c2)
         self.act = get_activation(act, inplace=True)
 
@@ -38,8 +48,20 @@ def forward(self, x):
     def forward_fuse(self, x):
         return self.act(self.conv(x))
 
+
 class ASFF(nn.Module):
-    def __init__(self, level, multiplier=1, asff_channel=2, expand_kernel=3, down_rate = None, use_dconv = False, use_expand = True, rfb=False, vis=False, act='silu'):
+
+    def __init__(self,
+                 level,
+                 multiplier=1,
+                 asff_channel=2,
+                 expand_kernel=3,
+                 down_rate=None,
+                 use_dconv=False,
+                 use_expand=True,
+                 rfb=False,
+                 vis=False,
+                 act='silu'):
         """
         multiplier should be 1 or 0.5,
         which means the channels of ASFF can be
@@ -49,8 +71,11 @@ def __init__(self, level, multiplier=1, asff_channel=2, expand_kernel=3, down_ra
         """
         super(ASFF, self).__init__()
         self.level = level
-        self.dim = [int(1024 * multiplier), int(512 * multiplier),
-                    int(256 * multiplier)]
+        self.dim = [
+            int(1024 * multiplier),
+            int(512 * multiplier),
+            int(256 * multiplier)
+        ]
 
         self.inter_dim = self.dim[self.level]
 
@@ -58,61 +83,106 @@ def __init__(self, level, multiplier=1, asff_channel=2, expand_kernel=3, down_ra
 
         if level == 0:
             if down_rate is None:
-                self.expand = Conv(self.inter_dim, int(
-                    1024 * multiplier), expand_kernel, 1, act=act)
+                self.expand = Conv(
+                    self.inter_dim,
+                    int(1024 * multiplier),
+                    expand_kernel,
+                    1,
+                    act=act)
             else:
                 if use_dconv:
-                    self.expand = DWConv(self.inter_dim, int(
-                        1024 * multiplier), expand_kernel, 1, act=act)
+                    self.expand = DWConv(
+                        self.inter_dim,
+                        int(1024 * multiplier),
+                        expand_kernel,
+                        1,
+                        act=act)
                 else:
                     self.expand = nn.Sequential(
-                        Conv(self.inter_dim, int(self.inter_dim//down_rate), 1, 1, act=act),
-                        Conv(int(self.inter_dim//down_rate), int(1024 * multiplier), 1, 1, act=act)
-                    )
+                        Conv(
+                            self.inter_dim,
+                            int(self.inter_dim // down_rate),
+                            1,
+                            1,
+                            act=act),
+                        Conv(
+                            int(self.inter_dim // down_rate),
+                            int(1024 * multiplier),
+                            1,
+                            1,
+                            act=act))
 
         elif level == 1:
             if down_rate is None:
-                self.expand = Conv(self.inter_dim, int(
-                    512 * multiplier), expand_kernel, 1, act=act)
+                self.expand = Conv(
+                    self.inter_dim,
+                    int(512 * multiplier),
+                    expand_kernel,
+                    1,
+                    act=act)
             else:
                 if use_dconv:
-                    self.expand = DWConv(self.inter_dim, int(
-                        512 * multiplier), expand_kernel, 1, act=act)
+                    self.expand = DWConv(
+                        self.inter_dim,
+                        int(512 * multiplier),
+                        expand_kernel,
+                        1,
+                        act=act)
                 else:
                     self.expand = nn.Sequential(
-                        Conv(self.inter_dim, int(self.inter_dim//down_rate), 1, 1, act=act),
-                        Conv(int(self.inter_dim//down_rate),
-                             int(512 * multiplier), 1, 1, act=act)
-                    )
+                        Conv(
+                            self.inter_dim,
+                            int(self.inter_dim // down_rate),
+                            1,
+                            1,
+                            act=act),
+                        Conv(
+                            int(self.inter_dim // down_rate),
+                            int(512 * multiplier),
+                            1,
+                            1,
+                            act=act))
 
         elif level == 2:
             if down_rate is None:
-                self.expand = Conv(self.inter_dim, int(
-                    256 * multiplier), expand_kernel, 1, act=act)
+                self.expand = Conv(
+                    self.inter_dim,
+                    int(256 * multiplier),
+                    expand_kernel,
+                    1,
+                    act=act)
             else:
                 if use_dconv:
-                    self.expand = DWConv(self.inter_dim, int(
-                        256 * multiplier), expand_kernel, 1, act=act)
+                    self.expand = DWConv(
+                        self.inter_dim,
+                        int(256 * multiplier),
+                        expand_kernel,
+                        1,
+                        act=act)
                 else:
                     self.expand = nn.Sequential(
-                        Conv(self.inter_dim, int(self.inter_dim//down_rate), 1, 1, act=act),
-                        Conv(int(self.inter_dim//down_rate),
-                             int(256 * multiplier), 1, 1, act=act)
-                    )
+                        Conv(
+                            self.inter_dim,
+                            int(self.inter_dim // down_rate),
+                            1,
+                            1,
+                            act=act),
+                        Conv(
+                            int(self.inter_dim // down_rate),
+                            int(256 * multiplier),
+                            1,
+                            1,
+                            act=act))
 
         # when adding rfb, we use half number of channels to save memory
         # compress_c = 8 if rfb else 16
         compress_c = asff_channel
 
-        self.weight_level_0 = Conv(
-            self.inter_dim, compress_c, 1, 1,act=act)
-        self.weight_level_1 = Conv(
-            self.inter_dim, compress_c, 1, 1,act=act)
-        self.weight_level_2 = Conv(
-            self.inter_dim, compress_c, 1, 1,act=act)
+        self.weight_level_0 = Conv(self.inter_dim, compress_c, 1, 1, act=act)
+        self.weight_level_1 = Conv(self.inter_dim, compress_c, 1, 1, act=act)
+        self.weight_level_2 = Conv(self.inter_dim, compress_c, 1, 1, act=act)
 
-        self.weight_levels = Conv(
-            compress_c * 3, 3, 1, 1,act=act)
+        self.weight_levels = Conv(compress_c * 3, 3, 1, 1, act=act)
         self.vis = vis
 
     def expand_channel(self, x):
@@ -134,9 +204,9 @@ def expand_channel(self, x):
 
     def mean_channel(self, x):
         # [b,c,h,w] -> [b,c/2,h,w]: average each pair of adjacent channels
-        x1 = x[:,::2,:,:]
-        x2 = x[:,1::2,:,:]
-        return (x1+x2)/2
+        x1 = x[:, ::2, :, :]
+        x2 = x[:, 1::2, :, :]
+        return (x1 + x2) / 2
 
     def forward(self, x):  # l,m,s
         """
@@ -166,7 +236,8 @@ def forward(self, x):  # l,m,s
         elif self.level == 2:
             level_0_resized = F.interpolate(
                 x_level_0, scale_factor=4, mode='nearest')
-            level_0_resized = self.mean_channel(self.mean_channel(level_0_resized))
+            level_0_resized = self.mean_channel(
+                self.mean_channel(level_0_resized))
             level_1_resized = F.interpolate(
                 x_level_1, scale_factor=2, mode='nearest')
             level_1_resized = self.mean_channel(level_1_resized)
@@ -195,7 +266,8 @@ def forward(self, x):  # l,m,s
         else:
             return out
 
-if __name__=="__main__":
+
+if __name__ == '__main__':
     width = 0.5
     num_classes = 80
     in_channels = [256, 512, 1024]
@@ -203,12 +275,16 @@ def forward(self, x):  # l,m,s
     asff_channel = 2
     act = 'relu'
 
-    asff_1 = ASFF(level=0, multiplier=width, asff_channel=asff_channel, act=act).cuda()
-    asff_2 = ASFF(level=1, multiplier=width, asff_channel=asff_channel, act=act).cuda()
-    asff_3 = ASFF(level=2, multiplier=width, asff_channel=asff_channel, act=act).cuda()
+    asff_1 = ASFF(
+        level=0, multiplier=width, asff_channel=asff_channel, act=act).cuda()
+    asff_2 = ASFF(
+        level=1, multiplier=width, asff_channel=asff_channel, act=act).cuda()
+    asff_3 = ASFF(
+        level=2, multiplier=width, asff_channel=asff_channel, act=act).cuda()
 
-    input = (
-        torch.rand(1, 128, 80, 80).cuda(), torch.rand(1, 256, 40, 40).cuda(), torch.rand(1, 512, 20, 20).cuda())
+    input = (torch.rand(1, 128, 80, 80).cuda(),
+             torch.rand(1, 256, 40, 40).cuda(),
+             torch.rand(1, 512, 20, 20).cuda())
 
     # flops, params = get_model_complexity_info(asff_1, input, as_strings=True,
     #                                           print_per_layer_stat=True)
@@ -218,10 +294,9 @@ def forward(self, x):  # l,m,s
     # input = torch.randn(1, 3, 640, 640).cuda()
     # flops, params = profile(asff_1, inputs=(input,))
     # print('flops: {}, params: {}'.format(flops, params))
-    
+
     from torchsummaryX import summary
 
     summary(asff_1, input)
     summary(asff_2, input)
     summary(asff_3, input)
-
diff --git a/easycv/models/detection/detectors/yolox/__init__.py b/easycv/models/detection/detectors/yolox/__init__.py
index 9aaa408b..c2e4bf0d 100644
--- a/easycv/models/detection/detectors/yolox/__init__.py
+++ b/easycv/models/detection/detectors/yolox/__init__.py
@@ -1 +1 @@
-from .yolox import YOLOX
\ No newline at end of file
+from .yolox import YOLOX
diff --git a/easycv/models/detection/detectors/yolox/tood_head.py b/easycv/models/detection/detectors/yolox/tood_head.py
index 06044e78..db14b3e8 100644
--- a/easycv/models/detection/detectors/yolox/tood_head.py
+++ b/easycv/models/detection/detectors/yolox/tood_head.py
@@ -6,13 +6,12 @@
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
+from mmcv.cnn import ConvModule, normal_init
 
-from easycv.models.backbones.repvgg_yolox_backbone import RepVGGBlock
 from easycv.models.backbones.network_blocks import BaseConv, DWConv
+from easycv.models.backbones.repvgg_yolox_backbone import RepVGGBlock
 from easycv.models.detection.utils import bboxes_iou
-from easycv.models.loss import IOUloss
-from easycv.models.loss import FocalLoss, VarifocalLoss
-from mmcv.cnn import ConvModule, normal_init
+from easycv.models.loss import FocalLoss, IOUloss, VarifocalLoss
 
 
 class TaskDecomposition(nn.Module):
@@ -74,7 +73,7 @@ def forward(self, feat, avg_feat=None):
         conv_weight = weight.reshape(
             b, 1, self.stacked_convs,
             1) * self.reduction_conv.conv.weight.reshape(
-            1, self.feat_channels, self.stacked_convs, self.feat_channels)
+                1, self.feat_channels, self.stacked_convs, self.feat_channels)
         conv_weight = conv_weight.reshape(b, self.feat_channels,
                                           self.in_channels)
         feat = feat.reshape(b, self.in_channels, h * w)
@@ -89,23 +88,24 @@ def forward(self, feat, avg_feat=None):
 
 class TOODHead(nn.Module):
 
-    def __init__(self,
-                 num_classes,
-                 width=1.0,
-                 strides=[8, 16, 32],
-                 in_channels=[256, 512, 1024],
-                 conv_type='repconv',
-                 act='silu',
-                 stage='CLOUD',
-                 obj_loss_type='l1',
-                 reg_loss_type='iou',
-                 stacked_convs=6,
-                 la_down_rate=8,
-                 conv_layers=2,
-                 decode_in_inference=True,
-                 conv_cfg=None,
-                 norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
-                 ):
+    def __init__(
+            self,
+            num_classes,
+            width=1.0,
+            strides=[8, 16, 32],
+            in_channels=[256, 512, 1024],
+            conv_type='repconv',
+            act='silu',
+            stage='CLOUD',
+            obj_loss_type='l1',
+            reg_loss_type='iou',
+            stacked_convs=6,
+            la_down_rate=8,
+            conv_layers=2,
+            decode_in_inference=True,
+            conv_cfg=None,
+            norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
+    ):
         """
         Args:
             num_classes (int): detection class numbers.
@@ -143,8 +143,10 @@ def __init__(self,
 
         default_conv_type_list = ['conv', 'dwconv', 'repconv']
         # Conv = DWConv if depthwise else BaseConv
-        if conv_type not in    default_conv_type_list:
-            logging.warning('YOLOX-PAI tood head conv_type must in [conv, dwconv, repconv], otherwise we use repconv as default')
+        if conv_type not in default_conv_type_list:
+            logging.warning(
+                'YOLOX-PAI tood head conv_type must be in [conv, dwconv, repconv]; falling back to repconv'
+            )
             conv_type = 'repconv'
         if conv_type == 'conv':
             Conv = BaseConv
@@ -161,9 +163,8 @@ def __init__(self,
                     ksize=1,
                     stride=1,
                     act=act,
-                )
-            )
-            if conv_layers==2:
+                ))
+            if conv_layers == 2:
                 self.cls_convs.append(
                     nn.Sequential(*[
                         Conv(
@@ -195,7 +196,7 @@ def __init__(self,
                             act=act,
                         ),
                     ]))
-            elif conv_layers==1:
+            elif conv_layers == 1:
                 self.cls_convs.append(
                     nn.Sequential(*[
                         Conv(
@@ -238,16 +239,13 @@ def __init__(self,
                     padding=0,
                 ))
             self.cls_decomps.append(
-                TaskDecomposition(self.feat_channels,
-                                  self.stacked_convs,
+                TaskDecomposition(self.feat_channels, self.stacked_convs,
                                   self.stacked_convs * la_down_rate,
                                   self.conv_cfg, self.norm_cfg))
             self.reg_decomps.append(
-                TaskDecomposition(self.feat_channels,
-                                  self.stacked_convs,
+                TaskDecomposition(self.feat_channels, self.stacked_convs,
                                   self.stacked_convs * la_down_rate,
-                                  self.conv_cfg, self.norm_cfg)
-            )
+                                  self.conv_cfg, self.norm_cfg))
 
         for i in range(self.stacked_convs):
             conv_cfg = self.conv_cfg
@@ -257,8 +255,7 @@ def __init__(self,
                     in_channels=chn,
                     out_channels=chn,
                     act=act,
-                )
-            )
+                ))
             # self.inter_convs.append(
             #     ConvModule(
             #         chn,
@@ -284,7 +281,7 @@ def __init__(self,
         elif obj_loss_type == 'v_focal':
             self.obj_loss = VarifocalLoss(reduction='none')
         else:
-            assert "Undefined loss type: {}".format(obj_loss_type)
+            raise ValueError('Undefined loss type: {}'.format(obj_loss_type))
 
         self.strides = strides
         self.grids = [torch.zeros(1)] * len(in_channels)
@@ -307,8 +304,10 @@ def forward(self, xin, labels=None, imgs=None):
         y_shifts = []
         expanded_strides = []
 
-        for k, (cls_decomp, reg_decomp, cls_conv, reg_conv, stride_this_level, x) in enumerate(
-                zip(self.cls_decomps, self.reg_decomps, self.cls_convs, self.reg_convs, self.strides, xin)):
+        for k, (cls_decomp, reg_decomp, cls_conv, reg_conv, stride_this_level,
+                x) in enumerate(
+                    zip(self.cls_decomps, self.reg_decomps, self.cls_convs,
+                        self.reg_convs, self.strides, xin)):
             x = self.stems[k](x)
 
             inter_feats = []
@@ -337,7 +336,7 @@ def forward(self, xin, labels=None, imgs=None):
                 expanded_strides.append(
                     torch.zeros(
                         1, grid.shape[1]).fill_(stride_this_level).type_as(
-                        xin[0]))
+                            xin[0]))
                 if self.use_l1:
                     batch_size = reg_output.shape[0]
                     hsize, wsize = reg_output.shape[-2:]
@@ -424,15 +423,15 @@ def decode_outputs(self, outputs, dtype):
         return outputs
 
     def get_losses(
-            self,
-            imgs,
-            x_shifts,
-            y_shifts,
-            expanded_strides,
-            labels,
-            outputs,
-            origin_preds,
-            dtype,
+        self,
+        imgs,
+        x_shifts,
+        y_shifts,
+        expanded_strides,
+        labels,
+        outputs,
+        origin_preds,
+        dtype,
     ):
         bbox_preds = outputs[:, :, :4]  # [batch, n_anchors_all, 4]
         obj_preds = outputs[:, :, 4].unsqueeze(-1)  # [batch, n_anchors_all, 1]
@@ -565,9 +564,8 @@ def get_losses(
             bbox_preds.view(-1, 4)[fg_masks], reg_targets)).sum() / num_fg
 
         if self.obj_loss_type == 'focal':
-            loss_obj = (
-                           self.focal_loss(obj_preds.sigmoid().view(-1, 1), obj_targets)
-                       ).sum() / num_fg
+            loss_obj = (self.focal_loss(obj_preds.sigmoid().view(-1, 1),
+                                        obj_targets)).sum() / num_fg
         else:
             loss_obj = (self.obj_loss(obj_preds.view(-1, 1),
                                       obj_targets)).sum() / num_fg
@@ -596,8 +594,10 @@ def get_losses(
     def focal_loss(self, pred, gt):
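         # A sketch of the computation below: a binary focal-style objectness
         # loss where (1 - pred)^2 and pred^2 down-weight easy examples and
         # the 0.75 / 0.25 factors re-balance positives vs. negatives
         # (alpha-balancing; assumed hyper-parameters).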
         pos_inds = gt.eq(1).float()
         neg_inds = gt.eq(0).float()
-        pos_loss = torch.log(pred + 1e-5) * torch.pow(1 - pred, 2) * pos_inds * 0.75
-        neg_loss = torch.log(1 - pred + 1e-5) * torch.pow(pred, 2) * neg_inds * 0.25
+        pos_loss = torch.log(pred + 1e-5) * torch.pow(1 - pred,
+                                                      2) * pos_inds * 0.75
+        neg_loss = torch.log(1 - pred + 1e-5) * torch.pow(pred,
+                                                          2) * neg_inds * 0.25
         loss = -(pos_loss + neg_loss)
         return loss
 
@@ -616,22 +616,22 @@ def get_l1_target(self,
 
     @torch.no_grad()
     def get_assignments(
-            self,
-            batch_idx,
-            num_gt,
-            total_num_anchors,
-            gt_bboxes_per_image,
-            gt_classes,
-            bboxes_preds_per_image,
-            expanded_strides,
-            x_shifts,
-            y_shifts,
-            cls_preds,
-            bbox_preds,
-            obj_preds,
-            labels,
-            imgs,
-            mode='gpu',
+        self,
+        batch_idx,
+        num_gt,
+        total_num_anchors,
+        gt_bboxes_per_image,
+        gt_classes,
+        bboxes_preds_per_image,
+        expanded_strides,
+        x_shifts,
+        y_shifts,
+        cls_preds,
+        bbox_preds,
+        obj_preds,
+        labels,
+        imgs,
+        mode='gpu',
     ):
 
         if mode == 'cpu':
@@ -695,7 +695,7 @@ def get_assignments(
         gt_cls_per_image = (
             F.one_hot(gt_classes.to(torch.int64),
                       self.num_classes).float().unsqueeze(1).repeat(
-                1, num_in_boxes_anchor, 1))
+                          1, num_in_boxes_anchor, 1))
         pair_wise_ious_loss = -torch.log(pair_wise_ious + 1e-8)
 
         if mode == 'cpu':
@@ -704,27 +704,27 @@ def get_assignments(
         if LooseVersion(torch.__version__) >= LooseVersion('1.6.0'):
             with torch.cuda.amp.autocast(enabled=False):
                 cls_preds_ = (
-                        cls_preds_.float().unsqueeze(0).repeat(num_gt, 1,
-                                                               1).sigmoid_() *
-                        obj_preds_.float().unsqueeze(0).repeat(num_gt, 1,
-                                                               1).sigmoid_())
+                    cls_preds_.float().unsqueeze(0).repeat(num_gt, 1,
+                                                           1).sigmoid_() *
+                    obj_preds_.float().unsqueeze(0).repeat(num_gt, 1,
+                                                           1).sigmoid_())
                 pair_wise_cls_loss = F.binary_cross_entropy(
                     cls_preds_.sqrt_(), gt_cls_per_image,
                     reduction='none').sum(-1)
         else:
             cls_preds_ = (
-                    cls_preds_.float().unsqueeze(0).repeat(num_gt, 1,
-                                                           1).sigmoid_() *
-                    obj_preds_.float().unsqueeze(0).repeat(num_gt, 1,
-                                                           1).sigmoid_())
+                cls_preds_.float().unsqueeze(0).repeat(num_gt, 1,
+                                                       1).sigmoid_() *
+                obj_preds_.float().unsqueeze(0).repeat(num_gt, 1,
+                                                       1).sigmoid_())
             pair_wise_cls_loss = F.binary_cross_entropy(
                 cls_preds_.sqrt_(), gt_cls_per_image, reduction='none').sum(-1)
 
         del cls_preds_
 
         cost = (
-                pair_wise_cls_loss + 3.0 * pair_wise_ious_loss + 100000.0 *
-                (~is_in_boxes_and_center))
+            pair_wise_cls_loss + 3.0 * pair_wise_ious_loss + 100000.0 *
+            (~is_in_boxes_and_center))
 
         (
             num_fg,
@@ -751,13 +751,13 @@ def get_assignments(
         )
 
     def get_in_boxes_info(
-            self,
-            gt_bboxes_per_image,
-            expanded_strides,
-            x_shifts,
-            y_shifts,
-            total_num_anchors,
-            num_gt,
+        self,
+        gt_bboxes_per_image,
+        expanded_strides,
+        x_shifts,
+        y_shifts,
+        total_num_anchors,
+        num_gt,
     ):
         expanded_strides_per_image = expanded_strides[0]
         x_shifts_per_image = x_shifts[0] * expanded_strides_per_image
@@ -773,19 +773,19 @@ def get_in_boxes_info(
         gt_bboxes_per_image_l = (
             (gt_bboxes_per_image[:, 0] -
              0.5 * gt_bboxes_per_image[:, 2]).unsqueeze(1).repeat(
-                1, total_num_anchors))
+                 1, total_num_anchors))
         gt_bboxes_per_image_r = (
             (gt_bboxes_per_image[:, 0] +
              0.5 * gt_bboxes_per_image[:, 2]).unsqueeze(1).repeat(
-                1, total_num_anchors))
+                 1, total_num_anchors))
         gt_bboxes_per_image_t = (
             (gt_bboxes_per_image[:, 1] -
              0.5 * gt_bboxes_per_image[:, 3]).unsqueeze(1).repeat(
-                1, total_num_anchors))
+                 1, total_num_anchors))
         gt_bboxes_per_image_b = (
             (gt_bboxes_per_image[:, 1] +
              0.5 * gt_bboxes_per_image[:, 3]).unsqueeze(1).repeat(
-                1, total_num_anchors))
+                 1, total_num_anchors))
 
         b_l = x_centers_per_image - gt_bboxes_per_image_l
         b_r = gt_bboxes_per_image_r - x_centers_per_image
@@ -800,21 +800,21 @@ def get_in_boxes_info(
         center_radius = 2.5
 
         gt_bboxes_per_image_l = (
-                                    gt_bboxes_per_image[:, 0]).unsqueeze(1).repeat(
-            1, total_num_anchors
-        ) - center_radius * expanded_strides_per_image.unsqueeze(0)
+            gt_bboxes_per_image[:, 0]).unsqueeze(1).repeat(
+                1, total_num_anchors
+            ) - center_radius * expanded_strides_per_image.unsqueeze(0)
         gt_bboxes_per_image_r = (
-                                    gt_bboxes_per_image[:, 0]).unsqueeze(1).repeat(
-            1, total_num_anchors
-        ) + center_radius * expanded_strides_per_image.unsqueeze(0)
+            gt_bboxes_per_image[:, 0]).unsqueeze(1).repeat(
+                1, total_num_anchors
+            ) + center_radius * expanded_strides_per_image.unsqueeze(0)
         gt_bboxes_per_image_t = (
-                                    gt_bboxes_per_image[:, 1]).unsqueeze(1).repeat(
-            1, total_num_anchors
-        ) - center_radius * expanded_strides_per_image.unsqueeze(0)
+            gt_bboxes_per_image[:, 1]).unsqueeze(1).repeat(
+                1, total_num_anchors
+            ) - center_radius * expanded_strides_per_image.unsqueeze(0)
         gt_bboxes_per_image_b = (
-                                    gt_bboxes_per_image[:, 1]).unsqueeze(1).repeat(
-            1, total_num_anchors
-        ) + center_radius * expanded_strides_per_image.unsqueeze(0)
+            gt_bboxes_per_image[:, 1]).unsqueeze(1).repeat(
+                1, total_num_anchors
+            ) + center_radius * expanded_strides_per_image.unsqueeze(0)
 
         c_l = x_centers_per_image - gt_bboxes_per_image_l
         c_r = gt_bboxes_per_image_r - x_centers_per_image
@@ -828,8 +828,8 @@ def get_in_boxes_info(
         is_in_boxes_anchor = is_in_boxes_all | is_in_centers_all
 
         is_in_boxes_and_center = (
-                is_in_boxes[:, is_in_boxes_anchor]
-                & is_in_centers[:, is_in_boxes_anchor])
+            is_in_boxes[:, is_in_boxes_anchor]
+            & is_in_centers[:, is_in_boxes_anchor])
         return is_in_boxes_anchor, is_in_boxes_and_center
 
     def dynamic_k_matching(self, cost, pair_wise_ious, gt_classes, num_gt,
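Taken together, these reformatted hunks leave the SimOTA assignment logic unchanged: candidate anchors are those whose centers lie inside a gt box or within center_radius = 2.5 strides of the gt center, the classification cost is the binary cross entropy between sqrt(cls_prob * obj_prob) and the one-hot gt class (computed with autocast disabled on torch >= 1.6, since binary_cross_entropy is not fp16-safe), and the matching cost fed to dynamic_k_matching combines three terms. A compact sketch of that combination, reusing the tensor names from the hunks above:

    import torch

    def simota_cost(pair_wise_cls_loss, pair_wise_ious_loss, is_in_boxes_and_center):
        # cls cost + 3x IoU cost (-log(IoU + 1e-8)) + a large penalty for anchors
        # outside the intersection of the gt box and its 2.5-stride center region;
        # is_in_boxes_and_center is expected to be a bool mask
        return (pair_wise_cls_loss + 3.0 * pair_wise_ious_loss +
                100000.0 * (~is_in_boxes_and_center))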
diff --git a/easycv/models/detection/detectors/yolox/yolo_head.py b/easycv/models/detection/detectors/yolox/yolo_head.py
index a93ef79c..fa092fc0 100644
--- a/easycv/models/detection/detectors/yolox/yolo_head.py
+++ b/easycv/models/detection/detectors/yolox/yolo_head.py
@@ -9,8 +9,7 @@
 
 from easycv.models.backbones.network_blocks import BaseConv, DWConv
 from easycv.models.detection.utils import bboxes_iou
-from easycv.models.loss import IOUloss
-from easycv.models.loss import FocalLoss,VarifocalLoss
+from easycv.models.loss import FocalLoss, IOUloss, VarifocalLoss
 
 
 class YOLOXHead(nn.Module):
@@ -120,7 +119,6 @@ def __init__(self,
                     padding=0,
                 ))
 
-
         self.bcewithlog_loss = nn.BCEWithLogitsLoss(reduction='none')
 
         # if reg_loss_type=='l1':
@@ -129,7 +127,7 @@ def __init__(self,
         # else:
         #     self.use_l1 = False
 
-        self.iou_loss = IOUloss(reduction='none',loss_type=reg_loss_type)
+        self.iou_loss = IOUloss(reduction='none', loss_type=reg_loss_type)
 
         self.obj_loss_type = obj_loss_type
         if obj_loss_type == 'BCE':
@@ -140,7 +138,7 @@ def __init__(self,
         elif obj_loss_type == 'v_focal':
             self.obj_loss = VarifocalLoss(reduction='none')
         else:
-            assert "Undefined loss type: {}".format(obj_loss_type)
+            assert 'Undefined loss type: {}'.format(obj_loss_type)
 
         self.strides = strides
         self.grids = [torch.zeros(1)] * len(in_channels)
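One thing the quote-style fix above does not change: the final else branch asserts on a non-empty string, which always evaluates to True, so an unrecognized obj_loss_type is not actually rejected and self.obj_loss is left unset. A stricter check would raise instead; the helper below is only an illustrative sketch, not part of this patch:

    def check_obj_loss_type(obj_loss_type):
        # the supported names are the ones dispatched above
        if obj_loss_type not in ('BCE', 'focal', 'v_focal'):
            raise ValueError('Undefined loss type: {}'.format(obj_loss_type))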
@@ -413,12 +411,11 @@ def get_losses(
             bbox_preds.view(-1, 4)[fg_masks], reg_targets)).sum() / num_fg
 
         if self.obj_loss_type == 'focal':
-            loss_obj = (
-                           self.focal_loss(obj_preds.sigmoid().view(-1, 1), obj_targets)
-                       ).sum() / num_fg
+            loss_obj = (self.focal_loss(obj_preds.sigmoid().view(-1, 1),
+                                        obj_targets)).sum() / num_fg
         else:
             loss_obj = (self.obj_loss(obj_preds.view(-1, 1),
-                                             obj_targets)).sum() / num_fg
+                                      obj_targets)).sum() / num_fg
         loss_cls = (self.bcewithlog_loss(
             cls_preds.view(-1, self.num_classes)[fg_masks],
             cls_targets)).sum() / num_fg
@@ -444,12 +441,13 @@ def get_losses(
     def focal_loss(self, pred, gt):
         pos_inds = gt.eq(1).float()
         neg_inds = gt.eq(0).float()
-        pos_loss = torch.log(pred + 1e-5) * torch.pow(1 - pred, 2) * pos_inds * 0.75
-        neg_loss = torch.log(1 - pred + 1e-5) * torch.pow(pred, 2) * neg_inds * 0.25
+        pos_loss = torch.log(pred + 1e-5) * torch.pow(1 - pred,
+                                                      2) * pos_inds * 0.75
+        neg_loss = torch.log(1 - pred + 1e-5) * torch.pow(pred,
+                                                          2) * neg_inds * 0.25
         loss = -(pos_loss + neg_loss)
         return loss
 
-
     def get_l1_target(self,
                       l1_target,
                       gt,
@@ -537,7 +535,6 @@ def get_assignments(
         pair_wise_ious = bboxes_iou(gt_bboxes_per_image,
                                     bboxes_preds_per_image, False)
 
-
         if (torch.isnan(pair_wise_ious.max())):
             pair_wise_ious = bboxes_iou(gt_bboxes_per_image,
                                         bboxes_preds_per_image, False)
diff --git a/easycv/models/detection/detectors/yolox/yolo_pafpn.py b/easycv/models/detection/detectors/yolox/yolo_pafpn.py
index 1ba25ccf..2f16faef 100644
--- a/easycv/models/detection/detectors/yolox/yolo_pafpn.py
+++ b/easycv/models/detection/detectors/yolox/yolo_pafpn.py
@@ -1,12 +1,14 @@
 # Copyright (c) 2014-2021 Megvii Inc And Alibaba PAI-Teams. All rights reserved.
 import logging
+import math
+
 import torch
 import torch.nn as nn
 
 from easycv.models.backbones.darknet import CSPDarknet
+from easycv.models.backbones.network_blocks import (BaseConv, CSPLayer, DWConv,
+                                                    GSConv, VoVGSCSP)
 from easycv.models.backbones.repvgg_yolox_backbone import RepVGGYOLOX
-from easycv.models.backbones.network_blocks import BaseConv, CSPLayer, DWConv, GSConv, VoVGSCSP
-import math
 
 
 def make_divisible(x, divisor):
@@ -19,33 +21,36 @@ class YOLOPAFPN(nn.Module):
     YOLOv3 model. Darknet 53 is the default backbone of this model.
     """
 
-    def __init__(
-        self,
-        depth=1.0,
-        width=1.0,
-        in_features=('dark3', 'dark4', 'dark5'),
-        in_channels=[256, 512, 1024],
-        depthwise=False,
-        act='silu',
-        asff_channel = 16,
-        use_att=None,
-        expand_kernel=3,
-        down_rate=32,
-        use_dconv=False,
-        use_expand=True,
-        backbone = "CSPDarknet",
-        neck = 'yolo',
-        neck_mode = 'all'
-    ):
+    def __init__(self,
+                 depth=1.0,
+                 width=1.0,
+                 in_features=('dark3', 'dark4', 'dark5'),
+                 in_channels=[256, 512, 1024],
+                 depthwise=False,
+                 act='silu',
+                 asff_channel=16,
+                 use_att=None,
+                 expand_kernel=3,
+                 down_rate=32,
+                 use_dconv=False,
+                 use_expand=True,
+                 backbone='CSPDarknet',
+                 neck='yolo',
+                 neck_mode='all'):
         super().__init__()
         # build backbone
-        if backbone == "CSPDarknet":
-            self.backbone = CSPDarknet(depth, width, depthwise=depthwise, act=act)
-        elif backbone == "RepVGGYOLOX":
-            self.backbone = RepVGGYOLOX(in_channels=3,depth=depth, width=width)
+        if backbone == 'CSPDarknet':
+            self.backbone = CSPDarknet(
+                depth, width, depthwise=depthwise, act=act)
+        elif backbone == 'RepVGGYOLOX':
+            self.backbone = RepVGGYOLOX(
+                in_channels=3, depth=depth, width=width)
         else:
-            logging.warning('YOLOX-PAI backbone must in [CSPDarknet, RepVGGYOLOX], otherwise we use RepVGGYOLOX as default')
-            self.backbone = RepVGGYOLOX(in_channels=3,depth=depth, width=width)
+            logging.warning(
+                'YOLOX-PAI backbone must be in [CSPDarknet, RepVGGYOLOX], otherwise RepVGGYOLOX is used as the default'
+            )
+            self.backbone = RepVGGYOLOX(
+                in_channels=3, depth=depth, width=width)
         self.backbone_name = backbone
 
         # build neck
@@ -56,9 +61,11 @@ def __init__(
         self.neck_mode = neck_mode
         if neck != 'gsconv':
             if neck != 'yolo':
-                logging.warning('YOLOX-PAI backbone must in [yolo, gsconv], otherwise we use yolo as default')
+                logging.warning(
+                    'YOLOX-PAI neck must be in [yolo, gsconv], otherwise yolo is used as the default'
+                )
             self.neck = 'yolo'
-            
+
             self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
             self.lateral_conv0 = BaseConv(
                 int(in_channels[2] * width),
@@ -131,24 +138,21 @@ def __init__(
                 int(in_channels[0] * width),
                 1,
                 1,
-                act=act
-            )
+                act=act)
 
             self.gsconv4 = GSConv(
                 int(in_channels[0] * width),
                 int(in_channels[0] * width),
                 3,
                 2,
-                act=act
-            )
+                act=act)
 
             self.gsconv5 = GSConv(
                 int(in_channels[1] * width),
                 int(in_channels[1] * width),
                 3,
                 2,
-                act=act
-            )
+                act=act)
 
             if self.neck_mode == 'all':
                 self.vovGSCSP1 = VoVGSCSP(
@@ -163,8 +167,7 @@ def __init__(
                     int(2 * in_channels[0] * width),
                     1,
                     1,
-                    act=act
-                )
+                    act=act)
                 self.vovGSCSP2 = VoVGSCSP(
                     int(2 * in_channels[0] * width),
                     int(in_channels[0] * width),
@@ -219,23 +222,60 @@ def __init__(
                     act=act)
 
         # build attention after PAN
-        self.use_att=use_att
+        self.use_att = use_att
         default_attention_list = ['ASFF', 'ASFF_sim']
         if use_att is not None and use_att not in default_attention_list:
-            logging.warning('YOLOX-PAI backbone must in [ASFF, ASFF_sim], otherwise we use ASFF as default')
+            logging.warning(
+                'YOLOX-PAI attention (use_att) must be in [ASFF, ASFF_sim], otherwise no attention module is used'
+            )
 
-        if self.use_att=='ASFF' or self.use_att=='ASFF_sim':
-            if self.use_att=='ASFF':
+        if self.use_att == 'ASFF' or self.use_att == 'ASFF_sim':
+            if self.use_att == 'ASFF':
                 from .ASFF import ASFF
-                self.asff_1 = ASFF(level=0, multiplier=width, asff_channel=asff_channel, act=act)
-                self.asff_2 = ASFF(level=1, multiplier=width, asff_channel=asff_channel, act=act)
-                self.asff_3 = ASFF(level=2, multiplier=width, asff_channel=asff_channel, act=act)
+                self.asff_1 = ASFF(
+                    level=0,
+                    multiplier=width,
+                    asff_channel=asff_channel,
+                    act=act)
+                self.asff_2 = ASFF(
+                    level=1,
+                    multiplier=width,
+                    asff_channel=asff_channel,
+                    act=act)
+                self.asff_3 = ASFF(
+                    level=2,
+                    multiplier=width,
+                    asff_channel=asff_channel,
+                    act=act)
             else:
                 from .ASFF_sim import ASFF
-                self.asff_1 = ASFF(level=0, multiplier=width, asff_channel=asff_channel, act=act,expand_kernel=expand_kernel, down_rate = down_rate, use_dconv = use_dconv, use_expand = use_expand)
-                self.asff_2 = ASFF(level=1, multiplier=width, asff_channel=asff_channel, act=act,expand_kernel=expand_kernel, down_rate = down_rate, use_dconv = use_dconv, use_expand = use_expand)
-                self.asff_3 = ASFF(level=2, multiplier=width, asff_channel=asff_channel, act=act,expand_kernel=expand_kernel, down_rate = down_rate, use_dconv = use_dconv, use_expand = use_expand)
-
+                self.asff_1 = ASFF(
+                    level=0,
+                    multiplier=width,
+                    asff_channel=asff_channel,
+                    act=act,
+                    expand_kernel=expand_kernel,
+                    down_rate=down_rate,
+                    use_dconv=use_dconv,
+                    use_expand=use_expand)
+                self.asff_2 = ASFF(
+                    level=1,
+                    multiplier=width,
+                    asff_channel=asff_channel,
+                    act=act,
+                    expand_kernel=expand_kernel,
+                    down_rate=down_rate,
+                    use_dconv=use_dconv,
+                    use_expand=use_expand)
+                self.asff_3 = ASFF(
+                    level=2,
+                    multiplier=width,
+                    asff_channel=asff_channel,
+                    act=act,
+                    expand_kernel=expand_kernel,
+                    down_rate=down_rate,
+                    use_dconv=use_dconv,
+                    use_expand=use_expand)
 
     def forward(self, input):
         """
@@ -246,7 +286,7 @@ def forward(self, input):
             Tuple[Tensor]: FPN feature.
         """
 
-        if self.backbone_name == "CSPDarknet":
+        if self.backbone_name == 'CSPDarknet':
             out_features = self.backbone(input)
             features = [out_features[f] for f in self.in_features]
             [x2, x1, x0] = features
@@ -254,7 +294,7 @@ def forward(self, input):
             features = self.backbone(input)
             [x2, x1, x0] = features
 
-        if self.neck =='yolo':
+        if self.neck == 'yolo':
             fpn_out0 = self.lateral_conv0(x0)  # 1024->512/32
             f_out0 = self.upsample(fpn_out0)  # 512/16
             f_out0 = torch.cat([f_out0, x1], 1)  # 512->1024/16
@@ -308,7 +348,7 @@ def forward(self, input):
         outputs = (pan_out2, pan_out1, pan_out0)
 
         # forward for attention
-        if self.use_att == 'ASFF' or self.use_att=='ASFF_sim':
+        if self.use_att == 'ASFF' or self.use_att == 'ASFF_sim':
             pan_out0 = self.asff_1(outputs)
             pan_out1 = self.asff_2(outputs)
             pan_out2 = self.asff_3(outputs)
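As the forward hunk shows, each ASFF module consumes the full tuple of PAN outputs and re-fuses the three levels for its own scale, so enabling attention only changes the last step of forward. A construction sketch; the depth/width values for the 's' scale are assumptions used purely for illustration:

    from easycv.models.detection.detectors.yolox.yolo_pafpn import YOLOPAFPN

    # argument names are the ones introduced in this patch; values are examples
    neck = YOLOPAFPN(
        depth=0.33,          # assumed 's' scaling
        width=0.50,
        backbone='CSPDarknet',
        neck='yolo',
        use_att='ASFF',      # or 'ASFF_sim'
        asff_channel=16)
    # in forward(): outputs = (pan_out2, pan_out1, pan_out0), then
    # pan_out0 = asff_1(outputs), pan_out1 = asff_2(outputs), pan_out2 = asff_3(outputs)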
diff --git a/easycv/models/detection/detectors/yolox/yolox.py b/easycv/models/detection/detectors/yolox/yolox.py
index f89b368d..abad6fe9 100644
--- a/easycv/models/detection/detectors/yolox/yolox.py
+++ b/easycv/models/detection/detectors/yolox/yolox.py
@@ -1,6 +1,6 @@
 # Copyright (c) 2014-2021 Megvii Inc And Alibaba PAI-Teams. All rights reserved.
-from typing import Dict
 import logging
+from typing import Dict
 
 import numpy as np
 import torch
@@ -10,10 +10,10 @@
 from easycv.models.base import BaseModel
 from easycv.models.builder import MODELS
 from easycv.models.detection.utils import postprocess
+from .tood_head import TOODHead
 # from .ppyoloe_head import PPYOLOEHead
 from .yolo_head import YOLOXHead
 from .yolo_pafpn import YOLOPAFPN
-from .tood_head import TOODHead
 
 
 def init_yolo(M):
@@ -22,8 +22,9 @@ def init_yolo(M):
             m.eps = 1e-3
             m.momentum = 0.03
 
+
 def cxcywh2xyxy(bboxes):
-    bboxes[..., 0] = bboxes[..., 0] - bboxes[..., 2] * 0.5   # x1
+    bboxes[..., 0] = bboxes[..., 0] - bboxes[..., 2] * 0.5  # x1
     bboxes[..., 1] = bboxes[..., 1] - bboxes[..., 3] * 0.5
     bboxes[..., 2] = bboxes[..., 0] + bboxes[..., 2]
     bboxes[..., 3] = bboxes[..., 1] + bboxes[..., 3]
@@ -67,7 +68,7 @@ def __init__(self,
                  la_down_rate: int = 8,
                  conv_layers: int = 2,
                  decode_in_inference: bool = True,
-                 backbone="CSPDarknet",
+                 backbone='CSPDarknet',
                  expand_kernel=3,
                  down_rate=32,
                  use_dconv=False,
@@ -80,17 +81,43 @@ def __init__(self,
         depth = self.param_map[model_type][0]
         width = self.param_map[model_type][1]
 
-        self.backbone = YOLOPAFPN(depth, width, in_channels=in_channels, asff_channel=asff_channel, act=act, use_att=use_att, backbone = backbone, neck = neck, neck_mode=neck_mode, expand_kernel=expand_kernel, down_rate = down_rate, use_dconv = use_dconv, use_expand = use_expand)
+        self.backbone = YOLOPAFPN(
+            depth,
+            width,
+            in_channels=in_channels,
+            asff_channel=asff_channel,
+            act=act,
+            use_att=use_att,
+            backbone=backbone,
+            neck=neck,
+            neck_mode=neck_mode,
+            expand_kernel=expand_kernel,
+            down_rate=down_rate,
+            use_dconv=use_dconv,
+            use_expand=use_expand)
 
         self.head_type = head_type
         if head_type == 'yolox':
-            self.head = YOLOXHead(num_classes, width, in_channels=in_channels, act=act, obj_loss_type=obj_loss_type, reg_loss_type=reg_loss_type)
+            self.head = YOLOXHead(
+                num_classes,
+                width,
+                in_channels=in_channels,
+                act=act,
+                obj_loss_type=obj_loss_type,
+                reg_loss_type=reg_loss_type)
             self.head.initialize_biases(1e-2)
         elif head_type == 'tood':
-            self.head = TOODHead(num_classes, width, in_channels=in_channels, act=act, obj_loss_type=obj_loss_type, reg_loss_type=reg_loss_type, stacked_convs=stacked_convs,
-                 la_down_rate=la_down_rate,
-                 conv_layers=conv_layers,
-                 decode_in_inference=decode_in_inference)
+            self.head = TOODHead(
+                num_classes,
+                width,
+                in_channels=in_channels,
+                act=act,
+                obj_loss_type=obj_loss_type,
+                reg_loss_type=reg_loss_type,
+                stacked_convs=stacked_convs,
+                la_down_rate=la_down_rate,
+                conv_layers=conv_layers,
+                decode_in_inference=decode_in_inference)
             self.head.initialize_biases(1e-2)
         elif head_type == 'ppyoloe':
             self.head = PPYOLOEHead(
@@ -110,17 +137,17 @@ def __init__(self,
                 # assigner=TaskAlignedAssigner(topk=self.tal_topk, alpha=1.0, beta=6.0)
             )
 
-
         self.decode_in_inference = decode_in_inference
         # use decode, we will use post process as default
         if not self.decode_in_inference:
-            logging.warning('YOLOX-PAI head decode_in_inference close for speed test, post process will be close at same time!')
+            logging.warning(
+                'YOLOX-PAI head decode_in_inference is disabled for speed test, post process will be disabled at the same time!'
+            )
             self.ignore_postprocess = True
             logging.warning('YOLOX-PAI ignore_postprocess set to be True')
         else:
             self.ignore_postprocess = False
 
-
         self.apply(init_yolo)  # init_yolo(self)
         self.num_classes = num_classes
         self.test_conf = test_conf
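After the reformat the constructor wiring reads more directly: YOLOPAFPN receives the backbone/neck/attention options, the head receives the loss configuration, and head_type switches between YOLOXHead ('yolox') and TOODHead ('tood'). An illustrative instantiation using only parameter names visible in these hunks (the values are examples, not defaults taken from the patch):

    from easycv.models.detection.detectors.yolox.yolox import YOLOX

    model = YOLOX(
        num_classes=80,
        model_type='s',
        head_type='yolox',       # 'tood' selects TOODHead instead
        obj_loss_type='focal',
        reg_loss_type='ciou',
        use_att='ASFF',
        backbone='CSPDarknet')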
@@ -148,26 +175,25 @@ def forward_train(self,
 
         targets = torch.cat([gt_labels, gt_bboxes], dim=2)
 
-
-        if self.head_type!='ppyoloe':
+        if self.head_type != 'ppyoloe':
             loss, iou_loss, conf_loss, cls_loss, l1_loss, num_fg = self.head(
                 fpn_outs, targets, img)
 
             outputs = {
                 'total_loss':
-                    loss,
+                loss,
                 'iou_l':
-                    iou_loss,
+                iou_loss,
                 'conf_l':
-                    conf_loss,
+                conf_loss,
                 'cls_l':
-                    cls_loss,
+                cls_loss,
                 'img_h':
-                    torch.tensor(img_metas[0]['img_shape'][0],
-                                 device=loss.device).float(),
+                torch.tensor(img_metas[0]['img_shape'][0],
+                             device=loss.device).float(),
                 'img_w':
-                    torch.tensor(img_metas[0]['img_shape'][1],
-                                 device=loss.device).float()
+                torch.tensor(img_metas[0]['img_shape'][1],
+                             device=loss.device).float()
             }
 
         else:
@@ -180,19 +206,21 @@ def forward_train(self,
 
             outputs = {
                 'total_loss':
-                    yolo_losses['total_loss'],
+                yolo_losses['total_loss'],
                 'iou_l':
-                    yolo_losses['loss_iou'],
+                yolo_losses['loss_iou'],
                 'conf_l':
-                    yolo_losses['loss_dfl'],
+                yolo_losses['loss_dfl'],
                 'cls_l':
-                    yolo_losses['loss_cls'],
+                yolo_losses['loss_cls'],
                 'img_h':
-                    torch.tensor(img_metas[0]['img_shape'][0],
-                                 device=yolo_losses['total_loss'].device).float(),
+                torch.tensor(
+                    img_metas[0]['img_shape'][0],
+                    device=yolo_losses['total_loss'].device).float(),
                 'img_w':
-                    torch.tensor(img_metas[0]['img_shape'][1],
-                                 device=yolo_losses['total_loss'].device).float()
+                torch.tensor(
+                    img_metas[0]['img_shape'][1],
+                    device=yolo_losses['total_loss'].device).float()
             }
 
         return outputs
@@ -264,7 +292,7 @@ def forward_export(self, img):
             outputs = self.head(fpn_outs)
 
             if self.decode_in_inference:
-                outputs = postprocess(outputs, self.num_classes, self.test_conf,
-                                  self.nms_thre)
+                outputs = postprocess(outputs, self.num_classes,
+                                      self.test_conf, self.nms_thre)
 
         return outputs
diff --git a/easycv/models/loss/__init__.py b/easycv/models/loss/__init__.py
index b8c905b4..3d991a8c 100644
--- a/easycv/models/loss/__init__.py
+++ b/easycv/models/loss/__init__.py
@@ -1,7 +1,6 @@
 # Copyright (c) Alibaba, Inc. and its affiliates.
 from .cross_entropy_loss import CrossEntropyLoss
-from .focal_loss import FocalLoss
+from .focal_loss import FocalLoss, VarifocalLoss
 from .iou_loss import GIoULoss, IoULoss, IOUloss
 from .mse_loss import JointsMSELoss
 from .pytorch_metric_learning import *
-from .focal_loss import FocalLoss,VarifocalLoss
diff --git a/easycv/models/loss/focal_loss.py b/easycv/models/loss/focal_loss.py
index aa30abff..6adb4630 100644
--- a/easycv/models/loss/focal_loss.py
+++ b/easycv/models/loss/focal_loss.py
@@ -7,6 +7,7 @@
 from easycv.models.builder import LOSSES
 from easycv.models.loss.utils import weight_reduce_loss
 
+
 def reduce_loss(loss, reduction):
     """Reduce loss as specified.
     Args:
diff --git a/easycv/models/loss/iou_loss.py b/easycv/models/loss/iou_loss.py
index d849a653..1fef8a87 100644
--- a/easycv/models/loss/iou_loss.py
+++ b/easycv/models/loss/iou_loss.py
@@ -1,5 +1,6 @@
 # Copyright (c) Alibaba, Inc. and its affiliates.
 
+import math
 import warnings
 
 import mmcv
@@ -9,7 +10,7 @@
 from easycv.models.detection.utils import bbox_overlaps
 from easycv.models.loss.utils import weighted_loss
 from ..registry import LOSSES
-import math
+
 
 @mmcv.jit(derivate=True, coderize=True)
 @weighted_loss
@@ -99,28 +100,35 @@ def forward(self, pred, target):
         if self.loss_type == 'iou':
             loss = 1 - iou**2
 
-        elif self.loss_type == "siou":
+        elif self.loss_type == 'siou':
             # angle cost
-            c_h = torch.max(pred[:, 1], target[:, 1]) - torch.min(pred[:, 1], target[:, 1])
-            c_w = torch.max(pred[:, 0], target[:, 0]) - torch.min(pred[:, 0], target[:, 0])
-            sigma = torch.sqrt(((pred[:, :2] - target[:, :2]) ** 2).sum(dim=1))
+            c_h = torch.max(pred[:, 1], target[:, 1]) - torch.min(
+                pred[:, 1], target[:, 1])
+            c_w = torch.max(pred[:, 0], target[:, 0]) - torch.min(
+                pred[:, 0], target[:, 0])
+            sigma = torch.sqrt(((pred[:, :2] - target[:, :2])**2).sum(dim=1))
             # angle_cost = 1 - 2 * torch.pow(torch.sin(torch.arctan(c_h / c_w) - torch.tensor(math.pi / 4)),2)
-            angle_cost = 2*(c_h*c_w)/(sigma**2)
+            angle_cost = 2 * (c_h * c_w) / (sigma**2)
 
             # distance cost
             gamma = 2 - angle_cost
             # gamma = 1
-            c_dw = torch.max(pred[:, 0], target[:, 0]) - torch.min(pred[:, 0], target[:, 0]) + (pred[:, 2] + target[:, 2])/2
-            c_dh = torch.max(pred[:, 1], target[:, 1]) - torch.min(pred[:, 1], target[:, 1]) + (pred[:, 3] + target[:, 3])/2
-            p_x = ((target[:, 0] - pred[:, 0]) / c_dw) ** 2
-            p_y = ((target[:, 1] - pred[:, 1]) / c_dh) ** 2
+            c_dw = torch.max(pred[:, 0], target[:, 0]) - torch.min(
+                pred[:, 0], target[:, 0]) + (pred[:, 2] + target[:, 2]) / 2
+            c_dh = torch.max(pred[:, 1], target[:, 1]) - torch.min(
+                pred[:, 1], target[:, 1]) + (pred[:, 3] + target[:, 3]) / 2
+            p_x = ((target[:, 0] - pred[:, 0]) / c_dw)**2
+            p_y = ((target[:, 1] - pred[:, 1]) / c_dh)**2
             dist_cost = 2 - torch.exp(-gamma * p_x) - torch.exp(-gamma * p_y)
 
             # shape cost
             theta = 4
-            w_w = torch.abs(pred[:, 2] - target[:, 2]) / torch.max(pred[:, 2], target[:, 2])
-            w_h = torch.abs(pred[:, 3] - target[:, 3]) / torch.max(pred[:, 3], target[:, 3])
-            shape_cost = torch.pow((1 - torch.exp(-w_w)), theta) + torch.pow((1 - torch.exp(-w_h)), theta)
+            w_w = torch.abs(pred[:, 2] - target[:, 2]) / torch.max(
+                pred[:, 2], target[:, 2])
+            w_h = torch.abs(pred[:, 3] - target[:, 3]) / torch.max(
+                pred[:, 3], target[:, 3])
+            shape_cost = torch.pow((1 - torch.exp(-w_w)), theta) + torch.pow(
+                (1 - torch.exp(-w_h)), theta)
 
             loss = 1 - iou + (dist_cost + shape_cost) / 2
 
@@ -133,37 +141,41 @@ def forward(self, pred, target):
             giou = iou - (area_c - area_i) / area_c.clamp(1e-16)
             loss = 1 - giou.clamp(min=-1.0, max=1.0)
 
-        elif self.loss_type == "diou":
+        elif self.loss_type == 'diou':
             c_tl = torch.min(
-                (pred[:, :2] - pred[:, 2:] / 2), (target[:, :2] - target[:, 2:] / 2)  # top-left point of the enclosing box
+                (pred[:, :2] - pred[:, 2:] / 2),
+                (target[:, :2] - target[:, 2:] / 2)  # top-left point of the enclosing box
             )
             c_br = torch.max(
-                (pred[:, :2] + pred[:, 2:] / 2), (target[:, :2] + target[:, 2:] / 2)  # bottom-right point of the enclosing box
+                (pred[:, :2] + pred[:, 2:] / 2),
+                (target[:, :2] + target[:, 2:] / 2)  # bottom-right point of the enclosing box
             )
-            convex_dis = torch.pow(c_br[:, 0] - c_tl[:, 0], 2) + torch.pow(c_br[:, 1] - c_tl[:, 1],
-                                                                           2) + 1e-7  # convex diagonal squared
+            convex_dis = torch.pow(c_br[:, 0] - c_tl[:, 0], 2) + torch.pow(
+                c_br[:, 1] - c_tl[:, 1], 2) + 1e-7  # convex diagonal squared
 
-            center_dis = (torch.pow(pred[:, 0] - target[:, 0], 2) + torch.pow(pred[:, 1] - target[:, 1],
-                                                                              2))  # center diagonal squared
+            center_dis = (torch.pow(pred[:, 0] - target[:, 0], 2) +
+                          torch.pow(pred[:, 1] - target[:, 1], 2)
+                          )  # center diagonal squared
 
             diou = iou - (center_dis / convex_dis)
             loss = 1 - diou.clamp(min=-1.0, max=1.0)
 
-        elif self.loss_type == "ciou":
-            c_tl = torch.min(
-                (pred[:, :2] - pred[:, 2:] / 2), (target[:, :2] - target[:, 2:] / 2)
-            )
-            c_br = torch.max(
-                (pred[:, :2] + pred[:, 2:] / 2), (target[:, :2] + target[:, 2:] / 2)
-            )
-            convex_dis = torch.pow(c_br[:, 0] - c_tl[:, 0], 2) + torch.pow(c_br[:, 1] - c_tl[:, 1],
-                                                                           2) + 1e-7  # convex diagonal squared
+        elif self.loss_type == 'ciou':
+            c_tl = torch.min((pred[:, :2] - pred[:, 2:] / 2),
+                             (target[:, :2] - target[:, 2:] / 2))
+            c_br = torch.max((pred[:, :2] + pred[:, 2:] / 2),
+                             (target[:, :2] + target[:, 2:] / 2))
+            convex_dis = torch.pow(c_br[:, 0] - c_tl[:, 0], 2) + torch.pow(
+                c_br[:, 1] - c_tl[:, 1], 2) + 1e-7  # convex diagonal squared
 
-            center_dis = (torch.pow(pred[:, 0] - target[:, 0], 2) + torch.pow(pred[:, 1] - target[:, 1],
-                                                                              2))  # center diagonal squared
+            center_dis = (torch.pow(pred[:, 0] - target[:, 0], 2) +
+                          torch.pow(pred[:, 1] - target[:, 1], 2)
+                          )  # center diagonal squared
 
-            v = (4 / math.pi ** 2) * torch.pow(torch.atan(target[:, 2] / torch.clamp(target[:, 3], min=1e-7)) -
-                                               torch.atan(pred[:, 2] / torch.clamp(pred[:, 3], min=1e-7)), 2)
+            v = (4 / math.pi**2) * torch.pow(
+                torch.atan(target[:, 2] / torch.clamp(target[:, 3], min=1e-7))
+                - torch.atan(pred[:, 2] / torch.clamp(pred[:, 3], min=1e-7)),
+                2)
 
             with torch.no_grad():
                 alpha = v / ((1 + 1e-7) - iou + v)
@@ -172,19 +184,18 @@ def forward(self, pred, target):
 
             loss = 1 - ciou.clamp(min=-1.0, max=1.0)
 
-        elif self.loss_type == "eiou":
+        elif self.loss_type == 'eiou':
 
-            c_tl = torch.min(
-                (pred[:, :2] - pred[:, 2:] / 2), (target[:, :2] - target[:, 2:] / 2)
-            )
-            c_br = torch.max(
-                (pred[:, :2] + pred[:, 2:] / 2), (target[:, :2] + target[:, 2:] / 2)
-            )
-            convex_dis = torch.pow(c_br[:, 0] - c_tl[:, 0], 2) + torch.pow(c_br[:, 1] - c_tl[:, 1],
-                                                                           2) + 1e-7  # convex diagonal squared
+            c_tl = torch.min((pred[:, :2] - pred[:, 2:] / 2),
+                             (target[:, :2] - target[:, 2:] / 2))
+            c_br = torch.max((pred[:, :2] + pred[:, 2:] / 2),
+                             (target[:, :2] + target[:, 2:] / 2))
+            convex_dis = torch.pow(c_br[:, 0] - c_tl[:, 0], 2) + torch.pow(
+                c_br[:, 1] - c_tl[:, 1], 2) + 1e-7  # convex diagonal squared
 
-            center_dis = (torch.pow(pred[:, 0] - target[:, 0], 2) + torch.pow(pred[:, 1] - target[:, 1],
-                                                                              2))  # center diagonal squared
+            center_dis = (torch.pow(pred[:, 0] - target[:, 0], 2) +
+                          torch.pow(pred[:, 1] - target[:, 1], 2)
+                          )  # center diagonal squared
 
             dis_w = torch.pow(pred[:, 2] - target[:, 2], 2)  # squared w-distance between the two boxes
             dis_h = torch.pow(pred[:, 3] - target[:, 3], 2)  # squared h-distance between the two boxes
@@ -192,7 +203,8 @@ def forward(self, pred, target):
             C_w = torch.pow(c_br[:, 0] - c_tl[:, 0], 2) + 1e-7  # squared w of the enclosing box
             C_h = torch.pow(c_br[:, 1] - c_tl[:, 1], 2) + 1e-7  # squared h of the enclosing box
 
-            eiou = iou - (center_dis / convex_dis) - (dis_w / C_w) - (dis_h / C_h)
+            eiou = iou - (center_dis / convex_dis) - (dis_w / C_w) - (
+                dis_h / C_h)
 
             loss = 1 - eiou.clamp(min=-1.0, max=1.0)
 
@@ -203,6 +215,7 @@ def forward(self, pred, target):
 
         return loss
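For reference, the diou and ciou branches above implement the standard distance- and complete-IoU penalties (the 1e-7 stabilizers from the code are omitted here). With \rho^2 the squared distance between the box centers and c^2 the squared diagonal of the smallest enclosing box:

    L_{DIoU} = 1 - IoU + \rho^2 / c^2,
    L_{CIoU} = 1 - IoU + \rho^2 / c^2 + \alpha v,
    v = (4 / \pi^2) * (\arctan(w_{gt} / h_{gt}) - \arctan(w / h))^2,
    \alpha = v / ((1 - IoU) + v),

where \alpha is evaluated under torch.no_grad() so no gradient flows through it, and the code clamps diou/ciou to [-1, 1] before subtracting from 1.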
 
+
 @LOSSES.register_module()
 class IoULoss(nn.Module):
     """IoULoss.
@@ -314,28 +327,35 @@ def forward(self, pred, target):
         if self.loss_type == 'iou':
             loss = 1 - iou**2
 
-        elif self.loss_type == "siou":
+        elif self.loss_type == 'siou':
             # angle cost
-            c_h = torch.max(pred[:, 1], target[:, 1]) - torch.min(pred[:, 1], target[:, 1])
-            c_w = torch.max(pred[:, 0], target[:, 0]) - torch.min(pred[:, 0], target[:, 0])
-            sigma = torch.sqrt(((pred[:, :2] - target[:, :2]) ** 2).sum(dim=1))
+            c_h = torch.max(pred[:, 1], target[:, 1]) - torch.min(
+                pred[:, 1], target[:, 1])
+            c_w = torch.max(pred[:, 0], target[:, 0]) - torch.min(
+                pred[:, 0], target[:, 0])
+            sigma = torch.sqrt(((pred[:, :2] - target[:, :2])**2).sum(dim=1))
             # angle_cost = 1 - 2 * torch.pow(torch.sin(torch.arctan(c_h / c_w) - torch.tensor(math.pi / 4)),2)
-            angle_cost = 2*(c_h*c_w)/(sigma**2)
+            angle_cost = 2 * (c_h * c_w) / (sigma**2)
 
             # distance cost
             gamma = 2 - angle_cost
             # gamma = 1
-            c_dw = torch.max(pred[:, 0], target[:, 0]) - torch.min(pred[:, 0], target[:, 0]) + (pred[:, 2] + target[:, 2])/2
-            c_dh = torch.max(pred[:, 1], target[:, 1]) - torch.min(pred[:, 1], target[:, 1]) + (pred[:, 3] + target[:, 3])/2
-            p_x = ((target[:, 0] - pred[:, 0]) / c_dw) ** 2
-            p_y = ((target[:, 1] - pred[:, 1]) / c_dh) ** 2
+            c_dw = torch.max(pred[:, 0], target[:, 0]) - torch.min(
+                pred[:, 0], target[:, 0]) + (pred[:, 2] + target[:, 2]) / 2
+            c_dh = torch.max(pred[:, 1], target[:, 1]) - torch.min(
+                pred[:, 1], target[:, 1]) + (pred[:, 3] + target[:, 3]) / 2
+            p_x = ((target[:, 0] - pred[:, 0]) / c_dw)**2
+            p_y = ((target[:, 1] - pred[:, 1]) / c_dh)**2
             dist_cost = 2 - torch.exp(-gamma * p_x) - torch.exp(-gamma * p_y)
 
             # shape cost
             theta = 4
-            w_w = torch.abs(pred[:, 2] - target[:, 2]) / torch.max(pred[:, 2], target[:, 2])
-            w_h = torch.abs(pred[:, 3] - target[:, 3]) / torch.max(pred[:, 3], target[:, 3])
-            shape_cost = torch.pow((1 - torch.exp(-w_w)), theta) + torch.pow((1 - torch.exp(-w_h)), theta)
+            w_w = torch.abs(pred[:, 2] - target[:, 2]) / torch.max(
+                pred[:, 2], target[:, 2])
+            w_h = torch.abs(pred[:, 3] - target[:, 3]) / torch.max(
+                pred[:, 3], target[:, 3])
+            shape_cost = torch.pow((1 - torch.exp(-w_w)), theta) + torch.pow(
+                (1 - torch.exp(-w_h)), theta)
 
             loss = 1 - iou + (dist_cost + shape_cost) / 2
 
@@ -348,37 +368,41 @@ def forward(self, pred, target):
             giou = iou - (area_c - area_i) / area_c.clamp(1e-16)
             loss = 1 - giou.clamp(min=-1.0, max=1.0)
 
-        elif self.loss_type == "diou":
+        elif self.loss_type == 'diou':
             c_tl = torch.min(
-                (pred[:, :2] - pred[:, 2:] / 2), (target[:, :2] - target[:, 2:] / 2)  # top-left point of the enclosing box
+                (pred[:, :2] - pred[:, 2:] / 2),
+                (target[:, :2] - target[:, 2:] / 2)  # top-left point of the enclosing box
             )
             c_br = torch.max(
-                (pred[:, :2] + pred[:, 2:] / 2), (target[:, :2] + target[:, 2:] / 2)  # bottom-right point of the enclosing box
+                (pred[:, :2] + pred[:, 2:] / 2),
+                (target[:, :2] + target[:, 2:] / 2)  # bottom-right point of the enclosing box
             )
-            convex_dis = torch.pow(c_br[:, 0] - c_tl[:, 0], 2) + torch.pow(c_br[:, 1] - c_tl[:, 1],
-                                                                           2) + 1e-7  # convex diagonal squared
+            convex_dis = torch.pow(c_br[:, 0] - c_tl[:, 0], 2) + torch.pow(
+                c_br[:, 1] - c_tl[:, 1], 2) + 1e-7  # convex diagonal squared
 
-            center_dis = (torch.pow(pred[:, 0] - target[:, 0], 2) + torch.pow(pred[:, 1] - target[:, 1],
-                                                                              2))  # center diagonal squared
+            center_dis = (torch.pow(pred[:, 0] - target[:, 0], 2) +
+                          torch.pow(pred[:, 1] - target[:, 1], 2)
+                          )  # center diagonal squared
 
             diou = iou - (center_dis / convex_dis)
             loss = 1 - diou.clamp(min=-1.0, max=1.0)
 
-        elif self.loss_type == "ciou":
-            c_tl = torch.min(
-                (pred[:, :2] - pred[:, 2:] / 2), (target[:, :2] - target[:, 2:] / 2)
-            )
-            c_br = torch.max(
-                (pred[:, :2] + pred[:, 2:] / 2), (target[:, :2] + target[:, 2:] / 2)
-            )
-            convex_dis = torch.pow(c_br[:, 0] - c_tl[:, 0], 2) + torch.pow(c_br[:, 1] - c_tl[:, 1],
-                                                                           2) + 1e-7  # convex diagonal squared
+        elif self.loss_type == 'ciou':
+            c_tl = torch.min((pred[:, :2] - pred[:, 2:] / 2),
+                             (target[:, :2] - target[:, 2:] / 2))
+            c_br = torch.max((pred[:, :2] + pred[:, 2:] / 2),
+                             (target[:, :2] + target[:, 2:] / 2))
+            convex_dis = torch.pow(c_br[:, 0] - c_tl[:, 0], 2) + torch.pow(
+                c_br[:, 1] - c_tl[:, 1], 2) + 1e-7  # convex diagonal squared
 
-            center_dis = (torch.pow(pred[:, 0] - target[:, 0], 2) + torch.pow(pred[:, 1] - target[:, 1],
-                                                                              2))  # center diagonal squared
+            center_dis = (torch.pow(pred[:, 0] - target[:, 0], 2) +
+                          torch.pow(pred[:, 1] - target[:, 1], 2)
+                          )  # center diagonal squared
 
-            v = (4 / math.pi ** 2) * torch.pow(torch.atan(target[:, 2] / torch.clamp(target[:, 3], min=1e-7)) -
-                                               torch.atan(pred[:, 2] / torch.clamp(pred[:, 3], min=1e-7)), 2)
+            v = (4 / math.pi**2) * torch.pow(
+                torch.atan(target[:, 2] / torch.clamp(target[:, 3], min=1e-7))
+                - torch.atan(pred[:, 2] / torch.clamp(pred[:, 3], min=1e-7)),
+                2)
 
             with torch.no_grad():
                 alpha = v / ((1 + 1e-7) - iou + v)
@@ -387,19 +411,18 @@ def forward(self, pred, target):
 
             loss = 1 - ciou.clamp(min=-1.0, max=1.0)
 
-        elif self.loss_type == "eiou":
+        elif self.loss_type == 'eiou':
 
-            c_tl = torch.min(
-                (pred[:, :2] - pred[:, 2:] / 2), (target[:, :2] - target[:, 2:] / 2)
-            )
-            c_br = torch.max(
-                (pred[:, :2] + pred[:, 2:] / 2), (target[:, :2] + target[:, 2:] / 2)
-            )
-            convex_dis = torch.pow(c_br[:, 0] - c_tl[:, 0], 2) + torch.pow(c_br[:, 1] - c_tl[:, 1],
-                                                                           2) + 1e-7  # convex diagonal squared
+            c_tl = torch.min((pred[:, :2] - pred[:, 2:] / 2),
+                             (target[:, :2] - target[:, 2:] / 2))
+            c_br = torch.max((pred[:, :2] + pred[:, 2:] / 2),
+                             (target[:, :2] + target[:, 2:] / 2))
+            convex_dis = torch.pow(c_br[:, 0] - c_tl[:, 0], 2) + torch.pow(
+                c_br[:, 1] - c_tl[:, 1], 2) + 1e-7  # convex diagonal squared
 
-            center_dis = (torch.pow(pred[:, 0] - target[:, 0], 2) + torch.pow(pred[:, 1] - target[:, 1],
-                                                                              2))  # center diagonal squared
+            center_dis = (torch.pow(pred[:, 0] - target[:, 0], 2) +
+                          torch.pow(pred[:, 1] - target[:, 1], 2)
+                          )  # center diagonal squared
 
             dis_w = torch.pow(pred[:, 2] - target[:, 2], 2)  # squared w-distance between the two boxes
             dis_h = torch.pow(pred[:, 3] - target[:, 3], 2)  # squared h-distance between the two boxes
@@ -407,7 +430,8 @@ def forward(self, pred, target):
             C_w = torch.pow(c_br[:, 0] - c_tl[:, 0], 2) + 1e-7  # squared w of the enclosing box
             C_h = torch.pow(c_br[:, 1] - c_tl[:, 1], 2) + 1e-7  # squared h of the enclosing box
 
-            eiou = iou - (center_dis / convex_dis) - (dis_w / C_w) - (dis_h / C_h)
+            eiou = iou - (center_dis / convex_dis) - (dis_w / C_w) - (
+                dis_h / C_h)
 
             loss = 1 - eiou.clamp(min=-1.0, max=1.0)
 
@@ -445,5 +469,3 @@ def forward(self,
             avg_factor=avg_factor,
             **kwargs)
         return loss
-
-
diff --git a/easycv/toolkit/blade/cv_blade_utils.py b/easycv/toolkit/blade/cv_blade_utils.py
index 1e71b103..1981f9ce 100644
--- a/easycv/toolkit/blade/cv_blade_utils.py
+++ b/easycv/toolkit/blade/cv_blade_utils.py
@@ -74,7 +74,7 @@ def opt_trt_config(input_config=dict(enable_fp16=True)):
         optimization_pipeline='TensorRT',
         enable_fp16=True,
         customize_op_black_list=[
-             #'aten::select', 'aten::index', 'aten::slice', 'aten::view', 'aten::upsample'
+            #'aten::select', 'aten::index', 'aten::slice', 'aten::view', 'aten::upsample'
         ],
         fp16_fallback_op_ratio=0.05,
     )
@@ -236,6 +236,7 @@ def check_results(results0, results1):
     except Exception as err:
         logging.error(err)
 
+
 def blade_optimize(script_model,
                    model,
                    inputs,
@@ -246,7 +247,9 @@ def blade_optimize(script_model,
                    static_opt=True):
 
     if not static_opt:
-        logging.info('PAI-Blade use dynamic optimize for input model, export model is build for dynamic shape input')
+        logging.info(
+            'PAI-Blade uses dynamic optimization for the input model, the exported model is built for dynamic shape input'
+        )
         with opt_trt_config(blade_config):
             opt_model = optimize(
                 model,
@@ -254,7 +257,9 @@ def blade_optimize(script_model,
                 model_inputs=tuple(inputs),
             )
     else:
-        logging.info('PAI-Blade use static optimize for input model, export model must be used as static shape input')
+        logging.info(
+            'PAI-Blade uses static optimization for the input model, the exported model must be used with static shape input'
+        )
         from torch_blade.optimization import _static_optimize
         with opt_trt_config(blade_config):
             opt_model = _static_optimize(
@@ -296,7 +301,7 @@ def blade_optimize(script_model,
     # else:
     # test_result = opt_model(*inputs)
     # test_result = opt_model(*inputs)
-    
+
     torch.cuda.synchronize()
     cu_prof_start()
     for k in range(10):
@@ -313,9 +318,9 @@ def blade_optimize(script_model,
         for k in range(10):
             test_result = opt_model(*inputs)
             torch.cuda.synchronize()
- 
-    prof_str = prof.key_averages().table(sort_by="cuda_time_total")
-    print(f"{prof_str}")
+
+    prof_str = prof.key_averages().table(sort_by='cuda_time_total')
+    print(f'{prof_str}')
 
     # check_results(output, test_result)
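The profiling tail now prints a per-operator summary via prof.key_averages().table(sort_by='cuda_time_total'). The profiler context itself lies outside this hunk, so the following standalone equivalent is only an assumed-typical usage of the PyTorch autograd profiler:

    import torch
    from torch.autograd import profiler

    def profile_runs(opt_model, inputs, runs=10):
        # warm up, then profile CUDA-synchronized runs and print the same summary table
        for _ in range(runs):
            opt_model(*inputs)
        torch.cuda.synchronize()
        with profiler.profile(use_cuda=True) as prof:
            for _ in range(runs):
                opt_model(*inputs)
                torch.cuda.synchronize()
        print(prof.key_averages().table(sort_by='cuda_time_total'))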
 
diff --git a/easycv/utils/checkpoint.py b/easycv/utils/checkpoint.py
index b33f9df8..298219fc 100644
--- a/easycv/utils/checkpoint.py
+++ b/easycv/utils/checkpoint.py
@@ -4,7 +4,6 @@
 import torch
 from mmcv.parallel import is_module_wrapper
 from mmcv.runner import load_checkpoint as mmcv_load_checkpoint
-
 from mmcv.runner.checkpoint import (_save_to_state_dict, get_state_dict,
                                     weights_to_cpu)
 from torch.optim import Optimizer
diff --git a/show_predict.py b/show_predict.py
index d335f495..31992d51 100644
--- a/show_predict.py
+++ b/show_predict.py
@@ -1,8 +1,10 @@
 import os
+import random
+
+import cv2
 import numpy as np
 from PIL import Image
-import cv2
-import random
+
 from easycv.predictors.detector import TorchYoloXPredictor
 
 colors = [[255, 0, 0], [255, 255, 0], [255, 255, 0], [0, 255, 255]
@@ -15,11 +17,9 @@ def plot_boxes(outputs, imgs, save_path=None, color=None, line_thickness=None):
     id = outputs['detection_classes']
     label = outputs['detection_class_names']
 
-
     # Plots one bounding box on image img
     tl = int(
-        line_thickness or round(0.002 *
-                                (imgs.shape[0] + imgs.shape[1]) /
+        line_thickness or round(0.002 * (imgs.shape[0] + imgs.shape[1]) /
                                 2)) + 1  # line/font thickness
     # tl = int(line_thickness)
 
@@ -27,20 +27,14 @@ def plot_boxes(outputs, imgs, save_path=None, color=None, line_thickness=None):
         c1, c2 = (int(x[num][0]), int(x[num][1])), (int(x[num][2]),
                                                     int(x[num][3]))
         cv2.rectangle(
-            imgs,
-            c1,
-            c2,
-            colors[id[num]],
-            thickness=tl,
-            lineType=cv2.LINE_AA)
+            imgs, c1, c2, colors[id[num]], thickness=tl, lineType=cv2.LINE_AA)
 
         tf = max(tl - 1, 1)  # font thickness
         t_size = cv2.getTextSize(
             label[num], 0, fontScale=tl / 10, thickness=tf)[0]
 
         c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
-        cv2.rectangle(imgs, c1, c2, colors[id[num]], -1,
-                      cv2.LINE_AA)  # filled
+        cv2.rectangle(imgs, c1, c2, colors[id[num]], -1, cv2.LINE_AA)  # filled
         cv2.putText(
             imgs,
             label[num], (c1[0], c1[1] - 2),
@@ -67,8 +61,7 @@ def main():
     data_path = '/apsarapangu/disk5/zxy/data/coco/'
     detection_model_path = pretrain_path
 
-    img = os.path.join(data_path,
-                       'val2017/000000037777.jpg')
+    img = os.path.join(data_path, 'val2017/000000037777.jpg')
 
     input_data_list = [np.asarray(Image.open(img))]
     predictor = TorchYoloXPredictor(
@@ -82,7 +75,5 @@ def main():
     print(output)
 
 
-
-
 if __name__ == '__main__':
     main()
diff --git a/tools/eval.py b/tools/eval.py
index 9f72eb57..9786083b 100644
--- a/tools/eval.py
+++ b/tools/eval.py
@@ -231,8 +231,7 @@ def main():
                 workers_per_gpu=cfg.data.workers_per_gpu,
                 dist=distributed,
                 shuffle=False)
-                # oss_config=cfg.get('oss_io_config', None))
-
+            # oss_config=cfg.get('oss_io_config', None))
 
         if not distributed:
             outputs = single_gpu_test(

From 166fe9b2c3ea23d3eda769fae7f7fc9ea53201ea Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=91=A8=E6=A5=BC?= <zhoulou.wzh@alibaba-inc.com>
Date: Tue, 9 Aug 2022 15:25:19 +0800
Subject: [PATCH 22/69] pre-commit fix 1

---
 .../models/backbones/repvgg_yolox_backbone.py | 76 +------------------
 easycv/models/detection/detectors/__init__.py |  1 -
 .../detection/detectors/yolox/ASFF_sim.py     |  4 +-
 .../detection/detectors/yolox/tood_head.py    |  3 +-
 .../models/detection/detectors/yolox/yolox.py | 17 -----
 easycv/models/loss/iou_loss.py                | 60 +++++++--------
 easycv/toolkit/blade/cv_blade_utils.py        |  3 +-
 7 files changed, 35 insertions(+), 129 deletions(-)

diff --git a/easycv/models/backbones/repvgg_yolox_backbone.py b/easycv/models/backbones/repvgg_yolox_backbone.py
index df76d5ea..f8ddbd9d 100644
--- a/easycv/models/backbones/repvgg_yolox_backbone.py
+++ b/easycv/models/backbones/repvgg_yolox_backbone.py
@@ -1,6 +1,5 @@
 # borrow some code from https://github.com/DingXiaoH/RepVGG/repvgg.py MIT2.0
 import copy
-import math
 import warnings
 
 import numpy as np
@@ -8,11 +7,6 @@
 import torch.nn as nn
 
 
-def make_divisible(x, divisor):
-    # Upward revision the value x to make it evenly divisible by the divisor.
-    return math.ceil(x / divisor) * divisor
-
-
 def conv_bn(in_channels, out_channels, kernel_size, stride, padding, groups=1):
     '''Basic cell for rep-style block, including conv and bn'''
     result = nn.Sequential()
@@ -42,7 +36,6 @@ def __init__(self,
                  groups=1,
                  padding_mode='zeros',
                  deploy=False,
-                 use_se=False,
                  act=None):
         super(RepVGGBlock, self).__init__()
         self.deploy = deploy
@@ -55,12 +48,7 @@ def __init__(self,
         padding_11 = padding - kernel_size // 2
 
         self.nonlinearity = nn.ReLU()
-
-        if use_se:
-            self.se = SEBlock(
-                out_channels, internal_neurons=out_channels // 16)
-        else:
-            self.se = nn.Identity()
+        self.se = nn.Identity()
 
         if deploy:
             self.rbr_reparam = nn.Conv2d(
@@ -339,66 +327,6 @@ def __init__(
         self.stage4 = self._make_stage(
             channels_list[3], channels_list[4], num_repeats[4], add_ppf=True)
 
-        # self.ERBlock_2 = nn.Sequential(
-        #     RepVGGBlock(
-        #         in_channels=channels_list[0],
-        #         out_channels=channels_list[1],
-        #         kernel_size=3,
-        #         stride=2
-        #     ),
-        #     RepBlock(
-        #         in_channels=channels_list[1],
-        #         out_channels=channels_list[1],
-        #         n=num_repeats[1]
-        #     )
-        # )
-
-        # self.ERBlock_3 = nn.Sequential(
-        #     RepVGGBlock(
-        #         in_channels=channels_list[1],
-        #         out_channels=channels_list[2],
-        #         kernel_size=3,
-        #         stride=2
-        #     ),
-        #     RepBlock(
-        #         in_channels=channels_list[2],
-        #         out_channels=channels_list[2],
-        #         n=num_repeats[2],
-        #     )
-        # )
-
-        # self.ERBlock_4 = nn.Sequential(
-        #     RepVGGBlock(
-        #         in_channels=channels_list[2],
-        #         out_channels=channels_list[3],
-        #         kernel_size=3,
-        #         stride=2
-        #     ),
-        #     RepBlock(
-        #         in_channels=channels_list[3],
-        #         out_channels=channels_list[3],
-        #         n=num_repeats[3]
-        #     )
-        # )
-        # self.ERBlock_5 = nn.Sequential(
-        #     RepVGGBlock(
-        #         in_channels=channels_list[3],
-        #         out_channels=channels_list[4],
-        #         kernel_size=3,
-        #         stride=2
-        #     ),
-        #     RepBlock(
-        #         in_channels=channels_list[4],
-        #         out_channels=channels_list[4],
-        #         n=num_repeats[4]
-        #     ),
-        #     SimSPPF(
-        #         in_channels=channels_list[4],
-        #         out_channels=channels_list[4],
-        #         kernel_size=5
-        #     )
-        # )
-
     def _make_stage(self,
                     in_channels,
                     out_channels,
@@ -452,8 +380,6 @@ def make_divisible(x, divisor):
         make_divisible(i * width_mul, 8)
         for i in (channels_list_backbone + channels_list_neck)
     ]
-    # from easycv.models.backbones.efficientrep import EfficientRep
-    # model = EfficientRep(in_channels=channels, channels_list=channels_list, num_repeats=num_repeat)
     # model = RepVGGYOLOX(in_channels=channels, channels_list=channels_list, num_repeats=num_repeat)
     model = RepVGGYOLOX(in_channels=channels, depth=depth_mul, width=width_mul)
     for layer in model.modules():
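The module-level make_divisible removed at the top of this file is the same rounding helper defined in yolo_pafpn.py earlier in this series (and the enclosing function here keeps its own copy), so the channel lists are still rounded the same way. A small worked example of what it computes (values are illustrative):

    import math

    def make_divisible(x, divisor):
        # round x up to the nearest multiple of divisor, as in yolo_pafpn.py
        return math.ceil(x / divisor) * divisor

    assert make_divisible(48 * 0.5, 8) == 24  # width-scaled channels stay a multiple of 8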
diff --git a/easycv/models/detection/detectors/__init__.py b/easycv/models/detection/detectors/__init__.py
index 9d9241ef..fc980228 100644
--- a/easycv/models/detection/detectors/__init__.py
+++ b/easycv/models/detection/detectors/__init__.py
@@ -6,7 +6,6 @@
 from easycv.models.detection.detectors.detection import Detection
 from easycv.models.detection.detectors.detr import DETRHead, DetrTransformer
 from easycv.models.detection.detectors.fcos import FCOSHead
-from easycv.models.detection.detectors.yolox.yolox import YOLOX
 
 try:
     from easycv.models.detection.detectors.yolox.yolox import YOLOX
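Dropping the unconditional import leaves only the guarded import below it, so environments missing the optional YOLOX dependencies can still import the detectors package. The except clause is outside this hunk; a generic sketch of the pattern, with the fallback value being an assumption:

    try:
        from easycv.models.detection.detectors.yolox.yolox import YOLOX
    except Exception:
        # assumed fallback; the real except clause is not shown in this hunk
        YOLOX = None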
diff --git a/easycv/models/detection/detectors/yolox/ASFF_sim.py b/easycv/models/detection/detectors/yolox/ASFF_sim.py
index 107c474e..d61cc47a 100644
--- a/easycv/models/detection/detectors/yolox/ASFF_sim.py
+++ b/easycv/models/detection/detectors/yolox/ASFF_sim.py
@@ -252,9 +252,7 @@ def forward(self, x):  # l,m,s
         levels_weight = self.weight_levels(levels_weight_v)
         levels_weight = F.softmax(levels_weight, dim=1)
 
-        fused_out_reduced = level_0_resized * levels_weight[:, 0:1, :, :] + \
-                            level_1_resized * levels_weight[:, 1:2, :, :] + \
-                            level_2_resized * levels_weight[:, 2:, :, :]
+        fused_out_reduced = level_0_resized * levels_weight[:, 0:1, :, :] + level_1_resized * levels_weight[:, 1:2, :, :] + level_2_resized * levels_weight[:, 2:, :, :]
 
         if self.use_expand:
             out = self.expand(fused_out_reduced)
diff --git a/easycv/models/detection/detectors/yolox/tood_head.py b/easycv/models/detection/detectors/yolox/tood_head.py
index db14b3e8..049a39be 100644
--- a/easycv/models/detection/detectors/yolox/tood_head.py
+++ b/easycv/models/detection/detectors/yolox/tood_head.py
@@ -147,7 +147,8 @@ def __init__(
             logging.warning(
                 'YOLOX-PAI tood head conv_type must be in [conv, dwconv, repconv], otherwise repconv is used as the default'
             )
-            conv_type = repconv
+            conv_type = 'repconv'
+
         if conv_type == 'conv':
             Conv = BaseConv
         if conv_type == 'dwconv':
diff --git a/easycv/models/detection/detectors/yolox/yolox.py b/easycv/models/detection/detectors/yolox/yolox.py
index abad6fe9..d0b2aae2 100644
--- a/easycv/models/detection/detectors/yolox/yolox.py
+++ b/easycv/models/detection/detectors/yolox/yolox.py
@@ -119,23 +119,6 @@ def __init__(self,
                 conv_layers=conv_layers,
                 decode_in_inference=decode_in_inference)
             self.head.initialize_biases(1e-2)
-        elif head_type == 'ppyoloe':
-            self.head = PPYOLOEHead(
-                in_channels=in_channels,
-                width=width,
-                strides=[8, 16, 32],
-                static_assigner_epoch=4,
-                use_varifocal_loss=True,
-                # eval_input_size=self.test_size,
-                eval_input_size=None,
-                loss_weight={
-                    'class': 1.0,
-                    'iou': 2.5,
-                    'dfl': 0.5
-                },
-                # static_assigner=ATSSAssigner(self.atss_topk, num_classes=self.num_classes),
-                # assigner=TaskAlignedAssigner(topk=self.tal_topk, alpha=1.0, beta=6.0)
-            )
 
         self.decode_in_inference = decode_in_inference
         # use decode, we will use post process as default
diff --git a/easycv/models/loss/iou_loss.py b/easycv/models/loss/iou_loss.py
index 1fef8a87..a56a120f 100644
--- a/easycv/models/loss/iou_loss.py
+++ b/easycv/models/loss/iou_loss.py
@@ -300,7 +300,7 @@ def forward(self,
 @LOSSES.register_module()
 class GIoULoss(nn.Module):
 
-    def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):
+    def __init__(self, eps=1e-6, reduction='mean', loss_type='giou', loss_weight=1.0):
         super(GIoULoss, self).__init__()
         self.eps = eps
         self.reduction = reduction
@@ -440,32 +440,32 @@ def forward(self, pred, target):
         elif self.reduction == 'sum':
             loss = loss.sum()
 
-    def forward(self,
-                pred,
-                target,
-                weight=None,
-                avg_factor=None,
-                reduction_override=None,
-                **kwargs):
-        if weight is not None and not torch.any(weight > 0):
-            if pred.dim() == weight.dim() + 1:
-                weight = weight.unsqueeze(1)
-            return (pred * weight).sum()  # 0
-        assert reduction_override in (None, 'none', 'mean', 'sum')
-        reduction = (
-            reduction_override if reduction_override else self.reduction)
-        if weight is not None and weight.dim() > 1:
-            # TODO: remove this in the future
-            # reduce the weight of shape (n, 4) to (n,) to match the
-            # giou_loss of shape (n,)
-            assert weight.shape == pred.shape
-            weight = weight.mean(-1)
-        loss = self.loss_weight * giou_loss(
-            pred,
-            target,
-            weight,
-            eps=self.eps,
-            reduction=reduction,
-            avg_factor=avg_factor,
-            **kwargs)
-        return loss
+    # def forward(self,
+    #             pred,
+    #             target,
+    #             weight=None,
+    #             avg_factor=None,
+    #             reduction_override=None,
+    #             **kwargs):
+    #     if weight is not None and not torch.any(weight > 0):
+    #         if pred.dim() == weight.dim() + 1:
+    #             weight = weight.unsqueeze(1)
+    #         return (pred * weight).sum()  # 0
+    #     assert reduction_override in (None, 'none', 'mean', 'sum')
+    #     reduction = (
+    #         reduction_override if reduction_override else self.reduction)
+    #     if weight is not None and weight.dim() > 1:
+    #         # TODO: remove this in the future
+    #         # reduce the weight of shape (n, 4) to (n,) to match the
+    #         # giou_loss of shape (n,)
+    #         assert weight.shape == pred.shape
+    #         weight = weight.mean(-1)
+    #     loss = self.loss_weight * giou_loss(
+    #         pred,
+    #         target,
+    #         weight,
+    #         eps=self.eps,
+    #         reduction=reduction,
+    #         avg_factor=avg_factor,
+    #         **kwargs)
+    #     return loss
diff --git a/easycv/toolkit/blade/cv_blade_utils.py b/easycv/toolkit/blade/cv_blade_utils.py
index 1981f9ce..01f1545e 100644
--- a/easycv/toolkit/blade/cv_blade_utils.py
+++ b/easycv/toolkit/blade/cv_blade_utils.py
@@ -74,7 +74,7 @@ def opt_trt_config(input_config=dict(enable_fp16=True)):
         optimization_pipeline='TensorRT',
         enable_fp16=True,
         customize_op_black_list=[
-            #'aten::select', 'aten::index', 'aten::slice', 'aten::view', 'aten::upsample'
+            # 'aten::select', 'aten::index', 'aten::slice', 'aten::view', 'aten::upsample'
         ],
         fp16_fallback_op_ratio=0.05,
     )
@@ -114,7 +114,6 @@ def cu_prof_stop():
 @contextmanager
 def opt_blade_mixprec():
     try:
-        #dummy = torch.classes.torch_blade.MixPrecision(True)
         dummy = torch.cuda.amp.autocast(True)
         yield
     finally:

From 48975bbdbfacbaf3fbdadc70fbc77fb46f123b0c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=91=A8=E6=A5=BC?= <zhoulou.wzh@alibaba-inc.com>
Date: Tue, 9 Aug 2022 15:25:58 +0800
Subject: [PATCH 23/69] pre-commit fix 2

---
 easycv/models/detection/detectors/yolox/ASFF_sim.py  | 6 +++++-
 easycv/models/detection/detectors/yolox/tood_head.py | 1 -
 easycv/models/loss/iou_loss.py                       | 6 +++++-
 3 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/easycv/models/detection/detectors/yolox/ASFF_sim.py b/easycv/models/detection/detectors/yolox/ASFF_sim.py
index d61cc47a..da5e1ee9 100644
--- a/easycv/models/detection/detectors/yolox/ASFF_sim.py
+++ b/easycv/models/detection/detectors/yolox/ASFF_sim.py
@@ -252,7 +252,11 @@ def forward(self, x):  # l,m,s
         levels_weight = self.weight_levels(levels_weight_v)
         levels_weight = F.softmax(levels_weight, dim=1)
 
-        fused_out_reduced = level_0_resized * levels_weight[:, 0:1, :, :] + level_1_resized * levels_weight[:, 1:2, :, :] + level_2_resized * levels_weight[:, 2:, :, :]
+        fused_out_reduced = level_0_resized * levels_weight[:, 0:
+                                                            1, :, :] + level_1_resized * levels_weight[:,
+                                                                                                       1:
+                                                                                                       2, :, :] + level_2_resized * levels_weight[:,
+                                                                                                                                                  2:, :, :]
 
         if self.use_expand:
             out = self.expand(fused_out_reduced)
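
The reflow above is yapf's rendering of the single-expression fusion; it is hard to read but behaviorally identical to the removed line. For reference, a formatter-friendly equivalent (a standalone sketch, not the repository's code) that splits the softmaxed level weights first and then sums the weighted levels:

import torch
import torch.nn.functional as F

def fuse_levels(level_0, level_1, level_2, levels_weight_logits):
    # levels_weight_logits: (N, 3, H, W) -> per-pixel softmax over the three levels
    levels_weight = F.softmax(levels_weight_logits, dim=1)
    w0, w1, w2 = torch.split(levels_weight, 1, dim=1)  # each (N, 1, H, W)
    return level_0 * w0 + level_1 * w1 + level_2 * w2

# shape check under assumed sizes
x0 = x1 = x2 = torch.randn(2, 8, 16, 16)
print(fuse_levels(x0, x1, x2, torch.randn(2, 3, 16, 16)).shape)  # (2, 8, 16, 16)
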
diff --git a/easycv/models/detection/detectors/yolox/tood_head.py b/easycv/models/detection/detectors/yolox/tood_head.py
index 049a39be..3f0cc14c 100644
--- a/easycv/models/detection/detectors/yolox/tood_head.py
+++ b/easycv/models/detection/detectors/yolox/tood_head.py
@@ -148,7 +148,6 @@ def __init__(
                 'YOLOX-PAI tood head conv_type must in [conv, dwconv, repconv], otherwise we use repconv as default'
             )
             conv_type = 'repconv'
-            
         if conv_type == 'conv':
             Conv = BaseConv
         if conv_type == 'dwconv':
diff --git a/easycv/models/loss/iou_loss.py b/easycv/models/loss/iou_loss.py
index a56a120f..f4fbdfd8 100644
--- a/easycv/models/loss/iou_loss.py
+++ b/easycv/models/loss/iou_loss.py
@@ -300,7 +300,11 @@ def forward(self,
 @LOSSES.register_module()
 class GIoULoss(nn.Module):
 
-    def __init__(self, eps=1e-6, reduction='mean', loss_type='giou', loss_weight=1.0):
+    def __init__(self,
+                 eps=1e-6,
+                 reduction='mean',
+                 loss_type='giou',
+                 loss_weight=1.0):
         super(GIoULoss, self).__init__()
         self.eps = eps
         self.reduction = reduction
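
For context, a minimal sketch of the pairwise GIoU that this loss wraps, with boxes in (x1, y1, x2, y2) format. This is the standard definition rather than EasyCV's exact giou_loss implementation, and the new loss_type argument is assumed to merely select among such IoU variants:

import torch

def giou(pred, target, eps=1e-6):
    # intersection area
    lt = torch.max(pred[:, :2], target[:, :2])
    rb = torch.min(pred[:, 2:], target[:, 2:])
    wh = (rb - lt).clamp(min=0)
    inter = wh[:, 0] * wh[:, 1]
    # union area
    area_p = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
    area_t = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
    union = area_p + area_t - inter + eps
    iou = inter / union
    # smallest enclosing box C
    lt_c = torch.min(pred[:, :2], target[:, :2])
    rb_c = torch.max(pred[:, 2:], target[:, 2:])
    wh_c = (rb_c - lt_c).clamp(min=0)
    area_c = wh_c[:, 0] * wh_c[:, 1] + eps
    # GIoU = IoU - |C \ (A U B)| / |C|; the loss is typically 1 - GIoU
    return iou - (area_c - union) / area_c
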

From 58e3f0ce6b2e3d78a51e5979516cd5588b9545f6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=91=A8=E6=A5=BC?= <zhoulou.wzh@alibaba-inc.com>
Date: Tue, 9 Aug 2022 15:26:30 +0800
Subject: [PATCH 24/69] pre-commit fix 3

---
 easycv/models/detection/detectors/yolox/ASFF.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/easycv/models/detection/detectors/yolox/ASFF.py b/easycv/models/detection/detectors/yolox/ASFF.py
index d9236125..22167d12 100644
--- a/easycv/models/detection/detectors/yolox/ASFF.py
+++ b/easycv/models/detection/detectors/yolox/ASFF.py
@@ -147,9 +147,7 @@ def forward(self, x):  # l,m,s
         levels_weight = self.weight_levels(levels_weight_v)
         levels_weight = F.softmax(levels_weight, dim=1)
 
-        fused_out_reduced = level_0_resized * levels_weight[:, 0:1, :, :] + \
-                            level_1_resized * levels_weight[:, 1:2, :, :] + \
-                            level_2_resized * levels_weight[:, 2:, :, :]
+        fused_out_reduced = level_0_resized * levels_weight[:, 0:1, :, :] + level_1_resized * levels_weight[:, 1:2, :, :] + level_2_resized * levels_weight[:, 2:, :, :]
 
         out = self.expand(fused_out_reduced)
 

From 95028f4f6adbb0c61126ee35d25e65dfd1cb1d91 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=91=A8=E6=A5=BC?= <zhoulou.wzh@alibaba-inc.com>
Date: Tue, 9 Aug 2022 15:27:23 +0800
Subject: [PATCH 25/69] pre-commit fix 4

---
 easycv/models/detection/detectors/yolox/ASFF.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/easycv/models/detection/detectors/yolox/ASFF.py b/easycv/models/detection/detectors/yolox/ASFF.py
index 22167d12..afda8bf0 100644
--- a/easycv/models/detection/detectors/yolox/ASFF.py
+++ b/easycv/models/detection/detectors/yolox/ASFF.py
@@ -147,7 +147,11 @@ def forward(self, x):  # l,m,s
         levels_weight = self.weight_levels(levels_weight_v)
         levels_weight = F.softmax(levels_weight, dim=1)
 
-        fused_out_reduced = level_0_resized * levels_weight[:, 0:1, :, :] + level_1_resized * levels_weight[:, 1:2, :, :] + level_2_resized * levels_weight[:, 2:, :, :]
+        fused_out_reduced = level_0_resized * levels_weight[:, 0:
+                                                            1, :, :] + level_1_resized * levels_weight[:,
+                                                                                                       1:
+                                                                                                       2, :, :] + level_2_resized * levels_weight[:,
+                                                                                                                                                  2:, :, :]
 
         out = self.expand(fused_out_reduced)
 

From 1d21505af26782a180e60e69249bb667c878754d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=91=A8=E6=A5=BC?= <zhoulou.wzh@alibaba-inc.com>
Date: Tue, 9 Aug 2022 15:43:39 +0800
Subject: [PATCH 26/69] fix yolox configs

---
 configs/detection/yolox/yolox_best_asff_1.py  | 190 -----------------
 configs/detection/yolox/yolox_best_asff_2.py  | 191 -----------------
 configs/detection/yolox/yolox_best_asff_4.py  | 190 -----------------
 configs/detection/yolox/yolox_best_asff_8.py  | 190 -----------------
 configs/detection/yolox/yolox_best_conv1.py   | 193 -----------------
 configs/detection/yolox/yolox_best_la_16.py   | 193 -----------------
 configs/detection/yolox/yolox_best_la_32.py   | 194 -----------------
 .../yolox/yolox_best_la_32_sconv_2.py         | 193 -----------------
 .../yolox/yolox_best_la_32_sconv_4.py         | 193 -----------------
 configs/detection/yolox/yolox_best_lrelu.py   | 190 -----------------
 configs/detection/yolox/yolox_best_ori.py     | 190 -----------------
 configs/detection/yolox/yolox_best_relu.py    | 190 -----------------
 configs/detection/yolox/yolox_best_stack_1.py | 193 -----------------
 configs/detection/yolox/yolox_best_stack_2.py | 193 -----------------
 configs/detection/yolox/yolox_best_stack_3.py | 193 -----------------
 configs/detection/yolox/yolox_best_stack_4.py | 193 -----------------
 configs/detection/yolox/yolox_best_stack_5.py | 193 -----------------
 .../detection/yolox/yolox_l_8xb8_300e_coco.py |   1 -
 .../yolox/yolox_l_8xb8_300e_coco_asff_2.py    |  10 -
 .../yolox_l_8xb8_300e_coco_asff_tood_giou.py  |  10 -
 .../yolox/yolox_l_8xb8_300e_coco_best_ori.py  |  10 -
 .../yolox/yolox_l_8xb8_300e_coco_la_32.py     |  10 -
 .../yolox/yolox_l_8xb8_300e_coco_yolo6.py     |  10 -
 .../yolox/yolox_l_8xb8_300e_coco_yolo6_att.py |  10 -
 .../yolox_l_8xb8_300e_coco_yolo6_att_relu.py  |  10 -
 .../yolox/yolox_s_8xb16_300e_coco.py          | 189 ----------------
 .../yolox/yolox_s_8xb16_300e_coco_asff.py     | 187 ----------------
 .../yolox/yolox_s_8xb16_300e_coco_base.py     | 190 -----------------
 .../yolox/yolox_s_8xb16_300e_coco_cbam.py     | 187 ----------------
 .../yolox/yolox_s_8xb16_300e_coco_ciou.py     | 187 ----------------
 .../yolox/yolox_s_8xb16_300e_coco_diou.py     | 187 ----------------
 .../yolox/yolox_s_8xb16_300e_coco_eca.py      | 187 ----------------
 .../yolox/yolox_s_8xb16_300e_coco_eiou.py     | 187 ----------------
 .../yolox/yolox_s_8xb16_300e_coco_focal.py    | 187 ----------------
 .../yolox/yolox_s_8xb16_300e_coco_giou.py     | 187 ----------------
 .../yolox/yolox_s_8xb16_300e_coco_pai.py      |  96 ---------
 .../yolox/yolox_s_8xb16_300e_coco_se.py       | 187 ----------------
 .../yolox/yolox_s_8xb16_300e_coco_siou.py     | 187 ----------------
 .../yolox/yolox_s_8xb16_300e_coco_siou2.py    | 187 ----------------
 .../yolox/yolox_s_8xb16_300e_coco_sppf.py     | 188 ----------------
 .../yolox/yolox_s_8xb16_300e_coco_vfocal.py   | 187 ----------------
 .../detection/yolox/yolox_s_8xb16_300e_tal.py | 189 ----------------
 .../yolox/yolox_s_8xb16_300e_tal_asff_giou.py | 190 -----------------
 .../yolox_s_8xb16_300e_tal_asff_sppf_giou.py  | 190 -----------------
 .../detection/yolox/yolox_s_8xb16_300e_voc.py |  90 --------
 .../yolox/yolox_s_8xb16_300e_wonorm.py        | 186 ----------------
 configs/detection/yolox/yolox_yolo6.py        | 190 -----------------
 .../detection/yolox/yolox_yolo6_asff_sim.py   | 197 -----------------
 configs/detection/yolox/yolox_yolo6_att.py    | 194 -----------------
 .../detection/yolox/yolox_yolo6_att_relu.py   | 194 -----------------
 .../detection/yolox/yolox_yolo6_att_sim.py    | 198 -----------------
 .../detection/yolox/yolox_yolo6_att_sim_1.py  | 201 ------------------
 .../detection/yolox/yolox_yolo6_att_sim_16.py | 198 -----------------
 .../detection/yolox/yolox_yolo6_att_sim_32.py | 198 -----------------
 .../detection/yolox/yolox_yolo6_att_sim_8.py  | 198 -----------------
 .../detection/yolox/yolox_yolo6_att_sim_d.py  | 198 -----------------
 .../yolox/yolox_yolo6_att_sim_no_expand.py    | 198 -----------------
 configs/detection/yolox/yolox_yolo6_gsconv.py | 190 -----------------
 .../yolox/yolox_yolo6_gsconv_asff_sim.py      | 199 -----------------
 .../yolox/yolox_yolo6_gsconv_part.py          | 191 -----------------
 .../detection/yolox/yolox_yolo6_head_ori.py   | 201 ------------------
 .../detection/yolox/yolox_yolo6_head_tood.py  | 201 ------------------
 .../detection/yolox/yolox_yolo6_yoloe_head.py | 194 -----------------
 63 files changed, 10425 deletions(-)
 delete mode 100644 configs/detection/yolox/yolox_best_asff_1.py
 delete mode 100644 configs/detection/yolox/yolox_best_asff_2.py
 delete mode 100644 configs/detection/yolox/yolox_best_asff_4.py
 delete mode 100644 configs/detection/yolox/yolox_best_asff_8.py
 delete mode 100644 configs/detection/yolox/yolox_best_conv1.py
 delete mode 100644 configs/detection/yolox/yolox_best_la_16.py
 delete mode 100644 configs/detection/yolox/yolox_best_la_32.py
 delete mode 100644 configs/detection/yolox/yolox_best_la_32_sconv_2.py
 delete mode 100644 configs/detection/yolox/yolox_best_la_32_sconv_4.py
 delete mode 100644 configs/detection/yolox/yolox_best_lrelu.py
 delete mode 100644 configs/detection/yolox/yolox_best_ori.py
 delete mode 100644 configs/detection/yolox/yolox_best_relu.py
 delete mode 100644 configs/detection/yolox/yolox_best_stack_1.py
 delete mode 100644 configs/detection/yolox/yolox_best_stack_2.py
 delete mode 100644 configs/detection/yolox/yolox_best_stack_3.py
 delete mode 100644 configs/detection/yolox/yolox_best_stack_4.py
 delete mode 100644 configs/detection/yolox/yolox_best_stack_5.py
 delete mode 100644 configs/detection/yolox/yolox_l_8xb8_300e_coco_asff_2.py
 delete mode 100644 configs/detection/yolox/yolox_l_8xb8_300e_coco_asff_tood_giou.py
 delete mode 100644 configs/detection/yolox/yolox_l_8xb8_300e_coco_best_ori.py
 delete mode 100644 configs/detection/yolox/yolox_l_8xb8_300e_coco_la_32.py
 delete mode 100644 configs/detection/yolox/yolox_l_8xb8_300e_coco_yolo6.py
 delete mode 100644 configs/detection/yolox/yolox_l_8xb8_300e_coco_yolo6_att.py
 delete mode 100644 configs/detection/yolox/yolox_l_8xb8_300e_coco_yolo6_att_relu.py
 delete mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco.py
 delete mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_asff.py
 delete mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_base.py
 delete mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_cbam.py
 delete mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_ciou.py
 delete mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_diou.py
 delete mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_eca.py
 delete mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_eiou.py
 delete mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_focal.py
 delete mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_giou.py
 delete mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_pai.py
 delete mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_se.py
 delete mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_siou.py
 delete mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_siou2.py
 delete mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_sppf.py
 delete mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_vfocal.py
 delete mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_tal.py
 delete mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_giou.py
 delete mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_sppf_giou.py
 delete mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_voc.py
 delete mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_wonorm.py
 delete mode 100644 configs/detection/yolox/yolox_yolo6.py
 delete mode 100644 configs/detection/yolox/yolox_yolo6_asff_sim.py
 delete mode 100644 configs/detection/yolox/yolox_yolo6_att.py
 delete mode 100644 configs/detection/yolox/yolox_yolo6_att_relu.py
 delete mode 100644 configs/detection/yolox/yolox_yolo6_att_sim.py
 delete mode 100644 configs/detection/yolox/yolox_yolo6_att_sim_1.py
 delete mode 100644 configs/detection/yolox/yolox_yolo6_att_sim_16.py
 delete mode 100644 configs/detection/yolox/yolox_yolo6_att_sim_32.py
 delete mode 100644 configs/detection/yolox/yolox_yolo6_att_sim_8.py
 delete mode 100644 configs/detection/yolox/yolox_yolo6_att_sim_d.py
 delete mode 100644 configs/detection/yolox/yolox_yolo6_att_sim_no_expand.py
 delete mode 100644 configs/detection/yolox/yolox_yolo6_gsconv.py
 delete mode 100644 configs/detection/yolox/yolox_yolo6_gsconv_asff_sim.py
 delete mode 100644 configs/detection/yolox/yolox_yolo6_gsconv_part.py
 delete mode 100644 configs/detection/yolox/yolox_yolo6_head_ori.py
 delete mode 100644 configs/detection/yolox/yolox_yolo6_head_tood.py
 delete mode 100644 configs/detection/yolox/yolox_yolo6_yoloe_head.py
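
The deleted files below are ~190-line near-copies of one another that differ only in a few model fields (use_att, asff_channel, loss types, head_type) and in machine-specific data_root paths. If such ablation variants are needed again, they can be kept as thin overrides of a shared base config instead of full duplicates; a hedged illustration (the file name and base path are assumptions, not part of this PR):

# yolox_s_asff_tood_giou.py (hypothetical variant config)
_base_ = './yolox_s_8xb16_300e_coco.py'  # assumed surviving base config

# override only the fields that differ from the base; the config loader merges dicts
model = dict(
    use_att='ASFF',
    asff_channel=2,
    obj_loss_type='BCE',
    reg_loss_type='giou',
    head_type='tood',
)
data_root = 'data/coco/'  # keep machine-specific paths out of committed configs
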

diff --git a/configs/detection/yolox/yolox_best_asff_1.py b/configs/detection/yolox/yolox_best_asff_1.py
deleted file mode 100644
index ebdd02b1..00000000
--- a/configs/detection/yolox/yolox_best_asff_1.py
+++ /dev/null
@@ -1,190 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att='ASFF',
-    asff_channel=1,
-    obj_loss_type='BCE',
-    reg_loss_type='giou',
-    head_type='tood'  # yolox
-)
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-data_root = '/apsarapangu/disk5/zxy/data/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m x l; tiny nano will detele
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=True,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        test_mode=True,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponetial model average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_best_asff_2.py b/configs/detection/yolox/yolox_best_asff_2.py
deleted file mode 100644
index 48e05d49..00000000
--- a/configs/detection/yolox/yolox_best_asff_2.py
+++ /dev/null
@@ -1,191 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att='ASFF',
-    asff_channel=2,
-    obj_loss_type='BCE',
-    reg_loss_type='giou',
-    head_type='tood'  # yolox
-)
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-# data_root = '/apsarapangu/disk5/zxy/data/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-data_root = '/cpfs01/shared/public/dataset/coco2017/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m x l; tiny nano will detele
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=True,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        test_mode=True,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponetial model average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_best_asff_4.py b/configs/detection/yolox/yolox_best_asff_4.py
deleted file mode 100644
index 83c53948..00000000
--- a/configs/detection/yolox/yolox_best_asff_4.py
+++ /dev/null
@@ -1,190 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att='ASFF',
-    asff_channel=4,
-    obj_loss_type='BCE',
-    reg_loss_type='giou',
-    head_type='tood'  # yolox
-)
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-data_root = '/apsarapangu/disk5/zxy/data/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m x l; tiny nano will detele
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=True,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        test_mode=True,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponetial model average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_best_asff_8.py b/configs/detection/yolox/yolox_best_asff_8.py
deleted file mode 100644
index af2a24fe..00000000
--- a/configs/detection/yolox/yolox_best_asff_8.py
+++ /dev/null
@@ -1,190 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att='ASFF',
-    asff_channel=8,
-    obj_loss_type='BCE',
-    reg_loss_type='giou',
-    head_type='tood'  # yolox
-)
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-data_root = '/apsarapangu/disk5/zxy/data/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m x l; tiny nano will detele
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=True,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        test_mode=True,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponetial model average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_best_conv1.py b/configs/detection/yolox/yolox_best_conv1.py
deleted file mode 100644
index 4520f4a9..00000000
--- a/configs/detection/yolox/yolox_best_conv1.py
+++ /dev/null
@@ -1,193 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att='ASFF',
-    obj_loss_type='BCE',
-    reg_loss_type='giou',
-    head_type='tood',
-    act='silu',
-    asff_channel=16,
-    stacked_convs=6,
-    la_down_rate=8,
-    conv_layers=1)
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-data_root = '/apsarapangu/disk5/zxy/data/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m x l; tiny nano will detele
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=True,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        test_mode=True,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponetial model average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_best_la_16.py b/configs/detection/yolox/yolox_best_la_16.py
deleted file mode 100644
index 9f11105d..00000000
--- a/configs/detection/yolox/yolox_best_la_16.py
+++ /dev/null
@@ -1,193 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att='ASFF',
-    obj_loss_type='BCE',
-    reg_loss_type='giou',
-    head_type='tood',
-    act='silu',
-    asff_channel=16,
-    stacked_convs=6,
-    la_down_rate=16,
-    conv_layers=2)
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-data_root = '/apsarapangu/disk5/zxy/data/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m x l; tiny nano will detele
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=True,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        test_mode=True,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponetial model average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_best_la_32.py b/configs/detection/yolox/yolox_best_la_32.py
deleted file mode 100644
index c807c539..00000000
--- a/configs/detection/yolox/yolox_best_la_32.py
+++ /dev/null
@@ -1,194 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att='ASFF',
-    obj_loss_type='BCE',
-    reg_loss_type='giou',
-    head_type='tood',
-    act='silu',
-    asff_channel=16,
-    stacked_convs=6,
-    la_down_rate=32,
-    conv_layers=2)
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-# data_root = '/apsarapangu/disk5/zxy/data/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-data_root = '/cpfs01/shared/public/dataset/coco2017/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny and nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=True,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        test_mode=True,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epochs
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_best_la_32_sconv_2.py b/configs/detection/yolox/yolox_best_la_32_sconv_2.py
deleted file mode 100644
index 90eb6034..00000000
--- a/configs/detection/yolox/yolox_best_la_32_sconv_2.py
+++ /dev/null
@@ -1,193 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att='ASFF',
-    obj_loss_type='BCE',
-    reg_loss_type='giou',
-    head_type='tood',
-    act='silu',
-    asff_channel=16,
-    stacked_convs=2,
-    la_down_rate=32,
-    conv_layers=2)
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-data_root = '/apsarapangu/disk5/zxy/data/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny and nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=True,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        test_mode=True,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epochs
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_best_la_32_sconv_4.py b/configs/detection/yolox/yolox_best_la_32_sconv_4.py
deleted file mode 100644
index e2ce80bc..00000000
--- a/configs/detection/yolox/yolox_best_la_32_sconv_4.py
+++ /dev/null
@@ -1,193 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att='ASFF',
-    obj_loss_type='BCE',
-    reg_loss_type='giou',
-    head_type='tood',
-    act='silu',
-    asff_channel=16,
-    stacked_convs=4,
-    la_down_rate=32,
-    conv_layers=2)
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-data_root = '/apsarapangu/disk5/zxy/data/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny and nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=True,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        test_mode=True,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epochs
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_best_lrelu.py b/configs/detection/yolox/yolox_best_lrelu.py
deleted file mode 100644
index db43347c..00000000
--- a/configs/detection/yolox/yolox_best_lrelu.py
+++ /dev/null
@@ -1,190 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    act='lrelu',
-    use_att='ASFF',
-    obj_loss_type='BCE',
-    reg_loss_type='giou',
-    head_type='tood'  # yolox
-)
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-data_root = '/apsarapangu/disk5/zxy/data/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny and nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=True,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        test_mode=True,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epochs
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_best_ori.py b/configs/detection/yolox/yolox_best_ori.py
deleted file mode 100644
index e984efed..00000000
--- a/configs/detection/yolox/yolox_best_ori.py
+++ /dev/null
@@ -1,190 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att='ASFF',
-    obj_loss_type='BCE',
-    reg_loss_type='giou',
-    head_type='tood'  # yolox
-)
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-# data_root = '/apsarapangu/disk5/zxy/data/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-data_root = '/cpfs01/shared/public/dataset/coco2017/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny and nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=True,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        test_mode=True,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epochs
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=10,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_best_relu.py b/configs/detection/yolox/yolox_best_relu.py
deleted file mode 100644
index 87dc91c9..00000000
--- a/configs/detection/yolox/yolox_best_relu.py
+++ /dev/null
@@ -1,190 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    act='relu',
-    use_att='ASFF',
-    obj_loss_type='BCE',
-    reg_loss_type='giou',
-    head_type='tood'  # yolox
-)
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-data_root = '/apsarapangu/disk5/zxy/data/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny and nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=True,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        test_mode=True,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epochs
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_best_stack_1.py b/configs/detection/yolox/yolox_best_stack_1.py
deleted file mode 100644
index 53a1d8e3..00000000
--- a/configs/detection/yolox/yolox_best_stack_1.py
+++ /dev/null
@@ -1,193 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att='ASFF',
-    obj_loss_type='BCE',
-    reg_loss_type='giou',
-    head_type='tood',
-    act='silu',
-    asff_channel=16,
-    stacked_convs=1,
-    la_down_rate=8,
-    conv_layers=2)
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-data_root = '/apsarapangu/disk5/zxy/data/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny and nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=True,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        test_mode=True,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epochs
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_best_stack_2.py b/configs/detection/yolox/yolox_best_stack_2.py
deleted file mode 100644
index 53d865be..00000000
--- a/configs/detection/yolox/yolox_best_stack_2.py
+++ /dev/null
@@ -1,193 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att='ASFF',
-    obj_loss_type='BCE',
-    reg_loss_type='giou',
-    head_type='tood',
-    act='silu',
-    asff_channel=16,
-    stacked_convs=2,
-    la_down_rate=8,
-    conv_layers=2)
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-data_root = '/apsarapangu/disk5/zxy/data/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny and nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=True,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        test_mode=True,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epochs
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_best_stack_3.py b/configs/detection/yolox/yolox_best_stack_3.py
deleted file mode 100644
index 4987e0d3..00000000
--- a/configs/detection/yolox/yolox_best_stack_3.py
+++ /dev/null
@@ -1,193 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att='ASFF',
-    obj_loss_type='BCE',
-    reg_loss_type='giou',
-    head_type='tood',
-    act='silu',
-    asff_channel=16,
-    stacked_convs=3,
-    la_down_rate=8,
-    conv_layers=2)
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-data_root = '/apsarapangu/disk5/zxy/data/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny and nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=True,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        test_mode=True,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epochs
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_best_stack_4.py b/configs/detection/yolox/yolox_best_stack_4.py
deleted file mode 100644
index 8d7cf0ca..00000000
--- a/configs/detection/yolox/yolox_best_stack_4.py
+++ /dev/null
@@ -1,193 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att='ASFF',
-    obj_loss_type='BCE',
-    reg_loss_type='giou',
-    head_type='tood',
-    act='silu',
-    asff_channel=16,
-    stacked_convs=4,
-    la_down_rate=8,
-    conv_layers=2)
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-data_root = '/apsarapangu/disk5/zxy/data/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny and nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=True,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        test_mode=True,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_best_stack_5.py b/configs/detection/yolox/yolox_best_stack_5.py
deleted file mode 100644
index 07f36a7b..00000000
--- a/configs/detection/yolox/yolox_best_stack_5.py
+++ /dev/null
@@ -1,193 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att='ASFF',
-    obj_loss_type='BCE',
-    reg_loss_type='giou',
-    head_type='tood',
-    act='silu',
-    asff_channel=16,
-    stacked_convs=5,
-    la_down_rate=8,
-    conv_layers=2)
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-data_root = '/apsarapangu/disk5/zxy/data/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; deleted for tiny/nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=True,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        test_mode=True,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_l_8xb8_300e_coco.py b/configs/detection/yolox/yolox_l_8xb8_300e_coco.py
index fbb50dd7..93b3ce33 100644
--- a/configs/detection/yolox/yolox_l_8xb8_300e_coco.py
+++ b/configs/detection/yolox/yolox_l_8xb8_300e_coco.py
@@ -1,5 +1,4 @@
 _base_ = 'configs/detection/yolox/yolox_s_8xb16_300e_coco.py'
-# _base_ = 'configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_giou.py'
 
 # model settings
 model = dict(model_type='l')
diff --git a/configs/detection/yolox/yolox_l_8xb8_300e_coco_asff_2.py b/configs/detection/yolox/yolox_l_8xb8_300e_coco_asff_2.py
deleted file mode 100644
index ae3b18c0..00000000
--- a/configs/detection/yolox/yolox_l_8xb8_300e_coco_asff_2.py
+++ /dev/null
@@ -1,10 +0,0 @@
-_base_ = 'configs/detection/yolox/yolox_best_asff_2.py'
-# _base_ = 'configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_giou.py'
-
-# model settings
-model = dict(model_type='l')
-
-data = dict(imgs_per_gpu=8, workers_per_gpu=4)
-
-optimizer = dict(
-    type='SGD', lr=0.01, momentum=0.9, weight_decay=5e-4, nesterov=True)
diff --git a/configs/detection/yolox/yolox_l_8xb8_300e_coco_asff_tood_giou.py b/configs/detection/yolox/yolox_l_8xb8_300e_coco_asff_tood_giou.py
deleted file mode 100644
index 10da9745..00000000
--- a/configs/detection/yolox/yolox_l_8xb8_300e_coco_asff_tood_giou.py
+++ /dev/null
@@ -1,10 +0,0 @@
-_base_ = 'configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_giou.py'
-# _base_ = 'configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_giou.py'
-
-# model settings
-model = dict(model_type='l')
-
-data = dict(imgs_per_gpu=8, workers_per_gpu=4)
-
-optimizer = dict(
-    type='SGD', lr=0.01, momentum=0.9, weight_decay=5e-4, nesterov=True)
diff --git a/configs/detection/yolox/yolox_l_8xb8_300e_coco_best_ori.py b/configs/detection/yolox/yolox_l_8xb8_300e_coco_best_ori.py
deleted file mode 100644
index a31acdd4..00000000
--- a/configs/detection/yolox/yolox_l_8xb8_300e_coco_best_ori.py
+++ /dev/null
@@ -1,10 +0,0 @@
-_base_ = 'configs/detection/yolox/yolox_best_ori.py'
-# _base_ = 'configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_giou.py'
-
-# model settings
-model = dict(model_type='l')
-
-data = dict(imgs_per_gpu=8, workers_per_gpu=4)
-
-optimizer = dict(
-    type='SGD', lr=0.01, momentum=0.9, weight_decay=5e-4, nesterov=True)
diff --git a/configs/detection/yolox/yolox_l_8xb8_300e_coco_la_32.py b/configs/detection/yolox/yolox_l_8xb8_300e_coco_la_32.py
deleted file mode 100644
index 864116f4..00000000
--- a/configs/detection/yolox/yolox_l_8xb8_300e_coco_la_32.py
+++ /dev/null
@@ -1,10 +0,0 @@
-_base_ = 'configs/detection/yolox/yolox_best_la_32.py'
-# _base_ = 'configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_giou.py'
-
-# model settings
-model = dict(model_type='l')
-
-data = dict(imgs_per_gpu=8, workers_per_gpu=4)
-
-optimizer = dict(
-    type='SGD', lr=0.01, momentum=0.9, weight_decay=5e-4, nesterov=True)
diff --git a/configs/detection/yolox/yolox_l_8xb8_300e_coco_yolo6.py b/configs/detection/yolox/yolox_l_8xb8_300e_coco_yolo6.py
deleted file mode 100644
index f3ac55c4..00000000
--- a/configs/detection/yolox/yolox_l_8xb8_300e_coco_yolo6.py
+++ /dev/null
@@ -1,10 +0,0 @@
-_base_ = 'configs/detection/yolox/yolox_yolo6.py'
-# _base_ = 'configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_giou.py'
-
-# model settings
-model = dict(model_type='l')
-
-data = dict(imgs_per_gpu=8, workers_per_gpu=4)
-
-optimizer = dict(
-    type='SGD', lr=0.01, momentum=0.9, weight_decay=5e-4, nesterov=True)
diff --git a/configs/detection/yolox/yolox_l_8xb8_300e_coco_yolo6_att.py b/configs/detection/yolox/yolox_l_8xb8_300e_coco_yolo6_att.py
deleted file mode 100644
index e7f03efc..00000000
--- a/configs/detection/yolox/yolox_l_8xb8_300e_coco_yolo6_att.py
+++ /dev/null
@@ -1,10 +0,0 @@
-_base_ = 'configs/detection/yolox/yolox_yolo6_att.py'
-# _base_ = 'configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_giou.py'
-
-# model settings
-model = dict(model_type='l')
-
-data = dict(imgs_per_gpu=8, workers_per_gpu=4)
-
-optimizer = dict(
-    type='SGD', lr=0.01, momentum=0.9, weight_decay=5e-4, nesterov=True)
diff --git a/configs/detection/yolox/yolox_l_8xb8_300e_coco_yolo6_att_relu.py b/configs/detection/yolox/yolox_l_8xb8_300e_coco_yolo6_att_relu.py
deleted file mode 100644
index 29eccc8e..00000000
--- a/configs/detection/yolox/yolox_l_8xb8_300e_coco_yolo6_att_relu.py
+++ /dev/null
@@ -1,10 +0,0 @@
-_base_ = 'configs/detection/yolox/yolox_yolo6_att_relu.py'
-# _base_ = 'configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_giou.py'
-
-# model settings
-model = dict(model_type='l')
-
-data = dict(imgs_per_gpu=8, workers_per_gpu=4)
-
-optimizer = dict(
-    type='SGD', lr=0.01, momentum=0.9, weight_decay=5e-4, nesterov=True)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
deleted file mode 100644
index 65af25b9..00000000
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
+++ /dev/null
@@ -1,189 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att=None,
-    obj_loss_type='BCE',
-    reg_loss_type='iou')
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-data_root = '/apsarapangu/disk5/zxy/data/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-# data_root = '/cpfs01/shared/public/dataset/coco2017/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; deleted for tiny/nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=True,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=True,
-        test_mode=True,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=1,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=False, export_blade=False, end2end=False)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff.py
deleted file mode 100644
index 35d4654f..00000000
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff.py
+++ /dev/null
@@ -1,187 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att='ASFF',
-    obj_loss_type='BCE',
-    reg_loss_type='iou')
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-data_root = '/apsarapangu/disk5/zxy/data/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; deleted for tiny/nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=1,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_base.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_base.py
deleted file mode 100644
index 8407160f..00000000
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_base.py
+++ /dev/null
@@ -1,190 +0,0 @@
-_base_ = '../../base.py'
-
-model = dict(
-    type='YOLOX',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65)
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-data_root = '/apsarapangu/disk5/zxy/data/coco/'
-data_root = '/tmp/coco/'
-dataset_type = 'CocoDataset'
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; deleted for tiny/nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='MultiImageMixDataset',
-    data_source=dict(
-        type='CocoSource',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='MultiImageMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='CocoSource',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-# evaluation
-eval_config = dict(interval=10, gpu_collect=False)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-# basic_lr_per_img = 0.01 / 64.0
-optimizer = dict(
-    type='SGD',
-    # lr=0.01,
-    lr=0.02,
-    momentum=0.9,
-    weight_decay=5e-4,
-    nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHook')
-    ])
-# yapf:enable
-# runtime settings
-dist_params = dict(backend='nccl')
-cudnn_benchmark = True
-log_level = 'INFO'
-# load_from = '/apsarapangu/disk3/peizixiang.pzx/workspace/code/codereviews/ev-torch/work_dirs/modify_ckpts/yoloxs_coco_official_export.pt'
-load_from = None
-resume_from = None
-workflow = [('train', 1)]
-
-export = dict(use_jit=False)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_cbam.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_cbam.py
deleted file mode 100644
index d0542c2e..00000000
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_cbam.py
+++ /dev/null
@@ -1,187 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att='CBAM',
-    obj_loss_type='BCE',
-    reg_loss_type='iou')
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-# data_root = '/apsarapangu/disk5/zxy/data/coco/'
-data_root = '/mnt/data/nas/data/detection/coco/'
-# data_root = '/data/coco'
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; deleted for tiny/nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=1,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_ciou.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_ciou.py
deleted file mode 100644
index bc272d05..00000000
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_ciou.py
+++ /dev/null
@@ -1,187 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att=None,
-    obj_loss_type='BCE',
-    reg_loss_type='ciou')
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-# data_root = '/apsarapangu/disk5/zxy/data/coco/'
-data_root = '/mnt/data/nas/data/detection/coco/'
-# data_root = '/data/coco'
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; deleted for tiny/nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=1,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_diou.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_diou.py
deleted file mode 100644
index 93ae73c4..00000000
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_diou.py
+++ /dev/null
@@ -1,187 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att=None,
-    obj_loss_type='BCE',
-    reg_loss_type='diou')
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-# data_root = '/apsarapangu/disk5/zxy/data/coco/'
-data_root = '/mnt/data/nas/data/detection/coco/'
-# data_root = '/data/coco'
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; deleted for tiny/nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=1,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_eca.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_eca.py
deleted file mode 100644
index 8c8857e5..00000000
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_eca.py
+++ /dev/null
@@ -1,187 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att='ECA',
-    obj_loss_type='BCE',
-    reg_loss_type='iou')
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-# data_root = '/apsarapangu/disk5/zxy/data/coco/'
-data_root = '/mnt/data/nas/data/detection/coco/'
-# data_root = '/data/coco'
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; deleted for tiny/nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=1,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_eiou.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_eiou.py
deleted file mode 100644
index 04de5ccd..00000000
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_eiou.py
+++ /dev/null
@@ -1,187 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att=None,
-    obj_loss_type='BCE',
-    reg_loss_type='eiou')
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-# data_root = '/apsarapangu/disk5/zxy/data/coco/'
-data_root = '/mnt/data/nas/data/detection/coco/'
-# data_root = '/data/coco'
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; deleted for tiny/nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=1,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average of model weights
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=True)
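The variant configs removed below each repeat the whole training setup and differ only in a few model options (loss types, attention, SPP or head type). Since the *_pai.py variant already uses mmcv-style _base_ inheritance from the main coco config, any of these ablations could equally be kept as a thin override. A minimal sketch, assuming the base file path stays unchanged; the file name and the override keys shown here are illustrative only:

# hypothetical thin variant: inherit everything, override only the ablation knobs
_base_ = './yolox_s_8xb16_300e_coco.py'

model = dict(
    obj_loss_type='focal',  # objectness loss to ablate
    reg_loss_type='giou',   # box regression loss to ablate
)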
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_focal.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_focal.py
deleted file mode 100644
index abc16b56..00000000
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_focal.py
+++ /dev/null
@@ -1,187 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att=None,
-    obj_loss_type='focal',
-    reg_loss_type='iou')
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-data_root = '/apsarapangu/disk5/zxy/data/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-# data_root = '/data/coco'
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny and nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=1,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average of model weights
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=True)
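The *_focal.py config deleted above selected obj_loss_type='focal' for the objectness branch. For reference, a minimal sketch of a binary focal loss in the standard Lin et al. form; the alpha and gamma defaults here are assumptions, not values taken from this repo:

import torch
import torch.nn.functional as F

def binary_focal_loss(pred_logits, target, alpha=0.25, gamma=2.0):
    # FL(p_t) = -alpha_t * (1 - p_t)**gamma * log(p_t), computed from logits
    prob = torch.sigmoid(pred_logits)
    ce = F.binary_cross_entropy_with_logits(pred_logits, target, reduction='none')
    p_t = prob * target + (1 - prob) * (1 - target)        # prob of the true class
    alpha_t = alpha * target + (1 - alpha) * (1 - target)  # class-balancing factor
    return (alpha_t * (1 - p_t) ** gamma * ce).mean()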
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_giou.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_giou.py
deleted file mode 100644
index fadf8928..00000000
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_giou.py
+++ /dev/null
@@ -1,187 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att=None,
-    obj_loss_type='BCE',
-    reg_loss_type='giou')
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-# data_root = '/apsarapangu/disk5/zxy/data/coco/'
-data_root = '/mnt/data/nas/data/detection/coco/'
-# data_root = '/data/coco'
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny and nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=1,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average of model weights
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=True)
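The *_giou.py config deleted above only swapped reg_loss_type to 'giou'. A compact sketch of a GIoU loss for (x1, y1, x2, y2) boxes, included just to make the option concrete; the reduction and epsilon handling are assumptions:

import torch

def giou_loss(pred, target, eps=1e-7):
    # pred and target are (N, 4) tensors in (x1, y1, x2, y2) format
    inter_w = (torch.min(pred[:, 2], target[:, 2]) - torch.max(pred[:, 0], target[:, 0])).clamp(min=0)
    inter_h = (torch.min(pred[:, 3], target[:, 3]) - torch.max(pred[:, 1], target[:, 1])).clamp(min=0)
    inter = inter_w * inter_h

    area_p = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
    area_t = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
    union = area_p + area_t - inter
    iou = inter / (union + eps)

    # smallest box enclosing both prediction and target
    enc_w = torch.max(pred[:, 2], target[:, 2]) - torch.min(pred[:, 0], target[:, 0])
    enc_h = torch.max(pred[:, 3], target[:, 3]) - torch.min(pred[:, 1], target[:, 1])
    enclose = enc_w * enc_h + eps

    giou = iou - (enclose - union) / enclose
    return (1.0 - giou).mean()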
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_pai.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_pai.py
deleted file mode 100644
index 1f509612..00000000
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_pai.py
+++ /dev/null
@@ -1,96 +0,0 @@
-_base_ = './yolox_s_8xb16_300e_coco.py'
-
-# oss io config
-oss_io_config = dict(
-    ak_id='your oss ak id',
-    ak_secret='your oss ak secret',
-    hosts='oss-cn-zhangjiakou.aliyuncs.com',  # your oss hosts
-    buckets=['your_bucket'])  # your oss buckets
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny and nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-# dataset settings
-img_size = 640
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourcePAI',
-        path='data/coco/train2017.manifest',
-        classes=CLASSES),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourcePAI',
-        path='data/coco/val2017.manifest',
-        classes=CLASSES),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_se.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_se.py
deleted file mode 100644
index ddd54917..00000000
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_se.py
+++ /dev/null
@@ -1,187 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att='SE',
-    obj_loss_type='BCE',
-    reg_loss_type='iou')
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-# data_root = '/apsarapangu/disk5/zxy/data/coco/'
-data_root = '/mnt/data/nas/data/detection/coco/'
-# data_root = '/data/coco'
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny and nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=1,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average of model weights
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=True)
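The *_se.py config deleted above enabled use_att='SE'. A minimal squeeze-and-excitation block, shown to illustrate the kind of channel attention this flag refers to; the reduction ratio of 16 is an assumption:

import torch.nn as nn

class SEBlock(nn.Module):
    # global average pool -> bottleneck MLP -> sigmoid gate applied per channel

    def __init__(self, channels, reduction=16):
        super().__init__()
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channels, channels // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channels // reduction, channels, bias=False),
            nn.Sigmoid())

    def forward(self, x):
        b, c, _, _ = x.shape
        weight = self.fc(self.pool(x).view(b, c)).view(b, c, 1, 1)
        return x * weight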
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_siou.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_siou.py
deleted file mode 100644
index 068ca777..00000000
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_siou.py
+++ /dev/null
@@ -1,187 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att=None,
-    obj_loss_type='BCE',
-    reg_loss_type='siou')
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-data_root = '/apsarapangu/disk5/zxy/data/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny and nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=1,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average of model weights
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_siou2.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_siou2.py
deleted file mode 100644
index 23291c81..00000000
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_siou2.py
+++ /dev/null
@@ -1,187 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att=None,
-    obj_loss_type='BCE',
-    reg_loss_type='siou2')
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-data_root = '/apsarapangu/disk5/zxy/data/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny and nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=1,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average of model weights
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_sppf.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_sppf.py
deleted file mode 100644
index f8db6528..00000000
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_sppf.py
+++ /dev/null
@@ -1,188 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att=None,
-    obj_loss_type='BCE',
-    reg_loss_type='iou',
-    spp_type='sppf')
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-data_root = '/apsarapangu/disk5/zxy/data/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny and nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=1,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average of model weights
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=True)
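The *_sppf.py config deleted above set spp_type='sppf'. SPPF reproduces the receptive fields of parallel 5/9/13 max-pools by chaining a single 5x5 pool three times, which is cheaper; a sketch with BN and activation omitted and kernel size 5 assumed:

import torch
import torch.nn as nn

class SPPF(nn.Module):
    # reduce channels, apply the same max-pool three times, concat, expand

    def __init__(self, c_in, c_out, k=5):
        super().__init__()
        c_hidden = c_in // 2
        self.reduce = nn.Conv2d(c_in, c_hidden, 1, bias=False)
        self.pool = nn.MaxPool2d(kernel_size=k, stride=1, padding=k // 2)
        self.expand = nn.Conv2d(c_hidden * 4, c_out, 1, bias=False)

    def forward(self, x):
        x = self.reduce(x)
        y1 = self.pool(x)
        y2 = self.pool(y1)
        y3 = self.pool(y2)
        return self.expand(torch.cat((x, y1, y2, y3), dim=1))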
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_vfocal.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_vfocal.py
deleted file mode 100644
index 0a7fa108..00000000
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_vfocal.py
+++ /dev/null
@@ -1,187 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att=None,
-    obj_loss_type='v_focal',
-    reg_loss_type='iou')
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-data_root = '/apsarapangu/disk5/zxy/data/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-# data_root = '/data/coco'
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny and nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=1,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average of model weights
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=True)
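The *_vfocal.py config deleted above used obj_loss_type='v_focal'. Varifocal loss keeps positives at full, quality-weighted strength and only down-weights negatives; a sketch following the VarifocalNet formulation, with alpha and gamma values assumed:

import torch
import torch.nn.functional as F

def varifocal_loss(pred_logits, target_score, alpha=0.75, gamma=2.0):
    # target_score is 0 for negatives and an IoU-aware quality score for positives
    prob = torch.sigmoid(pred_logits)
    pos = (target_score > 0).float()
    weight = target_score * pos + alpha * prob.pow(gamma) * (1 - pos)
    bce = F.binary_cross_entropy_with_logits(pred_logits, target_score, reduction='none')
    return (weight * bce).mean()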
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_tal.py b/configs/detection/yolox/yolox_s_8xb16_300e_tal.py
deleted file mode 100644
index dc25fe50..00000000
--- a/configs/detection/yolox/yolox_s_8xb16_300e_tal.py
+++ /dev/null
@@ -1,189 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att=None,
-    obj_loss_type='BCE',
-    reg_loss_type='iou',
-    head_type='tood'  # yolox
-)
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-data_root = '/apsarapangu/disk5/zxy/data/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny and nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=1,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average of model weights
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=True)
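The *_tal.py config deleted above switched head_type to 'tood'. The core idea of task-aligned learning is to rank candidate predictions by a joint metric t = s**alpha * u**beta (classification score s, IoU u) instead of by IoU alone; a tiny sketch of that selection step, with alpha, beta and top-k values assumed:

import torch

def task_aligned_topk(cls_scores, ious, alpha=1.0, beta=6.0, topk=13):
    # cls_scores, ious: (num_candidates,) tensors for a single ground-truth box
    alignment = cls_scores.pow(alpha) * ious.pow(beta)
    return alignment.topk(min(topk, alignment.numel())).indices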
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_giou.py b/configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_giou.py
deleted file mode 100644
index 403a7771..00000000
--- a/configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_giou.py
+++ /dev/null
@@ -1,190 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att='ASFF',
-    obj_loss_type='BCE',
-    reg_loss_type='giou',
-    head_type='tood'  # yolox
-)
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-# data_root = '/apsarapangu/disk5/zxy/data/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-data_root = '/cpfs01/shared/public/dataset/coco2017/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny and nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=True,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        test_mode=True,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average (EMA) of model weights
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=True)
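Each of the experiment configs deleted below repeats the full COCO training setup and differs mainly in a few model fields and the dataset path. As a rough sketch (assuming EasyCV's mmcv-style `_base_` inheritance, which yolox_s_8xb16_300e_voc.py below already uses, and recursive dict merging), the yolox_s_8xb16_300e_tal_asff_sppf_giou.py variant removed next could equally be written as a thin override:

_base_ = './yolox_s_8xb16_300e_coco.py'

# only the fields that differ from the base need to be restated
model = dict(
    use_att='ASFF',
    obj_loss_type='BCE',
    reg_loss_type='giou',
    head_type='tood',
    spp_type='sppf')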
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_sppf_giou.py b/configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_sppf_giou.py
deleted file mode 100644
index 960826ea..00000000
--- a/configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_sppf_giou.py
+++ /dev/null
@@ -1,190 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att='ASFF',
-    obj_loss_type='BCE',
-    reg_loss_type='giou',
-    head_type='tood',  # yolox
-    spp_type='sppf')
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-# data_root = '/apsarapangu/disk5/zxy/data/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-data_root = '/cpfs01/shared/public/dataset/coco2017/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny/nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=True,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        test_mode=True,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average (EMA) of model weights
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=True)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_voc.py b/configs/detection/yolox/yolox_s_8xb16_300e_voc.py
deleted file mode 100644
index d2b0d800..00000000
--- a/configs/detection/yolox/yolox_s_8xb16_300e_voc.py
+++ /dev/null
@@ -1,90 +0,0 @@
-_base_ = './yolox_s_8xb16_300e_coco.py'
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
-    'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',
-    'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'
-]
-
-model = dict(num_classes=20)
-
-# dataset settings
-data_root = 'data/voc/'
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny/nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceVOC',
-        path=data_root + 'ImageSets/Main/train.txt',
-        classes=CLASSES,
-        cache_at_init=True),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceVOC',
-        path=data_root + 'ImageSets/Main/val.txt',
-        classes=CLASSES,
-        cache_at_init=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# evaluation
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
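In the VOC override above, model num_classes (20) must stay in sync with the 20-entry CLASSES list. A minimal sketch, using the same executable-Python config conventions, that derives one from the other so the two cannot drift apart:

CLASSES = [
    'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
    'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',
    'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'
]

model = dict(num_classes=len(CLASSES))  # evaluates to 20 for VOC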
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_wonorm.py b/configs/detection/yolox/yolox_s_8xb16_300e_wonorm.py
deleted file mode 100644
index b0c9895b..00000000
--- a/configs/detection/yolox/yolox_s_8xb16_300e_wonorm.py
+++ /dev/null
@@ -1,186 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65)
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-data_root = '/apsarapangu/disk5/zxy/data/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny/nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    # dict(type='MMPhotoMetricDistortion',
-    #      brightness_delta=32,
-    #      contrast_range=(0.5, 1.5),
-    #      saturation_range=(0.5, 1.5),
-    #      hue_delta=18),  # only support float32
-    dict(type='MMYOLOXHSVRandomAug'),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    # dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle', img_to_float=True),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    # dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle', img_to_float=True),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=False),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=True,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=32,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[dict(type='LoadImageFromFile', to_float32=False)],
-        classes=CLASSES,
-        test_mode=True,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=16,
-    workers_per_gpu=4,
-    persistent_workers=True,
-    train=train_dataset,
-    val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=True,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        dist_eval=True,
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average (EMA) of model weights
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        # dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=False)
-mp_start_method = 'fork'
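The 'wonorm' variant above drops MMNormalize from both pipelines, so images stay as raw 0-255 values through loading and are only converted to float by DefaultFormatBundle(img_to_float=True); the mean/std normalization then has to happen elsewhere, typically inside the model. A minimal sketch of folding it into a preprocessing module (illustrative only; this module is an assumption, not EasyCV's actual implementation):

import torch

class NormalizeInput(torch.nn.Module):
    # applies (x - mean) / std once per forward pass, on the same device as the input
    def __init__(self,
                 mean=(123.675, 116.28, 103.53),
                 std=(58.395, 57.12, 57.375)):
        super().__init__()
        self.register_buffer('mean', torch.tensor(mean).view(1, 3, 1, 1))
        self.register_buffer('std', torch.tensor(std).view(1, 3, 1, 1))

    def forward(self, img):  # img: (B, 3, H, W), uint8 or float
        return (img.float() - self.mean) / self.std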
diff --git a/configs/detection/yolox/yolox_yolo6.py b/configs/detection/yolox/yolox_yolo6.py
deleted file mode 100644
index a32b6461..00000000
--- a/configs/detection/yolox/yolox_yolo6.py
+++ /dev/null
@@ -1,190 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    backbone='EfficientRep',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    obj_loss_type='BCE',
-    reg_loss_type='giou',
-)
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-# data_root = 'data/coco/'
-# data_root = '/apsarapangu/disk5/zxy/data/coco/'
-data_root = '/mnt/data/nas/data/detection/coco/'
-# data_root = '/cpfs01/shared/public/dataset/coco2017/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny/nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average (EMA) of model weights
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=False, export_blade=False, end2end=False)
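reg_loss_type='giou' in the config above selects a GIoU regression loss in place of the plain IoU loss. A self-contained sketch of the quantity being optimized (the wrapper function is illustrative and uses torchvision's box op, not the easycv.models.loss implementation):

import torch
from torchvision.ops import generalized_box_iou

def giou_loss(pred_boxes, target_boxes):
    # boxes in (x1, y1, x2, y2) format, pred and target paired row by row
    giou = generalized_box_iou(pred_boxes, target_boxes).diagonal()
    return (1.0 - giou).mean()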
diff --git a/configs/detection/yolox/yolox_yolo6_asff_sim.py b/configs/detection/yolox/yolox_yolo6_asff_sim.py
deleted file mode 100644
index b3bc4f3d..00000000
--- a/configs/detection/yolox/yolox_yolo6_asff_sim.py
+++ /dev/null
@@ -1,197 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    backbone='EfficientRep',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    obj_loss_type='BCE',
-    reg_loss_type='giou',
-    use_att='ASFF_sim',
-    asff_channel=2,
-    la_down_rate=32,
-    expand_kernel=1,
-    down_rate=None,
-    use_dconv=False,
-    use_expand=True,
-)
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-# data_root = 'data/coco/'
-# data_root = '/apsarapangu/disk5/zxy/data/coco/'
-data_root = '/mnt/data/nas/data/detection/coco/'
-# data_root = '/cpfs01/shared/public/dataset/coco2017/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny/nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average (EMA) of model weights
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=False, export_blade=False, end2end=False)
diff --git a/configs/detection/yolox/yolox_yolo6_att.py b/configs/detection/yolox/yolox_yolo6_att.py
deleted file mode 100644
index b77dffc1..00000000
--- a/configs/detection/yolox/yolox_yolo6_att.py
+++ /dev/null
@@ -1,194 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    backbone='EfficientRep',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att='ASFF',
-    asff_channel=2,
-    obj_loss_type='BCE',
-    reg_loss_type='giou',
-    head_type='tood',
-    la_down_rate=32,
-)
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-# data_root = 'data/coco/'
-data_root = '/apsarapangu/disk5/zxy/data/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-# data_root = '/cpfs01/shared/public/dataset/coco2017/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny/nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average (EMA) of model weights
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=False, export_blade=False, end2end=False)
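use_att='ASFF' above turns on adaptively spatial feature fusion in the neck: each output level predicts a per-pixel weight for every input pyramid level, the weights are softmax-normalized across levels, and the resized feature maps are blended with them. A stripped-down sketch of that fusion step (shapes and the helper are illustrative, not the ASFF.py code):

import torch
import torch.nn.functional as F

def asff_fuse(feats, weight_logits):
    # feats: three tensors already resized to a common (B, C, H, W)
    # weight_logits: (B, 3, H, W) raw per-level fusion weights
    weights = F.softmax(weight_logits, dim=1)  # normalize across the 3 levels
    fused = sum(
        w.unsqueeze(1) * f  # (B, 1, H, W) weight map broadcast over channels
        for w, f in zip(weights.unbind(dim=1), feats))
    return fused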
diff --git a/configs/detection/yolox/yolox_yolo6_att_relu.py b/configs/detection/yolox/yolox_yolo6_att_relu.py
deleted file mode 100644
index a9b418ac..00000000
--- a/configs/detection/yolox/yolox_yolo6_att_relu.py
+++ /dev/null
@@ -1,194 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    backbone='EfficientRep',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att='ASFF',
-    asff_channel=2,
-    obj_loss_type='BCE',
-    reg_loss_type='giou',
-    head_type='tood',
-    la_down_rate=32,
-    act='relu')
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-# data_root = 'data/coco/'
-# data_root = '/apsarapangu/disk5/zxy/data/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-data_root = '/cpfs01/shared/public/dataset/coco2017/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny/nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average (EMA) of model weights
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=False, export_blade=False, end2end=False)
diff --git a/configs/detection/yolox/yolox_yolo6_att_sim.py b/configs/detection/yolox/yolox_yolo6_att_sim.py
deleted file mode 100644
index 9418db65..00000000
--- a/configs/detection/yolox/yolox_yolo6_att_sim.py
+++ /dev/null
@@ -1,198 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    backbone='EfficientRep',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att='ASFF_sim',
-    asff_channel=2,
-    obj_loss_type='BCE',
-    reg_loss_type='giou',
-    head_type='tood',
-    la_down_rate=32,
-    expand_kernel=3,
-    down_rate=None,
-    use_dconv=False,
-    use_expand=True,
-)
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-# data_root = 'data/coco/'
-# data_root = '/apsarapangu/disk5/zxy/data/coco/'
-data_root = '/mnt/data/nas/data/detection/coco/'
-# data_root = '/cpfs01/shared/public/dataset/coco2017/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny/nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average (EMA) of model weights
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=False, export_blade=False, end2end=False)
diff --git a/configs/detection/yolox/yolox_yolo6_att_sim_1.py b/configs/detection/yolox/yolox_yolo6_att_sim_1.py
deleted file mode 100644
index e00eb7c1..00000000
--- a/configs/detection/yolox/yolox_yolo6_att_sim_1.py
+++ /dev/null
@@ -1,201 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    backbone='EfficientRep',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att='ASFF_sim',
-    asff_channel=2,
-    obj_loss_type='BCE',
-    reg_loss_type='giou',
-    head_type='tood',
-    la_down_rate=32,
-    expand_kernel=1,
-    down_rate=None,
-    use_dconv=False,
-    use_expand=True,
-    # norm_cfg = 'SyncBN'
-)
-
-sync_bn = True
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-# data_root = 'data/coco/'
-data_root = '/apsarapangu/disk5/zxy/data/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-# data_root = '/cpfs01/shared/public/dataset/coco2017/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny/nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average (EMA) of model weights
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=False, export_blade=False, end2end=False)
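sync_bn = True in the config above asks for BatchNorm statistics to be synchronized across GPUs during distributed training. In plain PyTorch terms that corresponds to the following conversion, shown as a sketch independent of how EasyCV wires it up internally:

import torch

def to_sync_bn(model):
    # replaces every BatchNorm*d layer with SyncBatchNorm;
    # requires torch.distributed to be initialized before the first forward pass
    return torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)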
diff --git a/configs/detection/yolox/yolox_yolo6_att_sim_16.py b/configs/detection/yolox/yolox_yolo6_att_sim_16.py
deleted file mode 100644
index ae45620a..00000000
--- a/configs/detection/yolox/yolox_yolo6_att_sim_16.py
+++ /dev/null
@@ -1,198 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    backbone='EfficientRep',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att='ASFF_sim',
-    asff_channel=2,
-    obj_loss_type='BCE',
-    reg_loss_type='giou',
-    head_type='tood',
-    la_down_rate=32,
-    expand_kernel=3,
-    down_rate=16,
-    use_dconv=False,
-    use_expand=True,
-)
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-# data_root = 'data/coco/'
-# data_root = '/apsarapangu/disk5/zxy/data/coco/'
-data_root = '/mnt/data/nas/data/detection/coco/'
-# data_root = '/cpfs01/shared/public/dataset/coco2017/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny/nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=False, export_blade=False, end2end=False)
diff --git a/configs/detection/yolox/yolox_yolo6_att_sim_32.py b/configs/detection/yolox/yolox_yolo6_att_sim_32.py
deleted file mode 100644
index ab77c87b..00000000
--- a/configs/detection/yolox/yolox_yolo6_att_sim_32.py
+++ /dev/null
@@ -1,198 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    backbone='EfficientRep',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att='ASFF_sim',
-    asff_channel=2,
-    obj_loss_type='BCE',
-    reg_loss_type='giou',
-    head_type='tood',
-    la_down_rate=32,
-    expand_kernel=3,
-    down_rate=32,
-    use_dconv=False,
-    use_expand=True,
-)
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-# data_root = 'data/coco/'
-# data_root = '/apsarapangu/disk5/zxy/data/coco/'
-data_root = '/mnt/data/nas/data/detection/coco/'
-# data_root = '/cpfs01/shared/public/dataset/coco2017/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny and nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=False, export_blade=False, end2end=False)
diff --git a/configs/detection/yolox/yolox_yolo6_att_sim_8.py b/configs/detection/yolox/yolox_yolo6_att_sim_8.py
deleted file mode 100644
index 05f0d9ac..00000000
--- a/configs/detection/yolox/yolox_yolo6_att_sim_8.py
+++ /dev/null
@@ -1,198 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    backbone='EfficientRep',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att='ASFF_sim',
-    asff_channel=2,
-    obj_loss_type='BCE',
-    reg_loss_type='giou',
-    head_type='tood',
-    la_down_rate=32,
-    expand_kernel=3,
-    down_rate=8,
-    use_dconv=False,
-    use_expand=True,
-)
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-# data_root = 'data/coco/'
-# data_root = '/apsarapangu/disk5/zxy/data/coco/'
-data_root = '/mnt/data/nas/data/detection/coco/'
-# data_root = '/cpfs01/shared/public/dataset/coco2017/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny and nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=False, export_blade=False, end2end=False)
diff --git a/configs/detection/yolox/yolox_yolo6_att_sim_d.py b/configs/detection/yolox/yolox_yolo6_att_sim_d.py
deleted file mode 100644
index 773a24c9..00000000
--- a/configs/detection/yolox/yolox_yolo6_att_sim_d.py
+++ /dev/null
@@ -1,198 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    backbone='EfficientRep',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att='ASFF_sim',
-    asff_channel=2,
-    obj_loss_type='BCE',
-    reg_loss_type='giou',
-    head_type='tood',
-    la_down_rate=32,
-    expand_kernel=3,
-    down_rate=None,
-    use_dconv=True,
-    use_expand=True,
-)
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-# data_root = 'data/coco/'
-# data_root = '/apsarapangu/disk5/zxy/data/coco/'
-data_root = '/mnt/data/nas/data/detection/coco/'
-# data_root = '/cpfs01/shared/public/dataset/coco2017/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny and nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=False, export_blade=False, end2end=False)
diff --git a/configs/detection/yolox/yolox_yolo6_att_sim_no_expand.py b/configs/detection/yolox/yolox_yolo6_att_sim_no_expand.py
deleted file mode 100644
index 2f94459f..00000000
--- a/configs/detection/yolox/yolox_yolo6_att_sim_no_expand.py
+++ /dev/null
@@ -1,198 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    backbone='EfficientRep',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att='ASFF_sim',
-    asff_channel=2,
-    obj_loss_type='BCE',
-    reg_loss_type='giou',
-    head_type='tood',
-    la_down_rate=32,
-    expand_kernel=3,
-    down_rate=None,
-    use_dconv=False,
-    use_expand=False,
-)
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-# data_root = 'data/coco/'
-# data_root = '/apsarapangu/disk5/zxy/data/coco/'
-data_root = '/mnt/data/nas/data/detection/coco/'
-# data_root = '/cpfs01/shared/public/dataset/coco2017/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny and nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=False, export_blade=False, end2end=False)
diff --git a/configs/detection/yolox/yolox_yolo6_gsconv.py b/configs/detection/yolox/yolox_yolo6_gsconv.py
deleted file mode 100644
index 75a6cb25..00000000
--- a/configs/detection/yolox/yolox_yolo6_gsconv.py
+++ /dev/null
@@ -1,190 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    backbone='EfficientRep',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    obj_loss_type='BCE',
-    reg_loss_type='giou',
-    neck='gsconv')
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-# data_root = 'data/coco/'
-# data_root = '/apsarapangu/disk5/zxy/data/coco/'
-data_root = '/mnt/data/nas/data/detection/coco/'
-# data_root = '/cpfs01/shared/public/dataset/coco2017/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny and nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=False, export_blade=False, end2end=False)
diff --git a/configs/detection/yolox/yolox_yolo6_gsconv_asff_sim.py b/configs/detection/yolox/yolox_yolo6_gsconv_asff_sim.py
deleted file mode 100644
index 5573c136..00000000
--- a/configs/detection/yolox/yolox_yolo6_gsconv_asff_sim.py
+++ /dev/null
@@ -1,199 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    backbone='EfficientRep',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    obj_loss_type='BCE',
-    reg_loss_type='giou',
-    neck='gsconv',
-    use_att='ASFF_sim',
-    asff_channel=2,
-    la_down_rate=32,
-    expand_kernel=1,
-    down_rate=None,
-    use_dconv=False,
-    use_expand=True,
-)
-
-find_unused_parameters = True
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-# data_root = 'data/coco/'
-data_root = '/root/database/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-# data_root = '/cpfs01/shared/public/dataset/coco2017/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny and nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.005, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=False, export_blade=False, end2end=False)
diff --git a/configs/detection/yolox/yolox_yolo6_gsconv_part.py b/configs/detection/yolox/yolox_yolo6_gsconv_part.py
deleted file mode 100644
index cbb0dc78..00000000
--- a/configs/detection/yolox/yolox_yolo6_gsconv_part.py
+++ /dev/null
@@ -1,191 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    backbone='EfficientRep',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    obj_loss_type='BCE',
-    reg_loss_type='giou',
-    neck='gsconv',
-    neck_mode='part')
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-# data_root = 'data/coco/'
-# data_root = '/apsarapangu/disk5/zxy/data/coco/'
-data_root = '/mnt/data/nas/data/detection/coco/'
-# data_root = '/cpfs01/shared/public/dataset/coco2017/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny and nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=False, export_blade=False, end2end=False)
diff --git a/configs/detection/yolox/yolox_yolo6_head_ori.py b/configs/detection/yolox/yolox_yolo6_head_ori.py
deleted file mode 100644
index 05914423..00000000
--- a/configs/detection/yolox/yolox_yolo6_head_ori.py
+++ /dev/null
@@ -1,201 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    backbone='EfficientRep',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att='ASFF_sim',
-    asff_channel=2,
-    obj_loss_type='BCE',
-    reg_loss_type='giou',
-    head_type='yolox',
-    la_down_rate=32,
-    expand_kernel=1,
-    down_rate=None,
-    use_dconv=False,
-    use_expand=True,
-    # norm_cfg = 'SyncBN'
-)
-
-sync_bn = True
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-# data_root = 'data/coco/'
-data_root = '/apsarapangu/disk5/zxy/data/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-# data_root = '/cpfs01/shared/public/dataset/coco2017/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny and nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponential moving average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=False, export_blade=False, end2end=False)
diff --git a/configs/detection/yolox/yolox_yolo6_head_tood.py b/configs/detection/yolox/yolox_yolo6_head_tood.py
deleted file mode 100644
index b8142c12..00000000
--- a/configs/detection/yolox/yolox_yolo6_head_tood.py
+++ /dev/null
@@ -1,201 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    backbone='EfficientRep',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att='ASFF_sim',
-    asff_channel=2,
-    obj_loss_type='BCE',
-    reg_loss_type='giou',
-    head_type='tood',
-    la_down_rate=32,
-    expand_kernel=1,
-    down_rate=None,
-    use_dconv=False,
-    use_expand=True,
-    # norm_cfg = 'SyncBN'
-)
-
-# sync_bn = True
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-# data_root = 'data/coco/'
-data_root = '/apsarapangu/disk5/zxy/data/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-# data_root = '/cpfs01/shared/public/dataset/coco2017/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m l x; removed for tiny and nano
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=8, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponetial model average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=False, export_blade=False, end2end=False)
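
For reference, the MMNormalize step that appears in every pipeline above is a per-channel standardization with the values from img_norm_cfg; a minimal standalone sketch of the arithmetic (plain NumPy, not the actual transform class, which additionally handles the BGR-to-RGB flip implied by to_rgb=True) is:

import numpy as np

# Illustrative only: mirrors the (img - mean) / std arithmetic of MMNormalize
# using the mean/std from img_norm_cfg; the random image stands in for a real frame.
mean = np.array([123.675, 116.28, 103.53], dtype=np.float32)
std = np.array([58.395, 57.12, 57.375], dtype=np.float32)

img = np.random.randint(0, 256, size=(640, 640, 3)).astype(np.float32)  # H x W x C, RGB order
img_norm = (img - mean) / std
print(img_norm.mean(axis=(0, 1)), img_norm.std(axis=(0, 1)))
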
diff --git a/configs/detection/yolox/yolox_yolo6_yoloe_head.py b/configs/detection/yolox/yolox_yolo6_yoloe_head.py
deleted file mode 100644
index be34b9a8..00000000
--- a/configs/detection/yolox/yolox_yolo6_yoloe_head.py
+++ /dev/null
@@ -1,194 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    backbone='EfficientRep',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
-    test_conf=0.01,
-    nms_thre=0.65,
-    use_att='ASFF',
-    asff_channel=2,
-    obj_loss_type='BCE',
-    reg_loss_type='giou',
-    head_type='ppyoloe',
-    la_down_rate=32,
-)
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-# data_root = 'data/coco/'
-data_root = '/apsarapangu/disk5/zxy/data/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-# data_root = '/cpfs01/shared/public/dataset/coco2017/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m x l; tiny nano will detele
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=1, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponetial model average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=False, export_blade=False, end2end=False)

From ab87f0929709e0149670fe88b22ab16e18f23764 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=91=A8=E6=A5=BC?= <zhoulou.wzh@alibaba-inc.com>
Date: Tue, 9 Aug 2022 15:49:26 +0800
Subject: [PATCH 27/69] fix lint bug

---
 .../yolox/yolox_s_8xb16_300e_coco.py          | 182 ++++++++++++++++++
 .../yolox/yolox_s_8xb16_300e_coco_pai.py      |  96 +++++++++
 .../detection/yolox/yolox_s_8xb16_300e_voc.py |  90 +++++++++
 .../yolox/yolox_tiny_8xb16_300e_coco.py       |  10 +-
 easycv/apis/train.py                          |   1 -
 show_predict.py                               |  79 --------
 6 files changed, 371 insertions(+), 87 deletions(-)
 create mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco.py
 create mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_pai.py
 create mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_voc.py
 delete mode 100644 show_predict.py

diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
new file mode 100644
index 00000000..1f434656
--- /dev/null
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
@@ -0,0 +1,182 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    num_classes=80,
+    model_type='s',  # s m l x tiny nano
+    test_conf=0.01,
+    nms_thre=0.65)
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+data_root = 'data/coco/'
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # s m x l; deleted for tiny and nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # shown by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average of the model
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=False, export_blade=False, end2end=False)
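
The two configs added next inherit from this file through _base_ and restate only the fields they change; a minimal sketch of that override pattern (the classes, class count and data_root below are illustrative, not part of this patch set) is:

_base_ = './yolox_s_8xb16_300e_coco.py'

# Only the overridden keys need to be repeated; everything else is taken from the base config.
CLASSES = ['cat', 'dog']
model = dict(num_classes=2)
data_root = 'data/my_dataset/'
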
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_pai.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_pai.py
new file mode 100644
index 00000000..1f509612
--- /dev/null
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_pai.py
@@ -0,0 +1,96 @@
+_base_ = './yolox_s_8xb16_300e_coco.py'
+
+# oss io config
+oss_io_config = dict(
+    ak_id='your oss ak id',
+    ak_secret='your oss ak secret',
+    hosts='oss-cn-zhangjiakou.aliyuncs.com',  # your oss hosts
+    buckets=['your_bucket'])  # your oss buckets
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # s m x l; deleted for tiny and nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+# dataset settings
+img_size = 640
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourcePAI',
+        path='data/coco/train2017.manifest',
+        classes=CLASSES),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourcePAI',
+        path='data/coco/val2017.manifest',
+        classes=CLASSES),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_voc.py b/configs/detection/yolox/yolox_s_8xb16_300e_voc.py
new file mode 100644
index 00000000..d2b0d800
--- /dev/null
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_voc.py
@@ -0,0 +1,90 @@
+_base_ = './yolox_s_8xb16_300e_coco.py'
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
+    'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',
+    'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'
+]
+
+model = dict(num_classes=20)
+
+# dataset settings
+data_root = 'data/voc/'
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # s m x l; deleted for tiny and nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceVOC',
+        path=data_root + 'ImageSets/Main/train.txt',
+        classes=CLASSES,
+        cache_at_init=True),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceVOC',
+        path=data_root + 'ImageSets/Main/val.txt',
+        classes=CLASSES,
+        cache_at_init=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# evaluation
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
diff --git a/configs/detection/yolox/yolox_tiny_8xb16_300e_coco.py b/configs/detection/yolox/yolox_tiny_8xb16_300e_coco.py
index 5c3dda76..436244d9 100644
--- a/configs/detection/yolox/yolox_tiny_8xb16_300e_coco.py
+++ b/configs/detection/yolox/yolox_tiny_8xb16_300e_coco.py
@@ -1,7 +1,4 @@
-# _base_ = './yolox_s_8xb16_300e_coco.py'
-# _base_ = 'configs/detection/yolox/yolox_s_8xb16_300e_coco.py'
-# _base_ = 'configs/detection/yolox/yolox_s_8xb16_300e_tal_asff_giou.py'
-_base_ = 'configs/detection/yolox/yolox_yolo6_att.py'
+_base_ = './yolox_s_8xb16_300e_coco.py'
 
 # model settings
 model = dict(model_type='tiny')
@@ -60,8 +57,7 @@
     dict(type='Collect', keys=['img'])
 ]
 
-# data_root = 'data/coco/'
-data_root = '/apsarapangu/disk5/zxy/data/coco/'
+data_root = 'data/coco/'
 
 train_dataset = dict(
     type='DetImagesMixDataset',
@@ -74,7 +70,7 @@
             dict(type='LoadAnnotations', with_bbox=True)
         ],
         classes=CLASSES,
-        filter_empty_gt=True,
+        filter_empty_gt=False,
         iscrowd=False),
     pipeline=train_pipeline,
     dynamic_scale=img_scale)
diff --git a/easycv/apis/train.py b/easycv/apis/train.py
index ff8b2715..2307f61e 100644
--- a/easycv/apis/train.py
+++ b/easycv/apis/train.py
@@ -89,7 +89,6 @@ def train_model(model,
 
     # SyncBatchNorm
     open_sync_bn = cfg.get('sync_bn', False)
-    print('!!Sync_bn', open_sync_bn)
 
     if open_sync_bn:
         model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
diff --git a/show_predict.py b/show_predict.py
deleted file mode 100644
index 31992d51..00000000
--- a/show_predict.py
+++ /dev/null
@@ -1,79 +0,0 @@
-import os
-import random
-
-import cv2
-import numpy as np
-from PIL import Image
-
-from easycv.predictors.detector import TorchYoloXPredictor
-
-colors = [[255, 0, 0], [255, 255, 0], [255, 255, 0], [0, 255, 255]
-          ] + [[random.randint(0, 255) for _ in range(3)] for _ in range(2000)]
-
-
-def plot_boxes(outputs, imgs, save_path=None, color=None, line_thickness=None):
-    x = outputs['detection_boxes']
-    score = outputs['detection_scores']
-    id = outputs['detection_classes']
-    label = outputs['detection_class_names']
-
-    # Plots one bounding box on image img
-    tl = int(
-        line_thickness or round(0.002 * (imgs.shape[0] + imgs.shape[1]) /
-                                2)) + 1  # line/font thickness
-    # tl = int(line_thickness)
-
-    for num in range(x.shape[0]):
-        c1, c2 = (int(x[num][0]), int(x[num][1])), (int(x[num][2]),
-                                                    int(x[num][3]))
-        cv2.rectangle(
-            imgs, c1, c2, colors[id[num]], thickness=tl, lineType=cv2.LINE_AA)
-
-        tf = max(tl - 1, 1)  # font thickness
-        t_size = cv2.getTextSize(
-            label[num], 0, fontScale=tl / 10, thickness=tf)[0]
-
-        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
-        cv2.rectangle(imgs, c1, c2, colors[id[num]], -1, cv2.LINE_AA)  # filled
-        cv2.putText(
-            imgs,
-            label[num], (c1[0], c1[1] - 2),
-            0,
-            0.2, [225, 0, 255],
-            thickness=1,
-            lineType=cv2.LINE_AA)
-        cv2.putText(
-            imgs,
-            str(score[num]), (c1[0], c1[1] - 10),
-            0,
-            0.2, [225, 0, 255],
-            thickness=1,
-            lineType=cv2.LINE_AA)
-
-    imgs = cv2.cvtColor(imgs, cv2.COLOR_BGR2RGB)
-    cv2.imwrite(save_path + '/result_39.6.jpg', imgs)
-
-    return
-
-
-def main():
-    pretrain_path = '/apsarapangu/disk5/zxy/pretrained/models/epoch_300_39.6.pth'
-    data_path = '/apsarapangu/disk5/zxy/data/coco/'
-    detection_model_path = pretrain_path
-
-    img = os.path.join(data_path, 'val2017/000000037777.jpg')
-
-    input_data_list = [np.asarray(Image.open(img))]
-    predictor = TorchYoloXPredictor(
-        model_path=detection_model_path, score_thresh=0.5)
-
-    output = predictor.predict(input_data_list)[0]
-
-    img = cv2.imread(img)
-    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
-    plot_boxes(output, img, save_path='./result')
-    print(output)
-
-
-if __name__ == '__main__':
-    main()
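
The removed script wrapped TorchYoloXPredictor; a minimal usage sketch based on the calls it made (the checkpoint and image paths are placeholders) is:

import numpy as np
from PIL import Image

from easycv.predictors.detector import TorchYoloXPredictor

# Placeholder paths; predict() takes a list of RGB arrays and returns one dict per image.
predictor = TorchYoloXPredictor(model_path='epoch_300_export.pt', score_thresh=0.5)
img = np.asarray(Image.open('demo.jpg'))
output = predictor.predict([img])[0]
for box, score, name in zip(output['detection_boxes'],
                            output['detection_scores'],
                            output['detection_class_names']):
    print(name, float(score), np.round(box, 1))
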

From ce4e7b56f621fa99ca6831d2f6aa4cbf31540451 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=91=A8=E6=A5=BC?= <zhoulou.wzh@alibaba-inc.com>
Date: Tue, 9 Aug 2022 15:50:34 +0800
Subject: [PATCH 28/69] fix lint bug1

---
 tests/ut_config.py | 3 ---
 tools/eval.py      | 6 ------
 2 files changed, 9 deletions(-)

diff --git a/tests/ut_config.py b/tests/ut_config.py
index a9975e72..f874327e 100644
--- a/tests/ut_config.py
+++ b/tests/ut_config.py
@@ -81,9 +81,6 @@
     'pretrained_models/classification/resnet/resnet50_withhead.pth')
 PRETRAINED_MODEL_FACEID = os.path.join(BASE_LOCAL_PATH,
                                        'pretrained_models/faceid')
-
-# PRETRAINED_MODEL_YOLOXS_EXPORT = os.path.join(
-#     BASE_LOCAL_PATH, 'pretrained_models/detection/yolox_s/epoch_300.pth')
 PRETRAINED_MODEL_YOLOXS_EXPORT = os.path.join(
     BASE_LOCAL_PATH, 'pretrained_models/detection/yolox_s/epoch_300_export.pt')
 PRETRAINED_MODEL_YOLOXS_END2END_JIT = os.path.join(
diff --git a/tools/eval.py b/tools/eval.py
index 9786083b..20463ac6 100644
--- a/tools/eval.py
+++ b/tools/eval.py
@@ -185,12 +185,6 @@ def main():
     device = 'cuda' if torch.cuda.is_available() else 'cpu'
     print(f'use device {device}')
     checkpoint = load_checkpoint(model, args.checkpoint, map_location=device)
-    #
-    # official_path = "/apsarapangu/disk5/zxy/pretrained/models/yolox_s_official_40.5.pth"
-    # if 'official' in official_path:
-    #     checkpoint_model = _load_checkpoint(official_path, device)
-    #     state_dict = checkpoint_model['model']
-    #     model.load_state_dict(state_dict)
 
     model.to(device)
     # if args.fuse_conv_bn:

From 502a9dc3a1503f5a9b13dc29b8be002453539188 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=91=A8=E6=A5=BC?= <zhoulou.wzh@alibaba-inc.com>
Date: Wed, 10 Aug 2022 11:06:48 +0800
Subject: [PATCH 29/69] fix make_divisible

---
 easycv/models/backbones/repvgg_yolox_backbone.py | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

diff --git a/easycv/models/backbones/repvgg_yolox_backbone.py b/easycv/models/backbones/repvgg_yolox_backbone.py
index f8ddbd9d..7b63f433 100644
--- a/easycv/models/backbones/repvgg_yolox_backbone.py
+++ b/easycv/models/backbones/repvgg_yolox_backbone.py
@@ -1,11 +1,16 @@
 # borrow some code from https://github.com/DingXiaoH/RepVGG/repvgg.py MIT2.0
 import copy
 import warnings
+import math
 
 import numpy as np
 import torch
 import torch.nn as nn
 
+def make_divisible(x, divisor):
+    # Round x up to the nearest multiple of divisor.
+    return math.ceil(x / divisor) * divisor
+
 
 def conv_bn(in_channels, out_channels, kernel_size, stride, padding, groups=1):
     '''Basic cell for rep-style block, including conv and bn'''
@@ -360,7 +365,6 @@ def forward(self, x):
 if __name__ == '__main__':
 
     from torchsummaryX import summary
-    import math
 
     depth_mul = 0.33
     width_mul = 0.5
@@ -372,9 +376,6 @@ def forward(self, x):
     num_repeat = [(max(round(i * depth_mul), 1) if i > 1 else i)
                   for i in (num_repeat_backbone + num_repeat_neck)]
 
-    def make_divisible(x, divisor):
-        # Upward revision the value x to make it evenly divisible by the divisor.
-        return math.ceil(x / divisor) * divisor
 
     channels_list = [
         make_divisible(i * width_mul, 8)
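
With a width multiplier of 0.5 (the value used in this file's __main__ test), the helper moved above rounds each scaled channel count up to a multiple of 8; a quick worked check using the backbone channel list shown in this patch set is:

import math

def make_divisible(x, divisor):
    # Round x up to the nearest multiple of divisor.
    return math.ceil(x / divisor) * divisor

print([make_divisible(c * 0.5, 8) for c in [64, 128, 256, 512, 1024]])
# -> [32, 64, 128, 256, 512]
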

From 6379b27eba3cab7eab1fcef344c8162c6f63bcee Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=91=A8=E6=A5=BC?= <zhoulou.wzh@alibaba-inc.com>
Date: Wed, 10 Aug 2022 11:07:19 +0800
Subject: [PATCH 30/69] lint

---
 easycv/models/backbones/repvgg_yolox_backbone.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/easycv/models/backbones/repvgg_yolox_backbone.py b/easycv/models/backbones/repvgg_yolox_backbone.py
index 7b63f433..e20f8c6c 100644
--- a/easycv/models/backbones/repvgg_yolox_backbone.py
+++ b/easycv/models/backbones/repvgg_yolox_backbone.py
@@ -1,12 +1,13 @@
 # borrow some code from https://github.com/DingXiaoH/RepVGG/repvgg.py MIT2.0
 import copy
-import warnings
 import math
+import warnings
 
 import numpy as np
 import torch
 import torch.nn as nn
 
+
 def make_divisible(x, divisor):
     # Round x up to the nearest multiple of divisor.
     return math.ceil(x / divisor) * divisor
@@ -376,7 +377,6 @@ def forward(self, x):
     num_repeat = [(max(round(i * depth_mul), 1) if i > 1 else i)
                   for i in (num_repeat_backbone + num_repeat_neck)]
 
-
     channels_list = [
         make_divisible(i * width_mul, 8)
         for i in (channels_list_backbone + channels_list_neck)

From 47f7e2eafd548f51a4b3c321e4116b1f860e9949 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=91=A8=E6=A5=BC?= <zhoulou.wzh@alibaba-inc.com>
Date: Wed, 10 Aug 2022 11:26:15 +0800
Subject: [PATCH 31/69] first version check by zl

---
 easycv/apis/export.py                         |   3 +-
 easycv/models/backbones/efficientrep.py       | 126 -------
 easycv/models/backbones/yolo6_blocks.py       | 321 ------------------
 .../models/detection/detectors/yolox/yolox.py |   1 -
 easycv/models/loss/iou_loss.py                |  30 --
 easycv/toolkit/blade/cv_blade_utils.py        |   8 -
 6 files changed, 1 insertion(+), 488 deletions(-)
 delete mode 100644 easycv/models/backbones/efficientrep.py
 delete mode 100644 easycv/models/backbones/yolo6_blocks.py

diff --git a/easycv/apis/export.py b/easycv/apis/export.py
index 060743a5..3b22e168 100644
--- a/easycv/apis/export.py
+++ b/easycv/apis/export.py
@@ -190,8 +190,7 @@ def _export_yolox(model, cfg, filename):
         if LooseVersion(torch.__version__) < LooseVersion('1.7.0') and end2end:
             raise ValueError('`end2end` only support torch1.7.0 and later!')
 
-        batch_size = cfg.export.get('batch_size', 32)
-        # batch_size = cfg.export.get('batch_size', 1)
+        batch_size = cfg.export.get('batch_size', 1)
         static_opt = cfg.export.get('static_opt', True)
         img_scale = cfg.get('img_scale', (640, 640))
         assert (
diff --git a/easycv/models/backbones/efficientrep.py b/easycv/models/backbones/efficientrep.py
deleted file mode 100644
index 9ddc6268..00000000
--- a/easycv/models/backbones/efficientrep.py
+++ /dev/null
@@ -1,126 +0,0 @@
-import math
-
-import torch
-from torch import nn
-
-from easycv.models.backbones.yolo6_blocks import RepBlock, RepVGGBlock, SimSPPF
-
-
-class EfficientRep(nn.Module):
-    '''EfficientRep Backbone
-    EfficientRep is handcrafted by hardware-aware neural network design.
-    With rep-style struct, EfficientRep is friendly to high-computation hardware(e.g. GPU).
-    '''
-
-    def __init__(self, in_channels=3, channels_list=None, num_repeats=None):
-        super().__init__()
-
-        assert channels_list is not None
-        assert num_repeats is not None
-
-        self.stem = RepVGGBlock(
-            in_channels=in_channels,
-            out_channels=channels_list[0],
-            kernel_size=3,
-            stride=2)
-
-        self.ERBlock_2 = nn.Sequential(
-            RepVGGBlock(
-                in_channels=channels_list[0],
-                out_channels=channels_list[1],
-                kernel_size=3,
-                stride=2),
-            RepBlock(
-                in_channels=channels_list[1],
-                out_channels=channels_list[1],
-                n=num_repeats[1]))
-
-        self.ERBlock_3 = nn.Sequential(
-            RepVGGBlock(
-                in_channels=channels_list[1],
-                out_channels=channels_list[2],
-                kernel_size=3,
-                stride=2),
-            RepBlock(
-                in_channels=channels_list[2],
-                out_channels=channels_list[2],
-                n=num_repeats[2],
-            ))
-
-        self.ERBlock_4 = nn.Sequential(
-            RepVGGBlock(
-                in_channels=channels_list[2],
-                out_channels=channels_list[3],
-                kernel_size=3,
-                stride=2),
-            RepBlock(
-                in_channels=channels_list[3],
-                out_channels=channels_list[3],
-                n=num_repeats[3]))
-
-        self.ERBlock_5 = nn.Sequential(
-            RepVGGBlock(
-                in_channels=channels_list[3],
-                out_channels=channels_list[4],
-                kernel_size=3,
-                stride=2),
-            RepBlock(
-                in_channels=channels_list[4],
-                out_channels=channels_list[4],
-                n=num_repeats[4]),
-            SimSPPF(
-                in_channels=channels_list[4],
-                out_channels=channels_list[4],
-                kernel_size=5))
-
-    def forward(self, x):
-
-        outputs = []
-        x = self.stem(x)
-        x = self.ERBlock_2(x)
-        x = self.ERBlock_3(x)
-        outputs.append(x)
-        x = self.ERBlock_4(x)
-        outputs.append(x)
-        x = self.ERBlock_5(x)
-        outputs.append(x)
-
-        return tuple(outputs)
-
-
-if __name__ == '__main__':
-
-    from torchsummaryX import summary
-
-    depth_mul = 0.33
-    width_mul = 0.5
-    num_repeat_backbone = [1, 6, 12, 18, 6]
-    channels_list_backbone = [64, 128, 256, 512, 1024]
-    num_repeat_neck = [12, 12, 12, 12]
-    channels_list_neck = [256, 128, 128, 256, 256, 512]
-
-    channels = 3
-
-    num_repeat = [(max(round(i * depth_mul), 1) if i > 1 else i)
-                  for i in (num_repeat_backbone + num_repeat_neck)]
-
-    def make_divisible(x, divisor):
-        # Upward revision the value x to make it evenly divisible by the divisor.
-        return math.ceil(x / divisor) * divisor
-
-    channels_list = [
-        make_divisible(i * width_mul, 8)
-        for i in (channels_list_backbone + channels_list_neck)
-    ]
-    model = EfficientRep(
-        in_channels=channels,
-        channels_list=channels_list,
-        num_repeats=num_repeat)
-    for layer in model.modules():
-        if isinstance(layer, RepVGGBlock):
-            layer.switch_to_deploy()
-
-    model = model.cuda()
-
-    a = torch.randn(1, 3, 640, 640).cuda()
-    summary(model, a)
diff --git a/easycv/models/backbones/yolo6_blocks.py b/easycv/models/backbones/yolo6_blocks.py
deleted file mode 100644
index d04545f7..00000000
--- a/easycv/models/backbones/yolo6_blocks.py
+++ /dev/null
@@ -1,321 +0,0 @@
-#!/usr/bin/env python3
-
-import warnings
-from pathlib import Path
-
-import numpy as np
-import torch
-import torch.nn as nn
-
-
-class SiLU(nn.Module):
-    '''Activation of SiLU'''
-
-    @staticmethod
-    def forward(x):
-        return x * torch.sigmoid(x)
-
-
-class Conv(nn.Module):
-    '''Normal Conv with SiLU activation'''
-
-    def __init__(self,
-                 in_channels,
-                 out_channels,
-                 kernel_size,
-                 stride,
-                 groups=1,
-                 bias=False):
-        super().__init__()
-        padding = kernel_size // 2
-        self.conv = nn.Conv2d(
-            in_channels,
-            out_channels,
-            kernel_size=kernel_size,
-            stride=stride,
-            padding=padding,
-            groups=groups,
-            bias=bias,
-        )
-        self.bn = nn.BatchNorm2d(out_channels)
-        self.act = nn.SiLU()
-
-    def forward(self, x):
-        return self.act(self.bn(self.conv(x)))
-
-    def forward_fuse(self, x):
-        return self.act(self.conv(x))
-
-
-class SimConv(nn.Module):
-    '''Normal Conv with ReLU activation'''
-
-    def __init__(self,
-                 in_channels,
-                 out_channels,
-                 kernel_size,
-                 stride,
-                 groups=1,
-                 bias=False):
-        super().__init__()
-        padding = kernel_size // 2
-        self.conv = nn.Conv2d(
-            in_channels,
-            out_channels,
-            kernel_size=kernel_size,
-            stride=stride,
-            padding=padding,
-            groups=groups,
-            bias=bias,
-        )
-        self.bn = nn.BatchNorm2d(out_channels)
-        self.act = nn.ReLU()
-
-    def forward(self, x):
-        return self.act(self.bn(self.conv(x)))
-
-    def forward_fuse(self, x):
-        return self.act(self.conv(x))
-
-
-class SimSPPF(nn.Module):
-    '''Simplified SPPF with ReLU activation'''
-
-    def __init__(self, in_channels, out_channels, kernel_size=5):
-        super().__init__()
-        c_ = in_channels // 2  # hidden channels
-        self.cv1 = SimConv(in_channels, c_, 1, 1)
-        self.cv2 = SimConv(c_ * 4, out_channels, 1, 1)
-        self.m = nn.MaxPool2d(
-            kernel_size=kernel_size, stride=1, padding=kernel_size // 2)
-
-    def forward(self, x):
-        x = self.cv1(x)
-        with warnings.catch_warnings():
-            warnings.simplefilter('ignore')
-            y1 = self.m(x)
-            y2 = self.m(y1)
-            return self.cv2(torch.cat([x, y1, y2, self.m(y2)], 1))
-
-
-class Transpose(nn.Module):
-    '''Normal Transpose, default for upsampling'''
-
-    def __init__(self, in_channels, out_channels, kernel_size=2, stride=2):
-        super().__init__()
-        self.upsample_transpose = torch.nn.ConvTranspose2d(
-            in_channels=in_channels,
-            out_channels=out_channels,
-            kernel_size=kernel_size,
-            stride=stride,
-            bias=True)
-
-    def forward(self, x):
-        return self.upsample_transpose(x)
-
-
-class Concat(nn.Module):
-
-    def __init__(self, dimension=1):
-        super().__init__()
-        self.d = dimension
-
-    def forward(self, x):
-        return torch.cat(x, self.d)
-
-
-def conv_bn(in_channels, out_channels, kernel_size, stride, padding, groups=1):
-    '''Basic cell for rep-style block, including conv and bn'''
-    result = nn.Sequential()
-    result.add_module(
-        'conv',
-        nn.Conv2d(
-            in_channels=in_channels,
-            out_channels=out_channels,
-            kernel_size=kernel_size,
-            stride=stride,
-            padding=padding,
-            groups=groups,
-            bias=False))
-    result.add_module('bn', nn.BatchNorm2d(num_features=out_channels))
-    return result
-
-
-class RepBlock(nn.Module):
-    '''
-        RepBlock is a stage block with rep-style basic block
-    '''
-
-    def __init__(self, in_channels, out_channels, n=1):
-        super().__init__()
-        self.conv1 = RepVGGBlock(in_channels, out_channels)
-        self.block = nn.Sequential(*(RepVGGBlock(out_channels, out_channels)
-                                     for _ in range(n - 1))) if n > 1 else None
-
-    def forward(self, x):
-        x = self.conv1(x)
-        if self.block is not None:
-            x = self.block(x)
-        return x
-
-
-class RepVGGBlock(nn.Module):
-    '''RepVGGBlock is a basic rep-style block, including training and deploy status
-    This code is based on https://github.com/DingXiaoH/RepVGG/blob/main/repvgg.py
-    '''
-
-    def __init__(self,
-                 in_channels,
-                 out_channels,
-                 kernel_size=3,
-                 stride=1,
-                 padding=1,
-                 dilation=1,
-                 groups=1,
-                 padding_mode='zeros',
-                 deploy=False,
-                 use_se=False):
-        super(RepVGGBlock, self).__init__()
-        """ Intialization of the class.
-        Args:
-            in_channels (int): Number of channels in the input image
-            out_channels (int): Number of channels produced by the convolution
-            kernel_size (int or tuple): Size of the convolving kernel
-            stride (int or tuple, optional): Stride of the convolution. Default: 1
-            padding (int or tuple, optional): Zero-padding added to both sides of
-                the input. Default: 1
-            dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
-            groups (int, optional): Number of blocked connections from input
-                channels to output channels. Default: 1
-            padding_mode (string, optional): Default: 'zeros'
-            deploy: Whether to be deploy status or training status. Default: False
-            use_se: Whether to use se. Default: False
-        """
-        self.deploy = deploy
-        self.groups = groups
-        self.in_channels = in_channels
-        self.out_channels = out_channels
-
-        assert kernel_size == 3
-        assert padding == 1
-
-        padding_11 = padding - kernel_size // 2
-
-        self.nonlinearity = nn.ReLU()
-
-        if use_se:
-            raise NotImplementedError('se block not supported yet')
-        else:
-            self.se = nn.Identity()
-
-        if deploy:
-            self.rbr_reparam = nn.Conv2d(
-                in_channels=in_channels,
-                out_channels=out_channels,
-                kernel_size=kernel_size,
-                stride=stride,
-                padding=padding,
-                dilation=dilation,
-                groups=groups,
-                bias=True,
-                padding_mode=padding_mode)
-
-        else:
-            self.rbr_identity = nn.BatchNorm2d(
-                num_features=in_channels
-            ) if out_channels == in_channels and stride == 1 else None
-            self.rbr_dense = conv_bn(
-                in_channels=in_channels,
-                out_channels=out_channels,
-                kernel_size=kernel_size,
-                stride=stride,
-                padding=padding,
-                groups=groups)
-            self.rbr_1x1 = conv_bn(
-                in_channels=in_channels,
-                out_channels=out_channels,
-                kernel_size=1,
-                stride=stride,
-                padding=padding_11,
-                groups=groups)
-
-    def forward(self, inputs):
-        '''Forward process'''
-        if hasattr(self, 'rbr_reparam'):
-            return self.nonlinearity(self.se(self.rbr_reparam(inputs)))
-
-        if self.rbr_identity is None:
-            id_out = 0
-        else:
-            id_out = self.rbr_identity(inputs)
-
-        return self.nonlinearity(
-            self.se(self.rbr_dense(inputs) + self.rbr_1x1(inputs) + id_out))
-
-    def get_equivalent_kernel_bias(self):
-        kernel3x3, bias3x3 = self._fuse_bn_tensor(self.rbr_dense)
-        kernel1x1, bias1x1 = self._fuse_bn_tensor(self.rbr_1x1)
-        kernelid, biasid = self._fuse_bn_tensor(self.rbr_identity)
-        return kernel3x3 + self._pad_1x1_to_3x3_tensor(
-            kernel1x1) + kernelid, bias3x3 + bias1x1 + biasid
-
-    def _pad_1x1_to_3x3_tensor(self, kernel1x1):
-        if kernel1x1 is None:
-            return 0
-        else:
-            return torch.nn.functional.pad(kernel1x1, [1, 1, 1, 1])
-
-    def _fuse_bn_tensor(self, branch):
-        if branch is None:
-            return 0, 0
-        if isinstance(branch, nn.Sequential):
-            kernel = branch.conv.weight
-            running_mean = branch.bn.running_mean
-            running_var = branch.bn.running_var
-            gamma = branch.bn.weight
-            beta = branch.bn.bias
-            eps = branch.bn.eps
-        else:
-            assert isinstance(branch, nn.BatchNorm2d)
-            if not hasattr(self, 'id_tensor'):
-                input_dim = self.in_channels // self.groups
-                kernel_value = np.zeros((self.in_channels, input_dim, 3, 3),
-                                        dtype=np.float32)
-                for i in range(self.in_channels):
-                    kernel_value[i, i % input_dim, 1, 1] = 1
-                self.id_tensor = torch.from_numpy(kernel_value).to(
-                    branch.weight.device)
-            kernel = self.id_tensor
-            running_mean = branch.running_mean
-            running_var = branch.running_var
-            gamma = branch.weight
-            beta = branch.bias
-            eps = branch.eps
-        std = (running_var + eps).sqrt()
-        t = (gamma / std).reshape(-1, 1, 1, 1)
-        return kernel * t, beta - running_mean * gamma / std
-
-    def switch_to_deploy(self):
-        if hasattr(self, 'rbr_reparam'):
-            return
-        kernel, bias = self.get_equivalent_kernel_bias()
-        self.rbr_reparam = nn.Conv2d(
-            in_channels=self.rbr_dense.conv.in_channels,
-            out_channels=self.rbr_dense.conv.out_channels,
-            kernel_size=self.rbr_dense.conv.kernel_size,
-            stride=self.rbr_dense.conv.stride,
-            padding=self.rbr_dense.conv.padding,
-            dilation=self.rbr_dense.conv.dilation,
-            groups=self.rbr_dense.conv.groups,
-            bias=True)
-        self.rbr_reparam.weight.data = kernel
-        self.rbr_reparam.bias.data = bias
-        for para in self.parameters():
-            para.detach_()
-        self.__delattr__('rbr_dense')
-        self.__delattr__('rbr_1x1')
-        if hasattr(self, 'rbr_identity'):
-            self.__delattr__('rbr_identity')
-        if hasattr(self, 'id_tensor'):
-            self.__delattr__('id_tensor')
-        self.deploy = True
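
The RepVGG-style block removed above folds each conv+BN branch into a single convolution at deploy time; _fuse_bn_tensor computes W_fused = W * gamma / sqrt(var + eps) and b_fused = beta - mean * gamma / sqrt(var + eps). A small self-contained check of that folding identity, independent of the deleted module, is:

import torch
import torch.nn as nn

torch.manual_seed(0)
conv = nn.Conv2d(8, 8, 3, padding=1, bias=False)
bn = nn.BatchNorm2d(8).eval()
# Give BN non-trivial statistics and affine parameters so the check is meaningful.
bn.running_mean.uniform_(-1, 1)
bn.running_var.uniform_(0.5, 2.0)
bn.weight.data.uniform_(0.5, 1.5)
bn.bias.data.uniform_(-0.5, 0.5)

with torch.no_grad():
    std = (bn.running_var + bn.eps).sqrt()
    t = (bn.weight / std).reshape(-1, 1, 1, 1)
    fused = nn.Conv2d(8, 8, 3, padding=1, bias=True)
    fused.weight.copy_(conv.weight * t)
    fused.bias.copy_(bn.bias - bn.running_mean * bn.weight / std)

    x = torch.randn(1, 8, 16, 16)
    print(torch.allclose(bn(conv(x)), fused(x), atol=1e-5))  # expected: True
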
diff --git a/easycv/models/detection/detectors/yolox/yolox.py b/easycv/models/detection/detectors/yolox/yolox.py
index d0b2aae2..34f5aa52 100644
--- a/easycv/models/detection/detectors/yolox/yolox.py
+++ b/easycv/models/detection/detectors/yolox/yolox.py
@@ -11,7 +11,6 @@
 from easycv.models.builder import MODELS
 from easycv.models.detection.utils import postprocess
 from .tood_head import TOODHead
-# from .ppyoloe_head import PPYOLOEHead
 from .yolo_head import YOLOXHead
 from .yolo_pafpn import YOLOPAFPN
 
diff --git a/easycv/models/loss/iou_loss.py b/easycv/models/loss/iou_loss.py
index f4fbdfd8..e11b812a 100644
--- a/easycv/models/loss/iou_loss.py
+++ b/easycv/models/loss/iou_loss.py
@@ -443,33 +443,3 @@ def forward(self, pred, target):
             loss = loss.mean()
         elif self.reduction == 'sum':
             loss = loss.sum()
-
-    # def forward(self,
-    #             pred,
-    #             target,
-    #             weight=None,
-    #             avg_factor=None,
-    #             reduction_override=None,
-    #             **kwargs):
-    #     if weight is not None and not torch.any(weight > 0):
-    #         if pred.dim() == weight.dim() + 1:
-    #             weight = weight.unsqueeze(1)
-    #         return (pred * weight).sum()  # 0
-    #     assert reduction_override in (None, 'none', 'mean', 'sum')
-    #     reduction = (
-    #         reduction_override if reduction_override else self.reduction)
-    #     if weight is not None and weight.dim() > 1:
-    #         # TODO: remove this in the future
-    #         # reduce the weight of shape (n, 4) to (n,) to match the
-    #         # giou_loss of shape (n,)
-    #         assert weight.shape == pred.shape
-    #         weight = weight.mean(-1)
-    #     loss = self.loss_weight * giou_loss(
-    #         pred,
-    #         target,
-    #         weight,
-    #         eps=self.eps,
-    #         reduction=reduction,
-    #         avg_factor=avg_factor,
-    #         **kwargs)
-    #     return loss
diff --git a/easycv/toolkit/blade/cv_blade_utils.py b/easycv/toolkit/blade/cv_blade_utils.py
index 01f1545e..8191c646 100644
--- a/easycv/toolkit/blade/cv_blade_utils.py
+++ b/easycv/toolkit/blade/cv_blade_utils.py
@@ -293,14 +293,6 @@ def blade_optimize(script_model,
         test_result = opt_model(*inputs)
         torch.cuda.synchronize()
 
-    # output = model(*inputs)
-    # if blade_config.get('enable_fp16', True):
-    #     with opt_blade_mixprec():
-    #         test_result = model(*inputs)
-    # else:
-    # test_result = opt_model(*inputs)
-    # test_result = opt_model(*inputs)
-
     torch.cuda.synchronize()
     cu_prof_start()
     for k in range(10):

From 81a2051e04e0292b48394a15e906754b427f0396 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=91=A8=E6=A5=BC?= <zhoulou.wzh@alibaba-inc.com>
Date: Wed, 10 Aug 2022 11:31:39 +0800
Subject: [PATCH 32/69] rm compute model params

---
 compute_model_params.py | 313 ----------------------------------------
 1 file changed, 313 deletions(-)
 delete mode 100644 compute_model_params.py

diff --git a/compute_model_params.py b/compute_model_params.py
deleted file mode 100644
index c50f8ebb..00000000
--- a/compute_model_params.py
+++ /dev/null
@@ -1,313 +0,0 @@
-# Copyright (c) 2014-2021 Megvii Inc And Alibaba PAI-Teams. All rights reserved.
-
-import math
-
-import torch
-import torch.nn as nn
-from torchsummaryX import summary
-
-from easycv.models.backbones.darknet import CSPDarknet
-from easycv.models.backbones.efficientrep import EfficientRep
-from easycv.models.backbones.network_blocks import (BaseConv, CSPLayer, DWConv,
-                                                    GSConv, VoVGSCSP)
-
-
-def make_divisible(x, divisor):
-    # Upward revision the value x to make it evenly divisible by the divisor.
-    return math.ceil(x / divisor) * divisor
-
-
-class YOLOPAFPN(nn.Module):
-    """
-    YOLOv3 model. Darknet 53 is the default backbone of this model.
-    """
-
-    def __init__(
-        self,
-        depth=1.0,
-        width=1.0,
-        in_features=('dark3', 'dark4', 'dark5'),
-        in_channels=[256, 512, 1024],
-        depthwise=False,
-        act='silu',
-        asff_channel=16,
-        use_att=None,
-        expand_kernel=3,
-        down_rate=32,
-        use_dconv=False,
-        use_expand=True,
-        spp_type='spp',
-        backbone='CSPDarknet',
-        neck='gsconv',
-        neck_mode='part',
-    ):
-        super().__init__()
-        # self.backbone = CSPDarknet(depth, width, depthwise=depthwise, act=act,spp_type=spp_type)
-        self.backbone_name = backbone
-        if backbone == 'CSPDarknet':
-            self.backbone = CSPDarknet(
-                depth, width, depthwise=depthwise, act=act)
-        else:
-            depth_mul = depth
-            width_mul = width
-            num_repeat_backbone = [1, 6, 12, 18, 6]
-            channels_list_backbone = [64, 128, 256, 512, 1024]
-            num_repeat_neck = [12, 12, 12, 12]
-            channels_list_neck = [256, 128, 128, 256, 256, 512]
-
-            channels = 3
-
-            num_repeat = [(max(round(i * depth_mul), 1) if i > 1 else i)
-                          for i in (num_repeat_backbone + num_repeat_neck)]
-
-            channels_list = [
-                make_divisible(i * width_mul, 8)
-                for i in (channels_list_backbone + channels_list_neck)
-            ]
-            self.backbone = EfficientRep(
-                in_channels=channels,
-                channels_list=channels_list,
-                num_repeats=num_repeat)
-
-        self.in_features = in_features
-        self.in_channels = in_channels
-        Conv = DWConv if depthwise else BaseConv
-
-        self.neck = neck
-        self.neck_mode = neck_mode
-
-        if neck == 'yolo':
-            self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
-            self.lateral_conv0 = BaseConv(
-                int(in_channels[2] * width),
-                int(in_channels[1] * width),
-                1,
-                1,
-                act=act)
-            self.C3_p4 = CSPLayer(
-                int(2 * in_channels[1] * width),
-                int(in_channels[1] * width),
-                round(3 * depth),
-                False,
-                depthwise=depthwise,
-                act=act)  # cat
-
-            self.reduce_conv1 = BaseConv(
-                int(in_channels[1] * width),
-                int(in_channels[0] * width),
-                1,
-                1,
-                act=act)
-            self.C3_p3 = CSPLayer(
-                int(2 * in_channels[0] * width),
-                int(in_channels[0] * width),
-                round(3 * depth),
-                False,
-                depthwise=depthwise,
-                act=act)
-
-            # bottom-up conv
-            self.bu_conv2 = Conv(
-                int(in_channels[0] * width),
-                int(in_channels[0] * width),
-                3,
-                2,
-                act=act)
-            self.C3_n3 = CSPLayer(
-                int(2 * in_channels[0] * width),
-                int(in_channels[1] * width),
-                round(3 * depth),
-                False,
-                depthwise=depthwise,
-                act=act)
-
-            # bottom-up conv
-            self.bu_conv1 = Conv(
-                int(in_channels[1] * width),
-                int(in_channels[1] * width),
-                3,
-                2,
-                act=act)
-            self.C3_n4 = CSPLayer(
-                int(2 * in_channels[1] * width),
-                int(in_channels[2] * width),
-                round(3 * depth),
-                False,
-                depthwise=depthwise,
-                act=act)
-        else:
-            # gsconv
-            self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
-            self.gsconv1 = GSConv(
-                int(in_channels[2] * width),
-                int(in_channels[1] * width),
-                1,
-                1,
-                act=act)
-
-            self.gsconv2 = GSConv(
-                int(in_channels[1] * width),
-                int(in_channels[0] * width),
-                1,
-                1,
-                act=act)
-
-            self.gsconv4 = GSConv(
-                int(in_channels[0] * width),
-                int(in_channels[0] * width),
-                3,
-                2,
-                act=act)
-
-            self.gsconv5 = GSConv(
-                int(in_channels[1] * width),
-                int(in_channels[1] * width),
-                3,
-                2,
-                act=act)
-
-            if self.neck_mode == 'all':
-                self.vovGSCSP1 = VoVGSCSP(
-                    int(2 * in_channels[1] * width),
-                    int(in_channels[1] * width),
-                    round(3 * depth),
-                    False,
-                )
-
-                self.gsconv3 = GSConv(
-                    int(2 * in_channels[0] * width),
-                    int(2 * in_channels[0] * width),
-                    1,
-                    1,
-                    act=act)
-                self.vovGSCSP2 = VoVGSCSP(
-                    int(2 * in_channels[0] * width),
-                    int(in_channels[0] * width),
-                    round(3 * depth),
-                    False,
-                )
-
-                self.vovGSCSP3 = VoVGSCSP(
-                    int(2 * in_channels[0] * width),
-                    int(in_channels[1] * width),
-                    round(3 * depth),
-                    False,
-                )
-
-                self.vovGSCSP4 = VoVGSCSP(
-                    int(2 * in_channels[1] * width),
-                    int(in_channels[2] * width),
-                    round(3 * depth),
-                    False,
-                )
-            else:
-                self.C3_p4 = CSPLayer(
-                    int(2 * in_channels[1] * width),
-                    int(in_channels[1] * width),
-                    round(3 * depth),
-                    False,
-                    depthwise=depthwise,
-                    act=act)  # cat
-
-                self.C3_p3 = CSPLayer(
-                    int(2 * in_channels[0] * width),
-                    int(in_channels[0] * width),
-                    round(3 * depth),
-                    False,
-                    depthwise=depthwise,
-                    act=act)
-
-                self.C3_n3 = CSPLayer(
-                    int(2 * in_channels[0] * width),
-                    int(in_channels[1] * width),
-                    round(3 * depth),
-                    False,
-                    depthwise=depthwise,
-                    act=act)
-
-                self.C3_n4 = CSPLayer(
-                    int(2 * in_channels[1] * width),
-                    int(in_channels[2] * width),
-                    round(3 * depth),
-                    False,
-                    depthwise=depthwise,
-                    act=act)
-
-    def forward(self, input):
-        """
-        Args:
-            inputs: input images.
-
-        Returns:
-            Tuple[Tensor]: FPN feature.
-        """
-
-        #  backbone
-        # out_features = self.backbone(input)
-        # features = [out_features[f] for f in self.in_features]
-        # [x2, x1, x0] = features
-        #  backbone
-        x2, x1, x0 = x
-        if self.neck == 'yolo':
-            fpn_out0 = self.lateral_conv0(x0)  # 1024->512/32
-            f_out0 = self.upsample(fpn_out0)  # 512/16
-            f_out0 = torch.cat([f_out0, x1], 1)  # 512->1024/16
-            f_out0 = self.C3_p4(f_out0)  # 1024->512/16
-
-            fpn_out1 = self.reduce_conv1(f_out0)  # 512->256/16
-            f_out1 = self.upsample(fpn_out1)  # 256/8
-            f_out1 = torch.cat([f_out1, x2], 1)  # 256->512/8
-            pan_out2 = self.C3_p3(f_out1)  # 512->256/8
-
-            p_out1 = self.bu_conv2(pan_out2)  # 256->256/16
-            p_out1 = torch.cat([p_out1, fpn_out1], 1)  # 256->512/16
-            pan_out1 = self.C3_n3(p_out1)  # 512->512/16
-
-            p_out0 = self.bu_conv1(pan_out1)  # 512->512/32
-            p_out0 = torch.cat([p_out0, fpn_out0], 1)  # 512->1024/32
-            pan_out0 = self.C3_n4(p_out0)  # 1024->1024/32
-        else:
-            print('in')
-            # gsconv
-            fpn_out0 = self.gsconv1(x0)  # 1024->512/32
-            f_out0 = self.upsample(fpn_out0)  # 512/16
-            f_out0 = torch.cat([f_out0, x1], 1)  # 512->1024/16
-            if self.neck_mode == 'all':
-                f_out0 = self.vovGSCSP1(f_out0)  # 1024->512/16
-            else:
-                f_out0 = self.C3_p4(f_out0)
-
-            fpn_out1 = self.gsconv2(f_out0)  # 512->256/16
-            f_out1 = self.upsample(fpn_out1)  # 256/8
-            f_out1 = torch.cat([f_out1, x2], 1)  # 256->512/8
-            if self.neck_mode == 'all':
-                f_out1 = self.gsconv3(f_out1)
-                pan_out2 = self.vovGSCSP2(f_out1)  # 512->256/8
-            else:
-                pan_out2 = self.C3_p3(f_out1)  # 512->256/8
-
-            p_out1 = self.gsconv4(pan_out2)  # 256->256/16
-            p_out1 = torch.cat([p_out1, fpn_out1], 1)  # 256->512/16
-            if self.neck_mode == 'all':
-                pan_out1 = self.vovGSCSP3(p_out1)  # 512->512/16
-            else:
-                pan_out1 = self.C3_n3(p_out1)  # 512->512/16
-
-            p_out0 = self.gsconv5(pan_out1)  # 512->512/32
-            p_out0 = torch.cat([p_out0, fpn_out0], 1)  # 512->1024/32
-            if self.neck_mode == 'all':
-                pan_out0 = self.vovGSCSP4(p_out0)  # 1024->1024/32
-            else:
-                pan_out0 = self.C3_n4(p_out0)  # 1024->1024/32
-
-        outputs = (pan_out2, pan_out1, pan_out0)
-
-        return outputs
-
-
-if __name__ == '__main__':
-    x = (torch.randn(1, 128, 80, 80).cuda(), torch.randn(1, 256, 40,
-                                                         40).cuda(),
-         torch.randn(1, 512, 20, 20).cuda())
-    model = YOLOPAFPN(depth=0.33, width=0.5).cuda()
-    summary(model, x)
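
The removed forward above wires the features through a standard PAFPN: a top-down pass that upsamples and fuses the deeper maps, then a bottom-up pass that downsamples and fuses again, with either CSPLayer or GSConv/VoVGSCSP blocks doing the channel mixing. A minimal standalone sketch of that data flow, using plain 1x1/3x3 convolutions as stand-ins for the repository's blocks (shapes follow the width=0.5 example inputs), is:

import torch
import torch.nn as nn
import torch.nn.functional as F

class TinyPAN(nn.Module):
    """Sketch of the top-down + bottom-up fusion in the removed forward()."""

    def __init__(self, c2=128, c1=256, c0=512):
        super().__init__()
        self.lateral_conv0 = nn.Conv2d(c0, c1, 1)   # 512 -> 256, /32
        self.reduce_conv1 = nn.Conv2d(c1, c2, 1)    # 256 -> 128, /16
        self.fuse_p4 = nn.Conv2d(2 * c1, c1, 1)     # stand-in for C3_p4 / vovGSCSP1
        self.fuse_p3 = nn.Conv2d(2 * c2, c2, 1)     # stand-in for C3_p3 / vovGSCSP2
        self.bu_conv2 = nn.Conv2d(c2, c2, 3, 2, 1)  # downsample /8 -> /16
        self.bu_conv1 = nn.Conv2d(c1, c1, 3, 2, 1)  # downsample /16 -> /32
        self.fuse_n3 = nn.Conv2d(2 * c2, c1, 1)     # stand-in for C3_n3 / vovGSCSP3
        self.fuse_n4 = nn.Conv2d(2 * c1, c0, 1)     # stand-in for C3_n4 / vovGSCSP4

    def forward(self, feats):
        x2, x1, x0 = feats                                     # /8, /16, /32 features
        fpn_out0 = self.lateral_conv0(x0)                      # compress deepest map
        f_out0 = self.fuse_p4(
            torch.cat([F.interpolate(fpn_out0, scale_factor=2), x1], 1))
        fpn_out1 = self.reduce_conv1(f_out0)
        pan_out2 = self.fuse_p3(
            torch.cat([F.interpolate(fpn_out1, scale_factor=2), x2], 1))  # /8 output
        pan_out1 = self.fuse_n3(
            torch.cat([self.bu_conv2(pan_out2), fpn_out1], 1))            # /16 output
        pan_out0 = self.fuse_n4(
            torch.cat([self.bu_conv1(pan_out1), fpn_out0], 1))            # /32 output
        return pan_out2, pan_out1, pan_out0

feats = (torch.rand(1, 128, 80, 80), torch.rand(1, 256, 40, 40),
         torch.rand(1, 512, 20, 20))
print([o.shape for o in TinyPAN()(feats)])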

From cac888a1aea9675c9a92b68672bed968c48bc473 Mon Sep 17 00:00:00 2001
From: zouxinyi0625 <zouxinyi.zxy@alibaba-inc.com>
Date: Fri, 12 Aug 2022 11:28:17 +0800
Subject: [PATCH 33/69] modify asff v1

---
 .../models/detection/detectors/yolox/ASFF.py  | 48 +-------------
 .../detection/detectors/yolox/ASFF_sim.py     | 66 +++++++++----------
 2 files changed, 36 insertions(+), 78 deletions(-)

diff --git a/easycv/models/detection/detectors/yolox/ASFF.py b/easycv/models/detection/detectors/yolox/ASFF.py
index afda8bf0..3ad7d65d 100644
--- a/easycv/models/detection/detectors/yolox/ASFF.py
+++ b/easycv/models/detection/detectors/yolox/ASFF.py
@@ -1,51 +1,7 @@
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
-
-from easycv.models.backbones.network_blocks import SiLU
-
-
-def autopad(k, p=None):  # kernel, padding
-    # Pad to 'same'
-    if p is None:
-        p = k // 2 if isinstance(k, int) else [x // 2 for x in k]  # auto-pad
-    return p
-
-
-def get_activation(name='silu', inplace=True):
-    if name == 'silu':
-        module = SiLU(inplace=inplace)
-    elif name == 'relu':
-        module = nn.ReLU(inplace=inplace)
-    elif name == 'lrelu':
-        module = nn.LeakyReLU(0.1, inplace=inplace)
-    else:
-        raise AttributeError('Unsupported act type: {}'.format(name))
-    return module
-
-
-class Conv(nn.Module):
-    # Standard convolution
-    def __init__(self,
-                 c1,
-                 c2,
-                 k=1,
-                 s=1,
-                 p=None,
-                 g=1,
-                 act='silu'):  # ch_in, ch_out, kernel, stride, padding, groups
-        super(Conv, self).__init__()
-        self.conv = nn.Conv2d(
-            c1, c2, k, s, autopad(k, p), groups=g, bias=False)
-        self.bn = nn.BatchNorm2d(c2)
-        self.act = get_activation(act, inplace=True)
-
-    def forward(self, x):
-        return self.act(self.bn(self.conv(x)))
-
-    def forward_fuse(self, x):
-        return self.act(self.conv(x))
-
+from easycv.models.backbones.network_blocks import BaseConv, DWConv, SiLU
 
 class ASFF(nn.Module):
 
@@ -71,6 +27,8 @@ def __init__(self,
             int(256 * multiplier)
         ]
 
+        Conv = BaseConv
+
         self.inter_dim = self.dim[self.level]
         if level == 0:
             self.stride_level_1 = Conv(
diff --git a/easycv/models/detection/detectors/yolox/ASFF_sim.py b/easycv/models/detection/detectors/yolox/ASFF_sim.py
index da5e1ee9..65b65184 100644
--- a/easycv/models/detection/detectors/yolox/ASFF_sim.py
+++ b/easycv/models/detection/detectors/yolox/ASFF_sim.py
@@ -269,36 +269,36 @@ def forward(self, x):  # l,m,s
             return out
 
 
-if __name__ == '__main__':
-    width = 0.5
-    num_classes = 80
-    in_channels = [256, 512, 1024]
-
-    asff_channel = 2
-    act = 'relu'
-
-    asff_1 = ASFF(
-        level=0, multiplier=width, asff_channel=asff_channel, act=act).cuda()
-    asff_2 = ASFF(
-        level=1, multiplier=width, asff_channel=asff_channel, act=act).cuda()
-    asff_3 = ASFF(
-        level=2, multiplier=width, asff_channel=asff_channel, act=act).cuda()
-
-    input = (torch.rand(1, 128, 80, 80).cuda(), torch.rand(1, 256, 40,
-                                                           40).cuda(),
-             torch.rand(1, 512, 20, 20).cuda())
-
-    # flops, params = get_model_complexity_info(asff_1, input, as_strings=True,
-    #                                           print_per_layer_stat=True)
-    # print('Flops:  ' + flops)
-    # print('Params: ' + params)
-
-    # input = torch.randn(1, 3, 640, 640).cuda()
-    # flops, params = profile(asff_1, inputs=(input,))
-    # print('flops: {}, params: {}'.format(flops, params))
-
-    from torchsummaryX import summary
-
-    summary(asff_1, input)
-    summary(asff_2, input)
-    summary(asff_3, input)
+# if __name__ == '__main__':
+#     width = 0.5
+#     num_classes = 80
+#     in_channels = [256, 512, 1024]
+#
+#     asff_channel = 2
+#     act = 'relu'
+#
+#     asff_1 = ASFF(
+#         level=0, multiplier=width, asff_channel=asff_channel, act=act).cuda()
+#     asff_2 = ASFF(
+#         level=1, multiplier=width, asff_channel=asff_channel, act=act).cuda()
+#     asff_3 = ASFF(
+#         level=2, multiplier=width, asff_channel=asff_channel, act=act).cuda()
+#
+#     input = (torch.rand(1, 128, 80, 80).cuda(), torch.rand(1, 256, 40,
+#                                                            40).cuda(),
+#              torch.rand(1, 512, 20, 20).cuda())
+#
+#     # flops, params = get_model_complexity_info(asff_1, input, as_strings=True,
+#     #                                           print_per_layer_stat=True)
+#     # print('Flops:  ' + flops)
+#     # print('Params: ' + params)
+#
+#     # input = torch.randn(1, 3, 640, 640).cuda()
+#     # flops, params = profile(asff_1, inputs=(input,))
+#     # print('flops: {}, params: {}'.format(flops, params))
+#
+#     from torchsummaryX import summary
+#
+#     summary(asff_1, input)
+#     summary(asff_2, input)
+#     summary(asff_3, input)
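
With this patch the ASFF blocks are built on easycv's BaseConv instead of a local Conv copy, so the smoke test commented out above can be reproduced without the extra helpers. A minimal CPU variant (a sketch, assuming easycv is importable; the keyword arguments follow the constructor as it stands at this point in the series) is:

import torch
from easycv.models.detection.detectors.yolox.ASFF import ASFF

# /8, /16 and /32 features for the s-model widths (multiplier=0.5)
feats = (torch.rand(1, 128, 80, 80), torch.rand(1, 256, 40, 40),
         torch.rand(1, 512, 20, 20))

for level in range(3):
    asff = ASFF(level=level, multiplier=0.5, asff_channel=2, act='relu').eval()
    with torch.no_grad():
        out = asff(feats)
    # level 0 fuses at /32, level 1 at /16, level 2 at /8
    print(level, out.shape)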

From 8dbe3606a8488c1406fc526d346079e4418fb2b5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=91=A8=E6=A5=BC?= <zhoulou.wzh@alibaba-inc.com>
Date: Fri, 12 Aug 2022 15:57:16 +0800
Subject: [PATCH 34/69] add trt nms demo for detection

---
 easycv/apis/export.py                         |   2 +-
 .../detection/detectors/yolox/postprocess.py  | 134 ++++++++++++++++++
 .../detection/detectors/yolox/tood_head.py    |  23 ++-
 .../models/detection/detectors/yolox/yolox.py |   1 +
 easycv/models/detection/utils/boxes.py        |   5 +
 test.py                                       | 113 +++++++++++++++
 6 files changed, 262 insertions(+), 16 deletions(-)
 create mode 100644 easycv/models/detection/detectors/yolox/postprocess.py
 create mode 100755 test.py

diff --git a/easycv/apis/export.py b/easycv/apis/export.py
index 3b22e168..40ce0197 100644
--- a/easycv/apis/export.py
+++ b/easycv/apis/export.py
@@ -53,7 +53,7 @@ def export(cfg, ckpt_path, filename):
         load_checkpoint(model, ckpt_path, map_location='cpu')
     else:
         cfg.model.backbone.pretrained = False
-    model = reparameterize_models(model)
+    # model = reparameterize_models(model)
 
     if isinstance(model, MOCO) or isinstance(model, DINO):
         _export_moco(model, cfg, filename)
diff --git a/easycv/models/detection/detectors/yolox/postprocess.py b/easycv/models/detection/detectors/yolox/postprocess.py
new file mode 100644
index 00000000..27e9b76a
--- /dev/null
+++ b/easycv/models/detection/detectors/yolox/postprocess.py
@@ -0,0 +1,134 @@
+from torch import nn
+import torch
+
+class TRT8_NMS(torch.autograd.Function):
+    '''TensorRT NMS operation'''
+    @staticmethod
+    def forward(
+        ctx,
+        boxes,
+        scores,
+        background_class=-1,
+        box_coding=1,
+        iou_threshold=0.45,
+        max_output_boxes=100,
+        plugin_version="1",
+        score_activation=0,
+        score_threshold=0.25,
+    ):
+        batch_size, num_boxes, num_classes = scores.shape
+        num_det = torch.randint(0, max_output_boxes, (batch_size, 1), dtype=torch.int32)
+        det_boxes = torch.randn(batch_size, max_output_boxes, 4)
+        det_scores = torch.randn(batch_size, max_output_boxes)
+        det_classes = torch.randint(0, num_classes, (batch_size, max_output_boxes), dtype=torch.int32)
+        return num_det, det_boxes, det_scores, det_classes
+
+    @staticmethod
+    def symbolic(g,
+                 boxes,
+                 scores,
+                 background_class=-1,
+                 box_coding=1,
+                 iou_threshold=0.45,
+                 max_output_boxes=100,
+                 plugin_version="1",
+                 score_activation=0,
+                 score_threshold=0.25):
+        out = g.op("TRT::EfficientNMS_TRT",
+                   boxes,
+                   scores,
+                   background_class_i=background_class,
+                   box_coding_i=box_coding,
+                   iou_threshold_f=iou_threshold,
+                   max_output_boxes_i=max_output_boxes,
+                   plugin_version_s=plugin_version,
+                   score_activation_i=score_activation,
+                   score_threshold_f=score_threshold,
+                   outputs=4)
+        nums, boxes, scores, classes = out
+        return nums, boxes, scores, classes
+
+class ONNX_TRT8(nn.Module):
+    '''onnx module with TensorRT NMS operation.'''
+    def __init__(self, max_obj=100, iou_thres=0.45, score_thres=0.25, max_wh=None, device=None):
+        super().__init__()
+        assert max_wh is None
+        self.device = device if device else torch.device('cpu')
+        self.background_class = -1
+        self.box_coding = 1
+        self.iou_threshold = iou_thres
+        self.max_obj = max_obj
+        self.plugin_version = '1'
+        self.score_activation = 0
+        self.score_threshold = score_thres
+
+    def forward(self, x):
+        box = x[:, :, :4]
+        conf = x[:, :, 4:5]
+        score = x[:, :, 5:]
+        score *= conf
+        num_det, det_boxes, det_scores, det_classes = TRT8_NMS.apply(box, score, self.background_class, self.box_coding,
+                                                                    self.iou_threshold, self.max_obj,
+                                                                    self.plugin_version, self.score_activation,
+                                                                    self.score_threshold)
+        return num_det, det_boxes, det_scores, det_classes
+
+def create_tensorrt_postprocess(example_scores, iou_thres, score_thres):
+    from torch_blade import tensorrt
+    import torch_blade._torch_blade._backends as backends
+    import io
+
+    model = torch.jit.trace(ONNX_TRT8(iou_thres=iou_thres, score_thres=score_thres), example_scores)
+    example_outputs = model(example_scores)
+
+    input_names=['input']
+    output_names=['num_det', 'det_boxes', 'det_example_scores', 'det_classes']
+    with io.BytesIO() as onnx_proto_f:
+        torch.onnx.export(
+            model,
+            example_scores,
+            onnx_proto_f,
+            input_names=input_names,
+            output_names=output_names,
+            example_outputs=example_outputs
+        )
+        onnx_proto = onnx_proto_f.getvalue()
+
+    def _copy_meta(data, name, dtype, sizes):
+        data.name = name
+        if dtype.is_floating_point:
+            data.dtype = "Float"
+        else:
+            data.dtype = "Int"
+        data.sizes = sizes
+        return data
+
+    state = backends.EngineState()
+    state.inputs = [
+        _copy_meta(backends.TensorInfo(), name, tensor.dtype, list(tensor.shape))
+        for name, tensor in zip(input_names, [example_scores])
+    ]
+    state.outputs = [
+        _copy_meta(backends.TensorInfo(), name, tensor.dtype, [])
+        for name, tensor in zip(output_names, example_outputs)
+    ]
+    state = tensorrt.cvt_onnx_to_tensorrt(onnx_proto, state, [], dict())
+    class Model(torch.nn.Module):
+        def __init__(self, state):
+            super().__init__()
+            self._trt_engine_ext = backends.create_engine(state)
+    
+        def forward(self, x):
+            return self._trt_engine_ext.execute([x])
+    trt_ext = torch.jit.script(Model(state))
+    return trt_ext
+
+
+if __name__=="__main__":
+    bs = 32
+    num_boxes = 100
+    num_classes = 2
+    example_scores = torch.randn([bs, num_boxes, 4 + 1 + num_classes], dtype=torch.float32)
+    trt_ext = create_tensorrt_postprocess(example_scores, iou_thres=0.45, score_thres=0.25)
+    out = trt_ext.forward(example_scores)
+    print(out)
\ No newline at end of file
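
The EfficientNMS_TRT plugin that the symbolic() above maps to returns fixed-size, padded outputs: num_det is [batch, 1] int32, and det_boxes/det_scores/det_classes are padded to max_output_boxes along dim 1, so only the first num_det[i] rows of each batch entry are valid. A small helper for trimming those padded outputs back to per-image detections (a sketch; shapes mirror the CPU stub in TRT8_NMS.forward) could look like:

import torch

def trim_trt_nms_outputs(num_det, det_boxes, det_scores, det_classes):
    # Keep only the first num_det[i] detections of every image in the batch.
    results = []
    for i in range(num_det.shape[0]):
        n = int(num_det[i, 0])
        results.append({
            'boxes': det_boxes[i, :n],      # [n, 4]
            'scores': det_scores[i, :n],    # [n]
            'classes': det_classes[i, :n],  # [n]
        })
    return results

# exercising the helper with dummy tensors shaped like the plugin outputs
num_det = torch.tensor([[3]], dtype=torch.int32)
boxes = torch.randn(1, 100, 4)
scores = torch.rand(1, 100)
classes = torch.randint(0, 2, (1, 100), dtype=torch.int32)
print(trim_trt_nms_outputs(num_det, boxes, scores, classes)[0]['boxes'].shape)  # torch.Size([3, 4])
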
diff --git a/easycv/models/detection/detectors/yolox/tood_head.py b/easycv/models/detection/detectors/yolox/tood_head.py
index 3f0cc14c..5bfb6fce 100644
--- a/easycv/models/detection/detectors/yolox/tood_head.py
+++ b/easycv/models/detection/detectors/yolox/tood_head.py
@@ -251,20 +251,14 @@ def __init__(
             conv_cfg = self.conv_cfg
             chn = self.feat_channels
             self.inter_convs.append(
-                Conv(
-                    in_channels=chn,
-                    out_channels=chn,
-                    act=act,
-                ))
-            # self.inter_convs.append(
-            #     ConvModule(
-            #         chn,
-            #         self.feat_channels,
-            #         3,
-            #         stride=1,
-            #         padding=1,
-            #         conv_cfg=conv_cfg,
-            #         norm_cfg=self.norm_cfg))
+                ConvModule(
+                    chn,
+                    self.feat_channels,
+                    3,
+                    stride=1,
+                    padding=1,
+                    conv_cfg=conv_cfg,
+                    norm_cfg=self.norm_cfg))
 
         self.bcewithlog_loss = nn.BCEWithLogitsLoss(reduction='none')
 
@@ -362,7 +356,6 @@ def forward(self, xin, labels=None, imgs=None):
             outputs.append(output)
 
         if self.training:
-
             return self.get_losses(
                 imgs,
                 x_shifts,
diff --git a/easycv/models/detection/detectors/yolox/yolox.py b/easycv/models/detection/detectors/yolox/yolox.py
index 34f5aa52..e29b6889 100644
--- a/easycv/models/detection/detectors/yolox/yolox.py
+++ b/easycv/models/detection/detectors/yolox/yolox.py
@@ -278,3 +278,4 @@ def forward_export(self, img):
                                       self.test_conf, self.nms_thre)
 
         return outputs
+
diff --git a/easycv/models/detection/utils/boxes.py b/easycv/models/detection/utils/boxes.py
index 65508f11..e0c0f12f 100644
--- a/easycv/models/detection/utils/boxes.py
+++ b/easycv/models/detection/utils/boxes.py
@@ -36,6 +36,11 @@ def bboxes_iou(bboxes_a, bboxes_b, xyxy=True):
     return area_i / (area_a[:, None] + area_b - area_i)
 
 
+# Refer to easycv/models/detection/detectors/yolox/postprocess.py and test.py to rebuild a torch-blade TensorRT-plugin NMS; its correctness is checked by zhoulou in test.py.
+# infer docker images is : registry.cn-shanghai.aliyuncs.com/pai-ai-test/eas-service:easycv_blade_181_export
+def trtplugin_efficientnms_postprocess():
+    return 
+
 def postprocess(prediction, num_classes, conf_thre=0.7, nms_thre=0.45):
     box_corner = prediction.new(prediction.shape)
     box_corner[:, :, 0] = prediction[:, :, 0] - prediction[:, :, 2] / 2
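
trtplugin_efficientnms_postprocess is left as a placeholder in this patch; the comment above it points to postprocess.py and test.py for the actual plugin path. One plausible shape for the wrapper (a sketch, not the committed implementation; it simply defers to create_tensorrt_postprocess and therefore needs torch_blade at call time) would be:

from easycv.models.detection.detectors.yolox.postprocess import create_tensorrt_postprocess

def trtplugin_efficientnms_postprocess(example_outputs, iou_thres=0.45, score_thres=0.25):
    # example_outputs: a sample raw prediction of shape [B, num_anchors, 4 + 1 + num_classes],
    # used only to trace and export the TensorRT EfficientNMS engine.
    return create_tensorrt_postprocess(
        example_outputs, iou_thres=iou_thres, score_thres=score_thres)
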
diff --git a/test.py b/test.py
new file mode 100755
index 00000000..a8de94c3
--- /dev/null
+++ b/test.py
@@ -0,0 +1,113 @@
+# from easycv.models.detection.detectors.yolox import YOLOX
+from easycv.models.detection.detectors.yolox.postprocess import create_tensorrt_postprocess
+import torch
+from torchvision.transforms import Compose
+
+from easycv.models import build_model
+from easycv.utils.checkpoint import load_checkpoint
+from easycv.utils.config_tools import mmcv_config_fromfile
+from easycv.utils.registry import build_from_cfg
+from easycv.datasets.registry import PIPELINES
+from easycv.models.detection.utils import postprocess
+
+
+
+import sys
+import numpy as np
+from PIL import Image
+
+if __name__=='__main__':
+    #a = YOLOX(decode_in_inference=False).eval()
+    cfg = sys.argv[1]
+    ckpt_path = sys.argv[2]
+
+    cfg = mmcv_config_fromfile(cfg)
+    model = build_model(cfg.model)
+    load_checkpoint(model, ckpt_path, map_location='cpu')
+    model = model.eval()
+
+    test_pipeline = cfg.test_pipeline
+    CLASSES = cfg.CLASSES
+
+    pipeline = [build_from_cfg(p, PIPELINES) for p in test_pipeline]
+    pipeline = Compose(pipeline)
+
+    # 8400 is hard-coded; it should be computed as sum((img_w / stride_i) * (img_h / stride_i)) over the FPN strides
+    example_scores = torch.randn([1, 8400, 85], dtype=torch.float32)
+    trt_ext = create_tensorrt_postprocess(example_scores, iou_thres=model.nms_thre, score_thres=model.test_conf)
+
+    # img_path = '/apsara/xinyi.zxy/data/coco/val2017/000000129062.jpg'
+    img_path = '/apsara/xinyi.zxy/data/coco/val2017/000000254016.jpg'
+    # img = cv2.imread(img_path)
+    img = Image.open(img_path)
+    if type(img) is not np.ndarray:
+        img = np.asarray(img)
+
+    # ori_img_shape = img.shape[:2]
+    data_dict = {'img': img}
+    data_dict = pipeline(data_dict)
+    img = data_dict['img']
+    img = torch.unsqueeze(img._data, 0)
+    # print(img.shape)
+    model.decode_in_inference = False
+    # print(type(model), model.decode_in_inference)
+    c = model.forward_export(img)
+    
+    # print(type(c), c.shape)
+    print(model.test_conf, model.nms_thre, model.num_classes, model.decode_in_inference)
+    tc = model.head.decode_outputs(c, c[0].type())
+    # print(type(tc))
+    # print(tc.shape)
+
+    import copy
+    tcback = copy.deepcopy(tc)
+
+    tpa = postprocess(tc, model.num_classes, model.test_conf, model.nms_thre)[0]
+    # print(tpa)
+    tpa[:, 4] = tpa[:, 4] * tpa[:, 5]
+    tpa[:, 5] = tpa[:, 6]
+    tpa = tpa[:, :6]
+    # print("fuck tpa:", len(tpa), tpa[0].shape)
+    box_a = tpa[:,:4]
+    score_a = tpa[:,4]
+    id_a = tpa[:,5]
+    # print(tpa)
+
+    # trt_ext must run on CUDA; move the input there when a GPU is available
+    tcback = tcback.cuda() if torch.cuda.is_available() else tcback
+    tpb = trt_ext.forward(tcback)
+    # print("fuck tpb:",len(tpb))
+     
+    valid_length = min(len(tpa), tpb[2].shape[1])
+    print(valid_length)
+    valid_length = min(valid_length,30)
+
+    box_a = box_a[:valid_length]
+    score_a = score_a[:valid_length]
+    id_a = id_a[:valid_length]
+
+    print(tpb[1].shape)
+    print(tpb[2].shape)
+    print(tpb[3].shape)
+
+    box_b = tpb[1][:,:valid_length,:].cpu().view(box_a.shape)
+    score_b = tpb[2][:,:valid_length].cpu().view(score_a.shape)
+    id_b = tpb[3][:,:valid_length].cpu().view(id_a.shape)
+    
+    def get_diff(input_a, input_b, name='score'):
+        print("name:", name)
+        print("shape:", input_a.shape)
+        print("max_diff  :",torch.max(input_a-input_b))
+        print("avg_diff  :",torch.mean(input_a-input_b))
+        print("totol_diff:",torch.sum(input_a-input_b))
+
+    get_diff(box_a, box_b, 'box')
+    get_diff(score_a, score_b, 'score')
+    get_diff(id_a, id_b, 'id')
+
+    if 0:
+        from easycv.predictors import TorchYoloXPredictor
+        img = Image.open(img_path)
+        pred = TorchYoloXPredictor('models/predict.pt')
+        m = pred.predict([img])
+        print(m)
\ No newline at end of file
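
One caveat about the comparison in test.py: get_diff reports signed differences, so positive and negative errors can cancel, and a near-zero avg_diff/total_diff does not by itself prove the TensorRT path matches the PyTorch postprocess. A stricter variant (an optional add-on, not part of the committed script) compares absolute differences:

import torch

def get_abs_diff(input_a, input_b, name='score'):
    # Element-wise absolute error between the PyTorch and TensorRT outputs.
    diff = (input_a - input_b).abs()
    print(name, 'shape        :', tuple(input_a.shape))
    print(name, 'max_abs_diff :', diff.max().item())
    print(name, 'mean_abs_diff:', diff.mean().item())

get_abs_diff(torch.rand(5, 4), torch.rand(5, 4), 'box')  # usage example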

From b8b2646ef291d0a8347ad91a1664bccb15869dc8 Mon Sep 17 00:00:00 2001
From: zouxinyi0625 <zouxinyi.zxy@alibaba-inc.com>
Date: Sat, 13 Aug 2022 17:20:04 +0800
Subject: [PATCH 35/69] merge ASFF & ASFF_sim and register backbone & head

---
 .../yolox/yolox_s_8xb16_300e_coco.py          |  23 +-
 .../yolox_s_8xb16_300e_coco_asff_tood3.py     | 198 +++++
 .../yolox/yolox_s_8xb16_300e_coco_rep.py      | 197 +++++
 .../yolox/yolox_s_8xb16_300e_coco_tood3.py    | 197 +++++
 .../detectors/detr/detr_transformer.py        |   1 -
 .../models/detection/detectors/yolox/ASFF.py  | 216 ++++--
 .../detection/detectors/yolox/ASFF_sim.py     | 304 --------
 .../detection/detectors/yolox/__init__.py     |   2 +
 .../detection/detectors/yolox/tood_head.py    | 689 +-----------------
 .../detection/detectors/yolox/yolo_head.py    | 639 +---------------
 .../detectors/yolox/yolo_head_template.py     | 656 +++++++++++++++++
 .../detection/detectors/yolox/yolo_pafpn.py   | 100 ++-
 .../models/detection/detectors/yolox/yolox.py | 194 ++---
 .../detection/detectors/yolox/yolox_bak.py    | 239 ++++++
 14 files changed, 1796 insertions(+), 1859 deletions(-)
 create mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood3.py
 create mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_rep.py
 create mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_tood3.py
 delete mode 100644 easycv/models/detection/detectors/yolox/ASFF_sim.py
 create mode 100644 easycv/models/detection/detectors/yolox/yolo_head_template.py
 create mode 100644 easycv/models/detection/detectors/yolox/yolox_bak.py

diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
index 1f434656..68ccada3 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
@@ -3,10 +3,23 @@
 # model settings s m l x
 model = dict(
     type='YOLOX',
-    num_classes=80,
-    model_type='s',  # s m l x tiny nano
     test_conf=0.01,
-    nms_thre=0.65)
+    nms_thre=0.65,
+    backbone=dict(
+        type='YOLOPAFPN',
+        backbone='CSPDarknet',
+        model_type='s',  # s m l x tiny nano
+        use_att=None,
+        neck='yolo'
+    ),
+    head=dict(
+        type='YOLOXHead',
+        model_type='s',
+        obj_loss_type='BCE',
+        reg_loss_type='giou',
+        num_classes=80
+    )
+)
 
 # s m l x
 img_scale = (640, 640)
@@ -35,7 +48,9 @@
 ]
 
 # dataset settings
-data_root = 'data/coco/'
+# data_root = '/apsarapangu/disk2/xinyi.zxy/data/coco/'
+data_root = '/mnt/data/nas/data/detection/coco/'
+
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood3.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood3.py
new file mode 100644
index 00000000..73787692
--- /dev/null
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood3.py
@@ -0,0 +1,198 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    test_conf=0.01,
+    nms_thre=0.65,
+    backbone=dict(
+        type='YOLOPAFPN',
+        backbone='RepVGGYOLOX',
+        model_type='s',  # s m l x tiny nano
+        use_att='ASFF',
+        neck='yolo'
+    ),
+    head=dict(
+        type='TOODHead',
+        model_type='s',
+        obj_loss_type='BCE',
+        reg_loss_type='giou',
+        num_classes=80
+    )
+)
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = '/apsarapangu/disk2/xinyi.zxy/data/coco/'
+# data_root = '/mnt/data/nas/data/detection/coco/'
+data_root = '/root/workspace/data/coco/'
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # s m l x; removed for tiny and nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=False, export_blade=False, end2end=False)
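
Because the detector is now assembled from a backbone dict and a head dict, each of these new configs can be sanity-checked the same way test.py builds its model, directly from the config file (a sketch; it assumes the repository root is the working directory):

from easycv.models import build_model
from easycv.utils.config_tools import mmcv_config_fromfile

cfg = mmcv_config_fromfile(
    'configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood3.py')
model = build_model(cfg.model)  # YOLOX with RepVGGYOLOX backbone, ASFF attention and TOODHead
print(type(model).__name__)
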
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_rep.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_rep.py
new file mode 100644
index 00000000..bb113bf7
--- /dev/null
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_rep.py
@@ -0,0 +1,197 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    test_conf=0.01,
+    nms_thre=0.65,
+    backbone=dict(
+        type='YOLOPAFPN',
+        backbone='RepVGGYOLOX',
+        model_type='s',  # s m l x tiny nano
+        use_att=None,
+        neck='yolo'
+    ),
+    head=dict(
+        type='YOLOXHead',
+        model_type='s',
+        obj_loss_type='BCE',
+        reg_loss_type='giou',
+        num_classes=80
+    )
+)
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = '/apsarapangu/disk2/xinyi.zxy/data/coco/'
+data_root = '/mnt/data/nas/data/detection/coco/'
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # s m l x; removed for tiny and nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=False, export_blade=False, end2end=False)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_tood3.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_tood3.py
new file mode 100644
index 00000000..9c20fee9
--- /dev/null
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_tood3.py
@@ -0,0 +1,197 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    test_conf=0.01,
+    nms_thre=0.65,
+    backbone=dict(
+        type='YOLOPAFPN',
+        backbone='RepVGGYOLOX',
+        model_type='s',  # s m l x tiny nano
+        use_att=None,
+        neck='yolo'
+    ),
+    head=dict(
+        type='TOODHead',
+        model_type='s',
+        obj_loss_type='BCE',
+        reg_loss_type='giou',
+        num_classes=80
+    )
+)
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = '/apsarapangu/disk2/xinyi.zxy/data/coco/'
+data_root = '/mnt/data/nas/data/detection/coco/'
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # s m l x; removed for tiny and nano
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential moving average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=False, export_blade=False, end2end=False)
diff --git a/easycv/models/detection/detectors/detr/detr_transformer.py b/easycv/models/detection/detectors/detr/detr_transformer.py
index f7a6dd8c..213ee8c0 100644
--- a/easycv/models/detection/detectors/detr/detr_transformer.py
+++ b/easycv/models/detection/detectors/detr/detr_transformer.py
@@ -187,7 +187,6 @@ def forward(self, mask):
 
 
 class TransformerDecoder(nn.Module):
-
     def __init__(self,
                  decoder_layer,
                  num_layers,
diff --git a/easycv/models/detection/detectors/yolox/ASFF.py b/easycv/models/detection/detectors/yolox/ASFF.py
index 3ad7d65d..56448502 100644
--- a/easycv/models/detection/detectors/yolox/ASFF.py
+++ b/easycv/models/detection/detectors/yolox/ASFF.py
@@ -1,26 +1,28 @@
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
-from easycv.models.backbones.network_blocks import BaseConv, DWConv, SiLU
+from easycv.models.backbones.network_blocks import BaseConv
 
 class ASFF(nn.Module):
-
     def __init__(self,
                  level,
+                 type='ASFF',
+                 asff_channel=2,
+                 expand_kernel=3,
                  multiplier=1,
-                 asff_channel=16,
-                 rfb=False,
-                 vis=False,
                  act='silu'):
         """
-        multiplier should be 1, 0.5
-        which means, the channel of ASFF can be
-        512, 256, 128 -> multiplier=0.5
-        1024, 512, 256 -> multiplier=1
-        For even smaller, you need change code manually.
+        Args:
+            level(int): the level of the input feature
+            type(str): ASFF or ASFF_sim
+            asff_channel(int): the hidden channel of the attention layer in ASFF
+            expand_kernel(int): expand kernel size of the expand layer
+            multiplier: should be the same as width in the backbone
         """
         super(ASFF, self).__init__()
         self.level = level
+        self.type = type
+
         self.dim = [
             int(1024 * multiplier),
             int(512 * multiplier),
@@ -30,40 +32,65 @@ def __init__(self,
         Conv = BaseConv
 
         self.inter_dim = self.dim[self.level]
-        if level == 0:
-            self.stride_level_1 = Conv(
-                int(512 * multiplier), self.inter_dim, 3, 2, act=act)
-
-            self.stride_level_2 = Conv(
-                int(256 * multiplier), self.inter_dim, 3, 2, act=act)
-
-            self.expand = Conv(
-                self.inter_dim, int(1024 * multiplier), 3, 1, act=act)
-        elif level == 1:
-            self.compress_level_0 = Conv(
-                int(1024 * multiplier), self.inter_dim, 1, 1, act=act)
-            self.stride_level_2 = Conv(
-                int(256 * multiplier), self.inter_dim, 3, 2, act=act)
-            self.expand = Conv(
-                self.inter_dim, int(512 * multiplier), 3, 1, act=act)
-        elif level == 2:
-            self.compress_level_0 = Conv(
-                int(1024 * multiplier), self.inter_dim, 1, 1, act=act)
-            self.compress_level_1 = Conv(
-                int(512 * multiplier), self.inter_dim, 1, 1, act=act)
-            self.expand = Conv(
-                self.inter_dim, int(256 * multiplier), 3, 1, act=act)
-
-        # when adding rfb, we use half number of channels to save memory
-        # compress_c = 8 if rfb else 16
-        compress_c = asff_channel
-
-        self.weight_level_0 = Conv(self.inter_dim, compress_c, 1, 1, act=act)
-        self.weight_level_1 = Conv(self.inter_dim, compress_c, 1, 1, act=act)
-        self.weight_level_2 = Conv(self.inter_dim, compress_c, 1, 1, act=act)
-
-        self.weight_levels = Conv(compress_c * 3, 3, 1, 1, act=act)
-        self.vis = vis
+
+        if self.type == 'ASFF':
+            if level == 0:
+                self.stride_level_1 = Conv(
+                    int(512 * multiplier), self.inter_dim, 3, 2, act=act)
+
+                self.stride_level_2 = Conv(
+                    int(256 * multiplier), self.inter_dim, 3, 2, act=act)
+
+            elif level == 1:
+                self.compress_level_0 = Conv(
+                    int(1024 * multiplier), self.inter_dim, 1, 1, act=act)
+                self.stride_level_2 = Conv(
+                    int(256 * multiplier), self.inter_dim, 3, 2, act=act)
+
+            elif level == 2:
+                self.compress_level_0 = Conv(
+                    int(1024 * multiplier), self.inter_dim, 1, 1, act=act)
+                self.compress_level_1 = Conv(
+                    int(512 * multiplier), self.inter_dim, 1, 1, act=act)
+
+        # add expand layer
+        self.expand = Conv(
+            self.inter_dim,
+            self.inter_dim,
+            expand_kernel,
+            1,
+            act=act)
+
+        self.weight_level_0 = Conv(self.inter_dim, asff_channel, 1, 1, act=act)
+        self.weight_level_1 = Conv(self.inter_dim, asff_channel, 1, 1, act=act)
+        self.weight_level_2 = Conv(self.inter_dim, asff_channel, 1, 1, act=act)
+
+        self.weight_levels = Conv(asff_channel * 3, 3, 1, 1, act=act)
+
+    def expand_channel(self, x):
+        # [b,c,h,w]->[b,c*4,h/2,w/2]
+        patch_top_left = x[..., ::2, ::2]
+        patch_top_right = x[..., ::2, 1::2]
+        patch_bot_left = x[..., 1::2, ::2]
+        patch_bot_right = x[..., 1::2, 1::2]
+        x = torch.cat(
+            (
+                patch_top_left,
+                patch_bot_left,
+                patch_top_right,
+                patch_bot_right,
+            ),
+            dim=1,
+        )
+        return x
+
+
+    def mean_channel(self, x):
+        # [b,c,h,w]->[b,c/2,h,w] (average of even- and odd-indexed channels)
+        x1 = x[:, ::2, :, :]
+        x2 = x[:, 1::2, :, :]
+        return (x1 + x2) / 2
+
 
     def forward(self, x):  # l,m,s
         """
@@ -75,26 +102,52 @@ def forward(self, x):  # l,m,s
         x_level_1 = x[1]  # mid feature level [256,40,40]
         x_level_2 = x[0]  # min feature level [128,80,80]
 
-        if self.level == 0:
-            level_0_resized = x_level_0
-            level_1_resized = self.stride_level_1(x_level_1)
-            level_2_downsampled_inter = F.max_pool2d(
-                x_level_2, 3, stride=2, padding=1)
-            level_2_resized = self.stride_level_2(level_2_downsampled_inter)
-        elif self.level == 1:
-            level_0_compressed = self.compress_level_0(x_level_0)
-            level_0_resized = F.interpolate(
-                level_0_compressed, scale_factor=2, mode='nearest')
-            level_1_resized = x_level_1
-            level_2_resized = self.stride_level_2(x_level_2)
-        elif self.level == 2:
-            level_0_compressed = self.compress_level_0(x_level_0)
-            level_0_resized = F.interpolate(
-                level_0_compressed, scale_factor=4, mode='nearest')
-            x_level_1_compressed = self.compress_level_1(x_level_1)
-            level_1_resized = F.interpolate(
-                x_level_1_compressed, scale_factor=2, mode='nearest')
-            level_2_resized = x_level_2
+        if self.type == 'ASFF':
+            if self.level == 0:
+                level_0_resized = x_level_0
+                level_1_resized = self.stride_level_1(x_level_1)
+                level_2_downsampled_inter = F.max_pool2d(
+                    x_level_2, 3, stride=2, padding=1)
+                level_2_resized = self.stride_level_2(level_2_downsampled_inter)
+            elif self.level == 1:
+                level_0_compressed = self.compress_level_0(x_level_0)
+                level_0_resized = F.interpolate(
+                    level_0_compressed, scale_factor=2, mode='nearest')
+                level_1_resized = x_level_1
+                level_2_resized = self.stride_level_2(x_level_2)
+            elif self.level == 2:
+                level_0_compressed = self.compress_level_0(x_level_0)
+                level_0_resized = F.interpolate(
+                    level_0_compressed, scale_factor=4, mode='nearest')
+                x_level_1_compressed = self.compress_level_1(x_level_1)
+                level_1_resized = F.interpolate(
+                    x_level_1_compressed, scale_factor=2, mode='nearest')
+                level_2_resized = x_level_2
+        else:
+            if self.level == 0:
+                level_0_resized = x_level_0
+                level_1_resized = self.expand_channel(x_level_1)
+                level_1_resized = self.mean_channel(level_1_resized)
+                level_2_resized = self.expand_channel(x_level_2)
+                level_2_resized = F.max_pool2d(
+                    level_2_resized, 3, stride=2, padding=1)
+            elif self.level == 1:
+                level_0_resized = F.interpolate(
+                    x_level_0, scale_factor=2, mode='nearest')
+                level_0_resized = self.mean_channel(level_0_resized)
+                level_1_resized = x_level_1
+                level_2_resized = self.expand_channel(x_level_2)
+                level_2_resized = self.mean_channel(level_2_resized)
+
+            elif self.level == 2:
+                level_0_resized = F.interpolate(
+                    x_level_0, scale_factor=4, mode='nearest')
+                level_0_resized = self.mean_channel(
+                    self.mean_channel(level_0_resized))
+                level_1_resized = F.interpolate(
+                    x_level_1, scale_factor=2, mode='nearest')
+                level_1_resized = self.mean_channel(level_1_resized)
+                level_2_resized = x_level_2
 
         level_0_weight_v = self.weight_level_0(level_0_resized)
         level_1_weight_v = self.weight_level_1(level_1_resized)
@@ -110,10 +163,35 @@ def forward(self, x):  # l,m,s
                                                                                                        1:
                                                                                                        2, :, :] + level_2_resized * levels_weight[:,
                                                                                                                                                   2:, :, :]
-
         out = self.expand(fused_out_reduced)
 
-        if self.vis:
-            return out, levels_weight, fused_out_reduced.sum(dim=1)
-        else:
-            return out
+        return out
+
+if __name__=='__main__':
+
+    width = 0.5
+    num_classes = 80
+    in_channels = [256, 512, 1024]
+
+    asff_channel = 2
+    act = 'silu'
+    type = 'ASFF_sim'
+    expand_kernel = 1
+
+    asff_1 = ASFF(
+        level=0, type=type, multiplier=width, asff_channel=asff_channel,
+        act=act, expand_kernel=expand_kernel).cuda()
+    asff_2 = ASFF(
+        level=1, type=type, multiplier=width, asff_channel=asff_channel,
+        act=act, expand_kernel=expand_kernel).cuda()
+    asff_3 = ASFF(
+        level=2, type=type, multiplier=width, asff_channel=asff_channel,
+        act=act, expand_kernel=expand_kernel).cuda()
+
+    input = (torch.rand(1, 128, 80, 80).cuda(), torch.rand(1, 256, 40,
+                                                           40).cuda(),
+             torch.rand(1, 512, 20, 20).cuda())
+
+    from torchsummaryX import summary
+
+    summary(asff_1, input)
+    summary(asff_2, input)
+    summary(asff_3, input)
+
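
For the ASFF_sim branch merged above, expand_channel and mean_channel replace the stride/compress convolutions with pure tensor rearrangements: expand_channel is a space-to-depth split ([b, c, h, w] -> [b, 4c, h/2, w/2]) and mean_channel averages even- and odd-indexed channels ([b, c, h, w] -> [b, c/2, h, w]), so applying one after the other halves the resolution while doubling the channels without adding parameters. A standalone shape check of the two helpers (copied here for illustration only) is:

import torch

def expand_channel(x):
    # space-to-depth: [b, c, h, w] -> [b, 4c, h/2, w/2]
    return torch.cat((x[..., ::2, ::2], x[..., 1::2, ::2],
                      x[..., ::2, 1::2], x[..., 1::2, 1::2]), dim=1)

def mean_channel(x):
    # average even- and odd-indexed channels: [b, c, h, w] -> [b, c/2, h, w]
    return (x[:, ::2, :, :] + x[:, 1::2, :, :]) / 2

x = torch.rand(1, 256, 40, 40)                 # mid-level feature at width=0.5
print(mean_channel(expand_channel(x)).shape)   # torch.Size([1, 512, 20, 20])
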
diff --git a/easycv/models/detection/detectors/yolox/ASFF_sim.py b/easycv/models/detection/detectors/yolox/ASFF_sim.py
deleted file mode 100644
index 65b65184..00000000
--- a/easycv/models/detection/detectors/yolox/ASFF_sim.py
+++ /dev/null
@@ -1,304 +0,0 @@
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-from easycv.models.backbones.network_blocks import DWConv, SiLU
-
-
-def autopad(k, p=None):  # kernel, padding
-    # Pad to 'same'
-    if p is None:
-        p = k // 2 if isinstance(k, int) else [x // 2 for x in k]  # auto-pad
-    return p
-
-
-def get_activation(name='silu', inplace=True):
-    if name == 'silu':
-        # @ to do nn.SiLU 1.7.0
-        # module = nn.SiLU(inplace=inplace)
-        module = SiLU(inplace=inplace)
-    elif name == 'relu':
-        module = nn.ReLU(inplace=inplace)
-    elif name == 'lrelu':
-        module = nn.LeakyReLU(0.1, inplace=inplace)
-    else:
-        raise AttributeError('Unsupported act type: {}'.format(name))
-    return module
-
-
-class Conv(nn.Module):
-    # Standard convolution
-    def __init__(self,
-                 c1,
-                 c2,
-                 k=1,
-                 s=1,
-                 p=None,
-                 g=1,
-                 act='silu'):  # ch_in, ch_out, kernel, stride, padding, groups
-        super(Conv, self).__init__()
-        self.conv = nn.Conv2d(
-            c1, c2, k, s, autopad(k, p), groups=g, bias=False)
-        self.bn = nn.BatchNorm2d(c2)
-        self.act = get_activation(act, inplace=True)
-
-    def forward(self, x):
-        return self.act(self.bn(self.conv(x)))
-
-    def forward_fuse(self, x):
-        return self.act(self.conv(x))
-
-
-class ASFF(nn.Module):
-
-    def __init__(self,
-                 level,
-                 multiplier=1,
-                 asff_channel=2,
-                 expand_kernel=3,
-                 down_rate=None,
-                 use_dconv=False,
-                 use_expand=True,
-                 rfb=False,
-                 vis=False,
-                 act='silu'):
-        """
-        multiplier should be 1, 0.5
-        which means, the channel of ASFF can be
-        512, 256, 128 -> multiplier=0.5
-        1024, 512, 256 -> multiplier=1
-        For even smaller, you need change code manually.
-        """
-        super(ASFF, self).__init__()
-        self.level = level
-        self.dim = [
-            int(1024 * multiplier),
-            int(512 * multiplier),
-            int(256 * multiplier)
-        ]
-
-        self.inter_dim = self.dim[self.level]
-
-        self.use_expand = use_expand
-
-        if level == 0:
-            if down_rate == None:
-                self.expand = Conv(
-                    self.inter_dim,
-                    int(1024 * multiplier),
-                    expand_kernel,
-                    1,
-                    act=act)
-            else:
-                if use_dconv:
-                    self.expand = DWConv(
-                        self.inter_dim,
-                        int(1024 * multiplier),
-                        expand_kernel,
-                        1,
-                        act=act)
-                else:
-                    self.expand = nn.Sequential(
-                        Conv(
-                            self.inter_dim,
-                            int(self.inter_dim // down_rate),
-                            1,
-                            1,
-                            act=act),
-                        Conv(
-                            int(self.inter_dim // down_rate),
-                            int(1024 * multiplier),
-                            1,
-                            1,
-                            act=act))
-
-        elif level == 1:
-            if down_rate == None:
-                self.expand = Conv(
-                    self.inter_dim,
-                    int(512 * multiplier),
-                    expand_kernel,
-                    1,
-                    act=act)
-            else:
-                if use_dconv:
-                    self.expand = DWConv(
-                        self.inter_dim,
-                        int(512 * multiplier),
-                        expand_kernel,
-                        1,
-                        act=act)
-                else:
-                    self.expand = nn.Sequential(
-                        Conv(
-                            self.inter_dim,
-                            int(self.inter_dim // down_rate),
-                            1,
-                            1,
-                            act=act),
-                        Conv(
-                            int(self.inter_dim // down_rate),
-                            int(512 * multiplier),
-                            1,
-                            1,
-                            act=act))
-
-        elif level == 2:
-            if down_rate == None:
-                self.expand = Conv(
-                    self.inter_dim,
-                    int(256 * multiplier),
-                    expand_kernel,
-                    1,
-                    act=act)
-            else:
-                if use_dconv:
-                    self.expand = DWConv(
-                        self.inter_dim,
-                        int(256 * multiplier),
-                        expand_kernel,
-                        1,
-                        act=act)
-                else:
-                    self.expand = nn.Sequential(
-                        Conv(
-                            self.inter_dim,
-                            int(self.inter_dim // down_rate),
-                            1,
-                            1,
-                            act=act),
-                        Conv(
-                            int(self.inter_dim // down_rate),
-                            int(256 * multiplier),
-                            1,
-                            1,
-                            act=act))
-
-        # when adding rfb, we use half number of channels to save memory
-        # compress_c = 8 if rfb else 16
-        compress_c = asff_channel
-
-        self.weight_level_0 = Conv(self.inter_dim, compress_c, 1, 1, act=act)
-        self.weight_level_1 = Conv(self.inter_dim, compress_c, 1, 1, act=act)
-        self.weight_level_2 = Conv(self.inter_dim, compress_c, 1, 1, act=act)
-
-        self.weight_levels = Conv(compress_c * 3, 3, 1, 1, act=act)
-        self.vis = vis
-
-    def expand_channel(self, x):
-        # [b,c,h,w]->[b,c*4,h/2,w/2]
-        patch_top_left = x[..., ::2, ::2]
-        patch_top_right = x[..., ::2, 1::2]
-        patch_bot_left = x[..., 1::2, ::2]
-        patch_bot_right = x[..., 1::2, 1::2]
-        x = torch.cat(
-            (
-                patch_top_left,
-                patch_bot_left,
-                patch_top_right,
-                patch_bot_right,
-            ),
-            dim=1,
-        )
-        return x
-
-    def mean_channel(self, x):
-        # [b,c,h,w]->[b,c/4,h*2,w*2]
-        x1 = x[:, ::2, :, :]
-        x2 = x[:, 1::2, :, :]
-        return (x1 + x2) / 2
-
-    def forward(self, x):  # l,m,s
-        """
-        #
-        256, 512, 1024
-        from small -> large
-        """
-        x_level_0 = x[2]  # max feature [512,20,20]
-        x_level_1 = x[1]  # mid feature [256,40,40]
-        x_level_2 = x[0]  # min feature [128,80,80]
-
-        if self.level == 0:
-            level_0_resized = x_level_0
-            level_1_resized = self.expand_channel(x_level_1)
-            level_1_resized = self.mean_channel(level_1_resized)
-            level_2_resized = self.expand_channel(x_level_2)
-            level_2_resized = F.max_pool2d(
-                level_2_resized, 3, stride=2, padding=1)
-        elif self.level == 1:
-            level_0_resized = F.interpolate(
-                x_level_0, scale_factor=2, mode='nearest')
-            level_0_resized = self.mean_channel(level_0_resized)
-            level_1_resized = x_level_1
-            level_2_resized = self.expand_channel(x_level_2)
-            level_2_resized = self.mean_channel(level_2_resized)
-
-        elif self.level == 2:
-            level_0_resized = F.interpolate(
-                x_level_0, scale_factor=4, mode='nearest')
-            level_0_resized = self.mean_channel(
-                self.mean_channel(level_0_resized))
-            level_1_resized = F.interpolate(
-                x_level_1, scale_factor=2, mode='nearest')
-            level_1_resized = self.mean_channel(level_1_resized)
-            level_2_resized = x_level_2
-
-        level_0_weight_v = self.weight_level_0(level_0_resized)
-        level_1_weight_v = self.weight_level_1(level_1_resized)
-        level_2_weight_v = self.weight_level_2(level_2_resized)
-
-        levels_weight_v = torch.cat(
-            (level_0_weight_v, level_1_weight_v, level_2_weight_v), 1)
-        levels_weight = self.weight_levels(levels_weight_v)
-        levels_weight = F.softmax(levels_weight, dim=1)
-
-        fused_out_reduced = level_0_resized * levels_weight[:, 0:
-                                                            1, :, :] + level_1_resized * levels_weight[:,
-                                                                                                       1:
-                                                                                                       2, :, :] + level_2_resized * levels_weight[:,
-                                                                                                                                                  2:, :, :]
-
-        if self.use_expand:
-            out = self.expand(fused_out_reduced)
-        else:
-            out = fused_out_reduced
-
-        if self.vis:
-            return out, levels_weight, fused_out_reduced.sum(dim=1)
-        else:
-            return out
-
-
-# if __name__ == '__main__':
-#     width = 0.5
-#     num_classes = 80
-#     in_channels = [256, 512, 1024]
-#
-#     asff_channel = 2
-#     act = 'relu'
-#
-#     asff_1 = ASFF(
-#         level=0, multiplier=width, asff_channel=asff_channel, act=act).cuda()
-#     asff_2 = ASFF(
-#         level=1, multiplier=width, asff_channel=asff_channel, act=act).cuda()
-#     asff_3 = ASFF(
-#         level=2, multiplier=width, asff_channel=asff_channel, act=act).cuda()
-#
-#     input = (torch.rand(1, 128, 80, 80).cuda(), torch.rand(1, 256, 40,
-#                                                            40).cuda(),
-#              torch.rand(1, 512, 20, 20).cuda())
-#
-#     # flops, params = get_model_complexity_info(asff_1, input, as_strings=True,
-#     #                                           print_per_layer_stat=True)
-#     # print('Flops:  ' + flops)
-#     # print('Params: ' + params)
-#
-#     # input = torch.randn(1, 3, 640, 640).cuda()
-#     # flops, params = profile(asff_1, inputs=(input,))
-#     # print('flops: {}, params: {}'.format(flops, params))
-#
-#     from torchsummaryX import summary
-#
-#     summary(asff_1, input)
-#     summary(asff_2, input)
-#     summary(asff_3, input)
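The ASFF_sim module removed above relied on two parameter-free resampling helpers, expand_channel and mean_channel, to match feature maps from different pyramid levels without extra convolutions. A minimal standalone sketch of that behaviour follows (an illustration only, not part of this patch; the tensor shapes assume the width-0.5 feature maps used in the test block above):

import torch

def expand_channel(x):
    # space-to-depth rearrangement: [b, c, h, w] -> [b, 4c, h/2, w/2]
    return torch.cat((x[..., ::2, ::2], x[..., 1::2, ::2],
                      x[..., ::2, 1::2], x[..., 1::2, 1::2]), dim=1)

def mean_channel(x):
    # average channel pairs: [b, c, h, w] -> [b, c/2, h, w]
    return (x[:, ::2, :, :] + x[:, 1::2, :, :]) / 2

x_mid = torch.rand(1, 256, 40, 40)                # mid-level PAFPN feature
x_as_deep = mean_channel(expand_channel(x_mid))   # resampled to the deepest level
assert x_as_deep.shape == (1, 512, 20, 20)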
diff --git a/easycv/models/detection/detectors/yolox/__init__.py b/easycv/models/detection/detectors/yolox/__init__.py
index c2e4bf0d..48fa177f 100644
--- a/easycv/models/detection/detectors/yolox/__init__.py
+++ b/easycv/models/detection/detectors/yolox/__init__.py
@@ -1 +1,3 @@
 from .yolox import YOLOX
+from .tood_head import TOODHead
+from .yolo_head import YOLOXHead
\ No newline at end of file
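With TOODHead and YOLOXHead now exported here and registered through @HEADS.register_module (see the head diffs below), either head can be selected from a config dict. A rough usage sketch, assuming easycv's HEADS registry follows the mmcv-style Registry convention (the exact builder call is an assumption, not shown in this patch):

from easycv.models.builder import HEADS

head_cfg = dict(
    type='TOODHead',        # or 'YOLOXHead'
    num_classes=80,
    model_type='s',
    obj_loss_type='BCE',
    reg_loss_type='giou')

head = HEADS.build(head_cfg)  # assumed mmcv-style Registry.build()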
diff --git a/easycv/models/detection/detectors/yolox/tood_head.py b/easycv/models/detection/detectors/yolox/tood_head.py
index 3f0cc14c..f6277f7f 100644
--- a/easycv/models/detection/detectors/yolox/tood_head.py
+++ b/easycv/models/detection/detectors/yolox/tood_head.py
@@ -1,17 +1,9 @@
-# Copyright (c) 2014-2021 Megvii Inc And Alibaba PAI-Teams. All rights reserved.
-import logging
-import math
-from distutils.version import LooseVersion
-
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
 from mmcv.cnn import ConvModule, normal_init
-
-from easycv.models.backbones.network_blocks import BaseConv, DWConv
-from easycv.models.backbones.repvgg_yolox_backbone import RepVGGBlock
-from easycv.models.detection.utils import bboxes_iou
-from easycv.models.loss import FocalLoss, IOUloss, VarifocalLoss
+from easycv.models.builder import HEADS
+from .yolo_head_template import YOLOXHead_Template
 
 
 class TaskDecomposition(nn.Module):
@@ -86,22 +78,21 @@ def forward(self, feat, avg_feat=None):
         return feat
 
 
-class TOODHead(nn.Module):
-
+@HEADS.register_module
+class TOODHead(YOLOXHead_Template):
     def __init__(
             self,
             num_classes,
-            width=1.0,
+            model_type='s',
             strides=[8, 16, 32],
             in_channels=[256, 512, 1024],
-            conv_type='repconv',
             act='silu',
+            depthwise=False,
             stage='CLOUD',
-            obj_loss_type='l1',
-            reg_loss_type='iou',
-            stacked_convs=6,
-            la_down_rate=8,
-            conv_layers=2,
+            obj_loss_type='BCE',
+            reg_loss_type='giou',
+            stacked_convs=3,
+            la_down_rate=32,
             decode_in_inference=True,
             conv_cfg=None,
             norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
@@ -118,126 +109,30 @@ def __init__(
             obj_loss_type (str): the loss function of the obj conf. Default value: l1.
             reg_loss_type (str): the loss function of the box prediction. Default value: l1.
         """
-        super().__init__()
-
-        self.n_anchors = 1
-        self.num_classes = num_classes
-        self.stage = stage
-        self.decode_in_inference = decode_in_inference  # for deploy, set to False
+        super(TOODHead, self).__init__(
+            num_classes=num_classes,
+            model_type=model_type,
+            strides=strides,
+            in_channels=in_channels,
+            act=act,
+            depthwise=depthwise,
+            stage=stage,
+            obj_loss_type=obj_loss_type,
+            reg_loss_type=reg_loss_type,
+            decode_in_inference=decode_in_inference
+        )
 
         self.stacked_convs = stacked_convs
         self.conv_cfg = conv_cfg
         self.norm_cfg = norm_cfg
-        self.feat_channels = int(256 * width)
+        self.feat_channels = int(256 * self.width)
 
-        self.cls_convs = nn.ModuleList()
-        self.reg_convs = nn.ModuleList()
-        self.cls_preds = nn.ModuleList()
-        self.reg_preds = nn.ModuleList()
-        self.obj_preds = nn.ModuleList()
         self.cls_decomps = nn.ModuleList()
         self.reg_decomps = nn.ModuleList()
-        self.stems = nn.ModuleList()
 
         self.inter_convs = nn.ModuleList()
 
-        default_conv_type_list = ['conv', 'dwconv', 'repconv']
-        # Conv = DWConv if depthwise else BaseConv
-        if conv_type not in default_conv_type_list:
-            logging.warning(
-                'YOLOX-PAI tood head conv_type must in [conv, dwconv, repconv], otherwise we use repconv as default'
-            )
-            conv_type = 'repconv'
-        if conv_type == 'conv':
-            Conv = BaseConv
-        if conv_type == 'dwconv':
-            Conv = DWConv
-        if conv_type == 'repconv':
-            Conv = RepVGGBlock
-
         for i in range(len(in_channels)):
-            self.stems.append(
-                BaseConv(
-                    in_channels=int(in_channels[i] * width),
-                    out_channels=int(256 * width),
-                    ksize=1,
-                    stride=1,
-                    act=act,
-                ))
-            if conv_layers == 2:
-                self.cls_convs.append(
-                    nn.Sequential(*[
-                        Conv(
-                            in_channels=int(256 * width),
-                            out_channels=int(256 * width),
-                            act=act,
-                            # ksize=3,
-                            # stride=1,
-                        ),
-                        Conv(
-                            in_channels=int(256 * width),
-                            out_channels=int(256 * width),
-                            act=act,
-                            # ksize=3,
-                            # stride=1,
-                            # act=act,
-                        ),
-                    ]))
-                self.reg_convs.append(
-                    nn.Sequential(*[
-                        Conv(
-                            in_channels=int(256 * width),
-                            out_channels=int(256 * width),
-                            act=act,
-                        ),
-                        Conv(
-                            in_channels=int(256 * width),
-                            out_channels=int(256 * width),
-                            act=act,
-                        ),
-                    ]))
-            elif conv_layers == 1:
-                self.cls_convs.append(
-                    nn.Sequential(*[
-                        Conv(
-                            in_channels=int(256 * width),
-                            out_channels=int(256 * width),
-                            act=act,
-                        )
-                    ]))
-                self.reg_convs.append(
-                    nn.Sequential(*[
-                        Conv(
-                            in_channels=int(256 * width),
-                            out_channels=int(256 * width),
-                            act=act,
-                        )
-                    ]))
-
-            self.cls_preds.append(
-                nn.Conv2d(
-                    in_channels=int(256 * width),
-                    out_channels=self.n_anchors * self.num_classes,
-                    kernel_size=1,
-                    stride=1,
-                    padding=0,
-                ))
-            self.reg_preds.append(
-                nn.Conv2d(
-                    in_channels=int(256 * width),
-                    out_channels=4,
-                    kernel_size=1,
-                    stride=1,
-                    padding=0,
-                ))
-            self.obj_preds.append(
-                nn.Conv2d(
-                    in_channels=int(256 * width),
-                    out_channels=self.n_anchors * 1,
-                    kernel_size=1,
-                    stride=1,
-                    padding=0,
-                ))
             self.cls_decomps.append(
                 TaskDecomposition(self.feat_channels, self.stacked_convs,
                                   self.stacked_convs * la_down_rate,
@@ -250,52 +145,17 @@ def __init__(
         for i in range(self.stacked_convs):
             conv_cfg = self.conv_cfg
             chn = self.feat_channels
-            self.inter_convs.append(
-                Conv(
-                    in_channels=chn,
-                    out_channels=chn,
-                    act=act,
-                ))
-            # self.inter_convs.append(
-            #     ConvModule(
-            #         chn,
-            #         self.feat_channels,
-            #         3,
-            #         stride=1,
-            #         padding=1,
-            #         conv_cfg=conv_cfg,
-            #         norm_cfg=self.norm_cfg))
-
-        self.bcewithlog_loss = nn.BCEWithLogitsLoss(reduction='none')
-
-        self.use_l1 = False
-        self.l1_loss = nn.L1Loss(reduction='none')
-
-        self.iou_loss = IOUloss(reduction='none', loss_type=reg_loss_type)
-
-        self.obj_loss_type = obj_loss_type
-        if obj_loss_type == 'BCE':
-            self.obj_loss = nn.BCEWithLogitsLoss(reduction='none')
-        elif obj_loss_type == 'focal':
-            self.obj_loss = FocalLoss(reduction='none')
-        elif obj_loss_type == 'v_focal':
-            self.obj_loss = VarifocalLoss(reduction='none')
-        else:
-            assert 'Undefined loss type: {}'.format(obj_loss_type)
-
-        self.strides = strides
-        self.grids = [torch.zeros(1)] * len(in_channels)
 
-    def initialize_biases(self, prior_prob):
-        for conv in self.cls_preds:
-            b = conv.bias.view(self.n_anchors, -1)
-            b.data.fill_(-math.log((1 - prior_prob) / prior_prob))
-            conv.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
+            self.inter_convs.append(
+                ConvModule(
+                    chn,
+                    self.feat_channels,
+                    3,
+                    stride=1,
+                    padding=1,
+                    conv_cfg=conv_cfg,
+                    norm_cfg=self.norm_cfg))
 
-        for conv in self.obj_preds:
-            b = conv.bias.view(self.n_anchors, -1)
-            b.data.fill_(-math.log((1 - prior_prob) / prior_prob))
-            conv.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
 
     def forward(self, xin, labels=None, imgs=None):
         outputs = []
@@ -384,489 +244,4 @@ def forward(self, xin, labels=None, imgs=None):
             else:
                 return outputs
 
-    def get_output_and_grid(self, output, k, stride, dtype):
-        grid = self.grids[k]
-
-        batch_size = output.shape[0]
-        n_ch = 5 + self.num_classes
-        hsize, wsize = output.shape[-2:]
-        if grid.shape[2:4] != output.shape[2:4]:
-            yv, xv = torch.meshgrid([torch.arange(hsize), torch.arange(wsize)])
-            grid = torch.stack((xv, yv), 2).view(1, 1, hsize, wsize,
-                                                 2).type(dtype)
-            self.grids[k] = grid
-
-        output = output.view(batch_size, self.n_anchors, n_ch, hsize, wsize)
-        output = output.permute(0, 1, 3, 4,
-                                2).reshape(batch_size,
-                                           self.n_anchors * hsize * wsize, -1)
-        grid = grid.view(1, -1, 2)
-        output[..., :2] = (output[..., :2] + grid) * stride
-        output[..., 2:4] = torch.exp(output[..., 2:4]) * stride
-        return output, grid
-
-    def decode_outputs(self, outputs, dtype):
-        grids = []
-        strides = []
-        for (hsize, wsize), stride in zip(self.hw, self.strides):
-            yv, xv = torch.meshgrid([torch.arange(hsize), torch.arange(wsize)])
-            grid = torch.stack((xv, yv), 2).view(1, -1, 2)
-            grids.append(grid)
-            shape = grid.shape[:2]
-            strides.append(torch.full((*shape, 1), stride, dtype=torch.int))
-
-        grids = torch.cat(grids, dim=1).type(dtype)
-        strides = torch.cat(strides, dim=1).type(dtype)
-
-        outputs[..., :2] = (outputs[..., :2] + grids) * strides
-        outputs[..., 2:4] = torch.exp(outputs[..., 2:4]) * strides
-        return outputs
-
-    def get_losses(
-        self,
-        imgs,
-        x_shifts,
-        y_shifts,
-        expanded_strides,
-        labels,
-        outputs,
-        origin_preds,
-        dtype,
-    ):
-        bbox_preds = outputs[:, :, :4]  # [batch, n_anchors_all, 4]
-        obj_preds = outputs[:, :, 4].unsqueeze(-1)  # [batch, n_anchors_all, 1]
-        cls_preds = outputs[:, :, 5:]  # [batch, n_anchors_all, n_cls]
-
-        # calculate targets
-        nlabel = (labels.sum(dim=2) > 0).sum(dim=1)  # number of objects
-
-        total_num_anchors = outputs.shape[1]
-        x_shifts = torch.cat(x_shifts, 1)  # [1, n_anchors_all]
-        y_shifts = torch.cat(y_shifts, 1)  # [1, n_anchors_all]
-        expanded_strides = torch.cat(expanded_strides, 1)
-        if self.use_l1:
-            origin_preds = torch.cat(origin_preds, 1)
-
-        cls_targets = []
-        reg_targets = []
-        l1_targets = []
-        obj_targets = []
-        fg_masks = []
-
-        num_fg = 0.0
-        num_gts = 0.0
-
-        for batch_idx in range(outputs.shape[0]):
-            num_gt = int(nlabel[batch_idx])
-
-            num_gts += num_gt
-            if num_gt == 0:
-                cls_target = outputs.new_zeros((0, self.num_classes))
-                reg_target = outputs.new_zeros((0, 4))
-                l1_target = outputs.new_zeros((0, 4))
-                obj_target = outputs.new_zeros((total_num_anchors, 1))
-                fg_mask = outputs.new_zeros(total_num_anchors).bool()
-            else:
-                gt_bboxes_per_image = labels[batch_idx, :num_gt, 1:5]
-                gt_classes = labels[batch_idx, :num_gt, 0]
-                bboxes_preds_per_image = bbox_preds[batch_idx]
-
-                try:
-                    (
-                        gt_matched_classes,
-                        fg_mask,
-                        pred_ious_this_matching,
-                        matched_gt_inds,
-                        num_fg_img,
-                    ) = self.get_assignments(  # noqa
-                        batch_idx,
-                        num_gt,
-                        total_num_anchors,
-                        gt_bboxes_per_image,
-                        gt_classes,
-                        bboxes_preds_per_image,
-                        expanded_strides,
-                        x_shifts,
-                        y_shifts,
-                        cls_preds,
-                        bbox_preds,
-                        obj_preds,
-                        labels,
-                        imgs,
-                    )
-
-                except RuntimeError:
-                    logging.error(
-                        'OOM RuntimeError is raised due to the huge memory cost during label assignment. \
-                           CPU mode is applied in this batch. If you want to avoid this issue, \
-                           try to reduce the batch size or image size.')
-                    torch.cuda.empty_cache()
-                    (
-                        gt_matched_classes,
-                        fg_mask,
-                        pred_ious_this_matching,
-                        matched_gt_inds,
-                        num_fg_img,
-                    ) = self.get_assignments(  # noqa
-                        batch_idx,
-                        num_gt,
-                        total_num_anchors,
-                        gt_bboxes_per_image,
-                        gt_classes,
-                        bboxes_preds_per_image,
-                        expanded_strides,
-                        x_shifts,
-                        y_shifts,
-                        cls_preds,
-                        bbox_preds,
-                        obj_preds,
-                        labels,
-                        imgs,
-                        'cpu',
-                    )
-
-                torch.cuda.empty_cache()
-                num_fg += num_fg_img
-
-                cls_target = F.one_hot(
-                    gt_matched_classes.to(torch.int64),
-                    self.num_classes) * pred_ious_this_matching.unsqueeze(-1)
-                obj_target = fg_mask.unsqueeze(-1)
-                reg_target = gt_bboxes_per_image[matched_gt_inds]
-
-                if self.use_l1:
-                    l1_target = self.get_l1_target(
-                        outputs.new_zeros((num_fg_img, 4)),
-                        gt_bboxes_per_image[matched_gt_inds],
-                        expanded_strides[0][fg_mask],
-                        x_shifts=x_shifts[0][fg_mask],
-                        y_shifts=y_shifts[0][fg_mask],
-                    )
-
-            cls_targets.append(cls_target)
-            reg_targets.append(reg_target)
-            obj_targets.append(obj_target.to(dtype))
-            fg_masks.append(fg_mask)
-            if self.use_l1:
-                l1_targets.append(l1_target)
-
-        cls_targets = torch.cat(cls_targets, 0)
-        reg_targets = torch.cat(reg_targets, 0)
-        obj_targets = torch.cat(obj_targets, 0)
-        fg_masks = torch.cat(fg_masks, 0)
-
-        if self.use_l1:
-            l1_targets = torch.cat(l1_targets, 0)
-
-        num_fg = max(num_fg, 1)
-
-        loss_iou = (self.iou_loss(
-            bbox_preds.view(-1, 4)[fg_masks], reg_targets)).sum() / num_fg
-
-        if self.obj_loss_type == 'focal':
-            loss_obj = (self.focal_loss(obj_preds.sigmoid().view(-1, 1),
-                                        obj_targets)).sum() / num_fg
-        else:
-            loss_obj = (self.obj_loss(obj_preds.view(-1, 1),
-                                      obj_targets)).sum() / num_fg
-        loss_cls = (self.bcewithlog_loss(
-            cls_preds.view(-1, self.num_classes)[fg_masks],
-            cls_targets)).sum() / num_fg
-
-        if self.use_l1:
-            loss_l1 = (self.l1_loss(
-                origin_preds.view(-1, 4)[fg_masks], l1_targets)).sum() / num_fg
-        else:
-            loss_l1 = 0.0
-
-        reg_weight = 5.0
-        loss = reg_weight * loss_iou + loss_obj + loss_cls + loss_l1
-
-        return (
-            loss,
-            reg_weight * loss_iou,
-            loss_obj,
-            loss_cls,
-            loss_l1,
-            num_fg / max(num_gts, 1),
-        )
-
-    def focal_loss(self, pred, gt):
-        pos_inds = gt.eq(1).float()
-        neg_inds = gt.eq(0).float()
-        pos_loss = torch.log(pred + 1e-5) * torch.pow(1 - pred,
-                                                      2) * pos_inds * 0.75
-        neg_loss = torch.log(1 - pred + 1e-5) * torch.pow(pred,
-                                                          2) * neg_inds * 0.25
-        loss = -(pos_loss + neg_loss)
-        return loss
-
-    def get_l1_target(self,
-                      l1_target,
-                      gt,
-                      stride,
-                      x_shifts,
-                      y_shifts,
-                      eps=1e-8):
-        l1_target[:, 0] = gt[:, 0] / stride - x_shifts
-        l1_target[:, 1] = gt[:, 1] / stride - y_shifts
-        l1_target[:, 2] = torch.log(gt[:, 2] / stride + eps)
-        l1_target[:, 3] = torch.log(gt[:, 3] / stride + eps)
-        return l1_target
-
-    @torch.no_grad()
-    def get_assignments(
-        self,
-        batch_idx,
-        num_gt,
-        total_num_anchors,
-        gt_bboxes_per_image,
-        gt_classes,
-        bboxes_preds_per_image,
-        expanded_strides,
-        x_shifts,
-        y_shifts,
-        cls_preds,
-        bbox_preds,
-        obj_preds,
-        labels,
-        imgs,
-        mode='gpu',
-    ):
-
-        if mode == 'cpu':
-            print('------------CPU Mode for This Batch-------------')
-            gt_bboxes_per_image = gt_bboxes_per_image.cpu().float()
-            bboxes_preds_per_image = bboxes_preds_per_image.cpu().float()
-            gt_classes = gt_classes.cpu().float()
-            expanded_strides = expanded_strides.cpu().float()
-            x_shifts = x_shifts.cpu()
-            y_shifts = y_shifts.cpu()
-
-        fg_mask, is_in_boxes_and_center = self.get_in_boxes_info(
-            gt_bboxes_per_image,
-            expanded_strides,
-            x_shifts,
-            y_shifts,
-            total_num_anchors,
-            num_gt,
-        )
-        # reference to: https://github.com/Megvii-BaseDetection/YOLOX/pull/811
-        # NOTE: Fix `selected index k out of range`
-        npa: int = fg_mask.sum().item()  # number of positive anchors
-
-        if npa == 0:
-            gt_matched_classes = torch.zeros(0, device=fg_mask.device).long()
-            pred_ious_this_matching = torch.rand(0, device=fg_mask.device)
-            matched_gt_inds = gt_matched_classes
-            num_fg = npa
-
-            if mode == 'cpu':
-                gt_matched_classes = gt_matched_classes.cuda()
-                fg_mask = fg_mask.cuda()
-                pred_ious_this_matching = pred_ious_this_matching.cuda()
-                matched_gt_inds = matched_gt_inds.cuda()
-                num_fg = num_fg.cuda()
-
-            return (
-                gt_matched_classes,
-                fg_mask,
-                pred_ious_this_matching,
-                matched_gt_inds,
-                num_fg,
-            )
 
-        bboxes_preds_per_image = bboxes_preds_per_image[fg_mask]
-        cls_preds_ = cls_preds[batch_idx][fg_mask]
-        obj_preds_ = obj_preds[batch_idx][fg_mask]
-        num_in_boxes_anchor = bboxes_preds_per_image.shape[0]
-
-        if mode == 'cpu':
-            gt_bboxes_per_image = gt_bboxes_per_image.cpu()
-            bboxes_preds_per_image = bboxes_preds_per_image.cpu()
-
-        pair_wise_ious = bboxes_iou(gt_bboxes_per_image,
-                                    bboxes_preds_per_image, False)
-
-        if (torch.isnan(pair_wise_ious.max())):
-            pair_wise_ious = bboxes_iou(gt_bboxes_per_image,
-                                        bboxes_preds_per_image, False)
-
-        gt_cls_per_image = (
-            F.one_hot(gt_classes.to(torch.int64),
-                      self.num_classes).float().unsqueeze(1).repeat(
-                          1, num_in_boxes_anchor, 1))
-        pair_wise_ious_loss = -torch.log(pair_wise_ious + 1e-8)
-
-        if mode == 'cpu':
-            cls_preds_, obj_preds_ = cls_preds_.cpu(), obj_preds_.cpu()
-
-        if LooseVersion(torch.__version__) >= LooseVersion('1.6.0'):
-            with torch.cuda.amp.autocast(enabled=False):
-                cls_preds_ = (
-                    cls_preds_.float().unsqueeze(0).repeat(num_gt, 1,
-                                                           1).sigmoid_() *
-                    obj_preds_.float().unsqueeze(0).repeat(num_gt, 1,
-                                                           1).sigmoid_())
-                pair_wise_cls_loss = F.binary_cross_entropy(
-                    cls_preds_.sqrt_(), gt_cls_per_image,
-                    reduction='none').sum(-1)
-        else:
-            cls_preds_ = (
-                cls_preds_.float().unsqueeze(0).repeat(num_gt, 1,
-                                                       1).sigmoid_() *
-                obj_preds_.float().unsqueeze(0).repeat(num_gt, 1,
-                                                       1).sigmoid_())
-            pair_wise_cls_loss = F.binary_cross_entropy(
-                cls_preds_.sqrt_(), gt_cls_per_image, reduction='none').sum(-1)
-
-        del cls_preds_
-
-        cost = (
-            pair_wise_cls_loss + 3.0 * pair_wise_ious_loss + 100000.0 *
-            (~is_in_boxes_and_center))
-
-        (
-            num_fg,
-            gt_matched_classes,
-            pred_ious_this_matching,
-            matched_gt_inds,
-        ) = self.dynamic_k_matching(cost, pair_wise_ious, gt_classes, num_gt,
-                                    fg_mask)
-
-        del pair_wise_cls_loss, cost, pair_wise_ious, pair_wise_ious_loss
-
-        if mode == 'cpu':
-            gt_matched_classes = gt_matched_classes.cuda()
-            fg_mask = fg_mask.cuda()
-            pred_ious_this_matching = pred_ious_this_matching.cuda()
-            matched_gt_inds = matched_gt_inds.cuda()
-
-        return (
-            gt_matched_classes,
-            fg_mask,
-            pred_ious_this_matching,
-            matched_gt_inds,
-            num_fg,
-        )
-
-    def get_in_boxes_info(
-        self,
-        gt_bboxes_per_image,
-        expanded_strides,
-        x_shifts,
-        y_shifts,
-        total_num_anchors,
-        num_gt,
-    ):
-        expanded_strides_per_image = expanded_strides[0]
-        x_shifts_per_image = x_shifts[0] * expanded_strides_per_image
-        y_shifts_per_image = y_shifts[0] * expanded_strides_per_image
-        x_centers_per_image = (
-            (x_shifts_per_image +
-             0.5 * expanded_strides_per_image).unsqueeze(0).repeat(num_gt, 1)
-        )  # [n_anchor] -> [n_gt, n_anchor]
-        y_centers_per_image = (
-            (y_shifts_per_image +
-             0.5 * expanded_strides_per_image).unsqueeze(0).repeat(num_gt, 1))
-
-        gt_bboxes_per_image_l = (
-            (gt_bboxes_per_image[:, 0] -
-             0.5 * gt_bboxes_per_image[:, 2]).unsqueeze(1).repeat(
-                 1, total_num_anchors))
-        gt_bboxes_per_image_r = (
-            (gt_bboxes_per_image[:, 0] +
-             0.5 * gt_bboxes_per_image[:, 2]).unsqueeze(1).repeat(
-                 1, total_num_anchors))
-        gt_bboxes_per_image_t = (
-            (gt_bboxes_per_image[:, 1] -
-             0.5 * gt_bboxes_per_image[:, 3]).unsqueeze(1).repeat(
-                 1, total_num_anchors))
-        gt_bboxes_per_image_b = (
-            (gt_bboxes_per_image[:, 1] +
-             0.5 * gt_bboxes_per_image[:, 3]).unsqueeze(1).repeat(
-                 1, total_num_anchors))
-
-        b_l = x_centers_per_image - gt_bboxes_per_image_l
-        b_r = gt_bboxes_per_image_r - x_centers_per_image
-        b_t = y_centers_per_image - gt_bboxes_per_image_t
-        b_b = gt_bboxes_per_image_b - y_centers_per_image
-        bbox_deltas = torch.stack([b_l, b_t, b_r, b_b], 2)
-
-        is_in_boxes = bbox_deltas.min(dim=-1).values > 0.0
-        is_in_boxes_all = is_in_boxes.sum(dim=0) > 0
-        # in fixed center
-
-        center_radius = 2.5
-
-        gt_bboxes_per_image_l = (
-            gt_bboxes_per_image[:, 0]).unsqueeze(1).repeat(
-                1, total_num_anchors
-            ) - center_radius * expanded_strides_per_image.unsqueeze(0)
-        gt_bboxes_per_image_r = (
-            gt_bboxes_per_image[:, 0]).unsqueeze(1).repeat(
-                1, total_num_anchors
-            ) + center_radius * expanded_strides_per_image.unsqueeze(0)
-        gt_bboxes_per_image_t = (
-            gt_bboxes_per_image[:, 1]).unsqueeze(1).repeat(
-                1, total_num_anchors
-            ) - center_radius * expanded_strides_per_image.unsqueeze(0)
-        gt_bboxes_per_image_b = (
-            gt_bboxes_per_image[:, 1]).unsqueeze(1).repeat(
-                1, total_num_anchors
-            ) + center_radius * expanded_strides_per_image.unsqueeze(0)
-
-        c_l = x_centers_per_image - gt_bboxes_per_image_l
-        c_r = gt_bboxes_per_image_r - x_centers_per_image
-        c_t = y_centers_per_image - gt_bboxes_per_image_t
-        c_b = gt_bboxes_per_image_b - y_centers_per_image
-        center_deltas = torch.stack([c_l, c_t, c_r, c_b], 2)
-        is_in_centers = center_deltas.min(dim=-1).values > 0.0
-        is_in_centers_all = is_in_centers.sum(dim=0) > 0
-
-        # in boxes and in centers
-        is_in_boxes_anchor = is_in_boxes_all | is_in_centers_all
-
-        is_in_boxes_and_center = (
-            is_in_boxes[:, is_in_boxes_anchor]
-            & is_in_centers[:, is_in_boxes_anchor])
-        return is_in_boxes_anchor, is_in_boxes_and_center
-
-    def dynamic_k_matching(self, cost, pair_wise_ious, gt_classes, num_gt,
-                           fg_mask):
-
-        # Dynamic K
-        # ---------------------------------------------------------------
-        matching_matrix = torch.zeros_like(cost, dtype=torch.uint8)
-
-        ious_in_boxes_matrix = pair_wise_ious
-        n_candidate_k = min(10, ious_in_boxes_matrix.size(1))
-
-        topk_ious, _ = torch.topk(ious_in_boxes_matrix, n_candidate_k, dim=1)
-        dynamic_ks = torch.clamp(topk_ious.sum(1).int(), min=1)
-        dynamic_ks = dynamic_ks.tolist()
-
-        for gt_idx in range(num_gt):
-            _, pos_idx = torch.topk(
-                cost[gt_idx], k=dynamic_ks[gt_idx], largest=False)
-            matching_matrix[gt_idx][pos_idx] = 1
-
-        del topk_ious, dynamic_ks, pos_idx
-
-        anchor_matching_gt = matching_matrix.sum(0)
-        if (anchor_matching_gt > 1).sum() > 0:
-            _, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0)
-            matching_matrix[:, anchor_matching_gt > 1] *= 0
-            matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1
-        fg_mask_inboxes = matching_matrix.sum(0) > 0
-        num_fg = fg_mask_inboxes.sum().item()
-
-        fg_mask[fg_mask.clone()] = fg_mask_inboxes
-
-        matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0)
-        gt_matched_classes = gt_classes[matched_gt_inds]
-
-        pred_ious_this_matching = (matching_matrix *
-                                   pair_wise_ious).sum(0)[fg_mask_inboxes]
-
-        return num_fg, gt_matched_classes, pred_ious_this_matching, matched_gt_inds
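The decoding and label-assignment methods deleted above (get_output_and_grid, decode_outputs, get_losses, get_assignments, dynamic_k_matching, ...) follow the standard YOLOX implementation and are presumably shared via the YOLOXHead_Template base class that both heads now subclass. For reference, a tiny worked sketch of the box decoding they implement; an illustration only, not code from this patch:

import math

def decode_box(tx, ty, tw, th, gx, gy, stride):
    # raw prediction (tx, ty, tw, th) at grid cell (gx, gy) on a stride-s level
    # -> box center/size in input-image pixels, matching
    #    out[..., :2]  = (out[..., :2] + grid) * stride
    #    out[..., 2:4] = exp(out[..., 2:4]) * stride
    return ((tx + gx) * stride, (ty + gy) * stride,
            math.exp(tw) * stride, math.exp(th) * stride)

print(decode_box(0.3, -0.1, 0.2, 0.5, 10, 7, 16))
# -> (164.8, 110.4, ~19.5, ~26.4)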
diff --git a/easycv/models/detection/detectors/yolox/yolo_head.py b/easycv/models/detection/detectors/yolox/yolo_head.py
index fa092fc0..1c9b125e 100644
--- a/easycv/models/detection/detectors/yolox/yolo_head.py
+++ b/easycv/models/detection/detectors/yolox/yolo_head.py
@@ -1,29 +1,25 @@
 # Copyright (c) 2014-2021 Megvii Inc And Alibaba PAI-Teams. All rights reserved.
-import logging
-import math
-from distutils.version import LooseVersion
-
 import torch
 import torch.nn as nn
-import torch.nn.functional as F
-
-from easycv.models.backbones.network_blocks import BaseConv, DWConv
-from easycv.models.detection.utils import bboxes_iou
-from easycv.models.loss import FocalLoss, IOUloss, VarifocalLoss
 
+from easycv.models.builder import HEADS
+from .yolo_head_template import YOLOXHead_Template
 
-class YOLOXHead(nn.Module):
 
+@HEADS.register_module
+class YOLOXHead(YOLOXHead_Template):
     def __init__(self,
-                 num_classes,
-                 width=1.0,
+                 num_classes=80,
+                 model_type='s',
                  strides=[8, 16, 32],
                  in_channels=[256, 512, 1024],
                  act='silu',
                  depthwise=False,
                  stage='CLOUD',
-                 obj_loss_type='l1',
-                 reg_loss_type='l1'):
+                 obj_loss_type='BCE',
+                 reg_loss_type='giou',
+                 decode_in_inference=True):
         """
         Args:
             num_classes (int): detection class numbers.
@@ -36,123 +32,19 @@ def __init__(self,
             obj_loss_type (str): the loss function of the obj conf. Default value: l1.
             reg_loss_type (str): the loss function of the box prediction. Default value: l1.
         """
-        super().__init__()
-
-        self.n_anchors = 1
-        self.num_classes = num_classes
-        self.stage = stage
-        self.decode_in_inference = True  # for deploy, set to False
-
-        self.cls_convs = nn.ModuleList()
-        self.reg_convs = nn.ModuleList()
-        self.cls_preds = nn.ModuleList()
-        self.reg_preds = nn.ModuleList()
-        self.obj_preds = nn.ModuleList()
-        self.stems = nn.ModuleList()
-        Conv = DWConv if depthwise else BaseConv
-
-        for i in range(len(in_channels)):
-            self.stems.append(
-                BaseConv(
-                    in_channels=int(in_channels[i] * width),
-                    out_channels=int(256 * width),
-                    ksize=1,
-                    stride=1,
-                    act=act,
-                ))
-            self.cls_convs.append(
-                nn.Sequential(*[
-                    Conv(
-                        in_channels=int(256 * width),
-                        out_channels=int(256 * width),
-                        ksize=3,
-                        stride=1,
-                        act=act,
-                    ),
-                    Conv(
-                        in_channels=int(256 * width),
-                        out_channels=int(256 * width),
-                        ksize=3,
-                        stride=1,
-                        act=act,
-                    ),
-                ]))
-            self.reg_convs.append(
-                nn.Sequential(*[
-                    Conv(
-                        in_channels=int(256 * width),
-                        out_channels=int(256 * width),
-                        ksize=3,
-                        stride=1,
-                        act=act,
-                    ),
-                    Conv(
-                        in_channels=int(256 * width),
-                        out_channels=int(256 * width),
-                        ksize=3,
-                        stride=1,
-                        act=act,
-                    ),
-                ]))
-            self.cls_preds.append(
-                nn.Conv2d(
-                    in_channels=int(256 * width),
-                    out_channels=self.n_anchors * self.num_classes,
-                    kernel_size=1,
-                    stride=1,
-                    padding=0,
-                ))
-            self.reg_preds.append(
-                nn.Conv2d(
-                    in_channels=int(256 * width),
-                    out_channels=4,
-                    kernel_size=1,
-                    stride=1,
-                    padding=0,
-                ))
-            self.obj_preds.append(
-                nn.Conv2d(
-                    in_channels=int(256 * width),
-                    out_channels=self.n_anchors * 1,
-                    kernel_size=1,
-                    stride=1,
-                    padding=0,
-                ))
-
-        self.bcewithlog_loss = nn.BCEWithLogitsLoss(reduction='none')
-
-        # if reg_loss_type=='l1':
-        self.use_l1 = False
-        self.l1_loss = nn.L1Loss(reduction='none')
-        # else:
-        #     self.use_l1 = False
-
-        self.iou_loss = IOUloss(reduction='none', loss_type=reg_loss_type)
-
-        self.obj_loss_type = obj_loss_type
-        if obj_loss_type == 'BCE':
-            self.obj_loss = nn.BCEWithLogitsLoss(reduction='none')
-        elif obj_loss_type == 'focal':
-            self.obj_loss = FocalLoss(reduction='none')
-
-        elif obj_loss_type == 'v_focal':
-            self.obj_loss = VarifocalLoss(reduction='none')
-        else:
-            assert 'Undefined loss type: {}'.format(obj_loss_type)
-
-        self.strides = strides
-        self.grids = [torch.zeros(1)] * len(in_channels)
-
-    def initialize_biases(self, prior_prob):
-        for conv in self.cls_preds:
-            b = conv.bias.view(self.n_anchors, -1)
-            b.data.fill_(-math.log((1 - prior_prob) / prior_prob))
-            conv.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
+        super(YOLOXHead, self).__init__(
+            num_classes=num_classes,
+            model_type=model_type,
+            strides=strides,
+            in_channels=in_channels,
+            act=act,
+            depthwise=depthwise,
+            stage=stage,
+            obj_loss_type=obj_loss_type,
+            reg_loss_type=reg_loss_type,
+            decode_in_inference=decode_in_inference)
 
-        for conv in self.obj_preds:
-            b = conv.bias.view(self.n_anchors, -1)
-            b.data.fill_(-math.log((1 - prior_prob) / prior_prob))
-            conv.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
 
     def forward(self, xin, labels=None, imgs=None):
         outputs = []
@@ -230,490 +122,3 @@ def forward(self, xin, labels=None, imgs=None):
                 return self.decode_outputs(outputs, dtype=xin[0].type())
             else:
                 return outputs
-
-    def get_output_and_grid(self, output, k, stride, dtype):
-        grid = self.grids[k]
-
-        batch_size = output.shape[0]
-        n_ch = 5 + self.num_classes
-        hsize, wsize = output.shape[-2:]
-        if grid.shape[2:4] != output.shape[2:4]:
-            yv, xv = torch.meshgrid([torch.arange(hsize), torch.arange(wsize)])
-            grid = torch.stack((xv, yv), 2).view(1, 1, hsize, wsize,
-                                                 2).type(dtype)
-            self.grids[k] = grid
-
-        output = output.view(batch_size, self.n_anchors, n_ch, hsize, wsize)
-        output = output.permute(0, 1, 3, 4,
-                                2).reshape(batch_size,
-                                           self.n_anchors * hsize * wsize, -1)
-        grid = grid.view(1, -1, 2)
-        output[..., :2] = (output[..., :2] + grid) * stride
-        output[..., 2:4] = torch.exp(output[..., 2:4]) * stride
-        return output, grid
-
-    def decode_outputs(self, outputs, dtype):
-        grids = []
-        strides = []
-        for (hsize, wsize), stride in zip(self.hw, self.strides):
-            yv, xv = torch.meshgrid([torch.arange(hsize), torch.arange(wsize)])
-            grid = torch.stack((xv, yv), 2).view(1, -1, 2)
-            grids.append(grid)
-            shape = grid.shape[:2]
-            strides.append(torch.full((*shape, 1), stride, dtype=torch.int))
-
-        grids = torch.cat(grids, dim=1).type(dtype)
-        strides = torch.cat(strides, dim=1).type(dtype)
-
-        outputs[..., :2] = (outputs[..., :2] + grids) * strides
-        outputs[..., 2:4] = torch.exp(outputs[..., 2:4]) * strides
-        return outputs
-
-    def get_losses(
-        self,
-        imgs,
-        x_shifts,
-        y_shifts,
-        expanded_strides,
-        labels,
-        outputs,
-        origin_preds,
-        dtype,
-    ):
-        bbox_preds = outputs[:, :, :4]  # [batch, n_anchors_all, 4]
-        obj_preds = outputs[:, :, 4].unsqueeze(-1)  # [batch, n_anchors_all, 1]
-        cls_preds = outputs[:, :, 5:]  # [batch, n_anchors_all, n_cls]
-
-        # calculate targets
-        nlabel = (labels.sum(dim=2) > 0).sum(dim=1)  # number of objects
-
-        total_num_anchors = outputs.shape[1]
-        x_shifts = torch.cat(x_shifts, 1)  # [1, n_anchors_all]
-        y_shifts = torch.cat(y_shifts, 1)  # [1, n_anchors_all]
-        expanded_strides = torch.cat(expanded_strides, 1)
-        if self.use_l1:
-            origin_preds = torch.cat(origin_preds, 1)
-
-        cls_targets = []
-        reg_targets = []
-        l1_targets = []
-        obj_targets = []
-        fg_masks = []
-
-        num_fg = 0.0
-        num_gts = 0.0
-
-        for batch_idx in range(outputs.shape[0]):
-            num_gt = int(nlabel[batch_idx])
-
-            num_gts += num_gt
-            if num_gt == 0:
-                cls_target = outputs.new_zeros((0, self.num_classes))
-                reg_target = outputs.new_zeros((0, 4))
-                l1_target = outputs.new_zeros((0, 4))
-                obj_target = outputs.new_zeros((total_num_anchors, 1))
-                fg_mask = outputs.new_zeros(total_num_anchors).bool()
-            else:
-                gt_bboxes_per_image = labels[batch_idx, :num_gt, 1:5]
-                gt_classes = labels[batch_idx, :num_gt, 0]
-                bboxes_preds_per_image = bbox_preds[batch_idx]
-
-                try:
-                    (
-                        gt_matched_classes,
-                        fg_mask,
-                        pred_ious_this_matching,
-                        matched_gt_inds,
-                        num_fg_img,
-                    ) = self.get_assignments(  # noqa
-                        batch_idx,
-                        num_gt,
-                        total_num_anchors,
-                        gt_bboxes_per_image,
-                        gt_classes,
-                        bboxes_preds_per_image,
-                        expanded_strides,
-                        x_shifts,
-                        y_shifts,
-                        cls_preds,
-                        bbox_preds,
-                        obj_preds,
-                        labels,
-                        imgs,
-                    )
-
-                except RuntimeError:
-                    logging.error(
-                        'OOM RuntimeError is raised due to the huge memory cost during label assignment. \
-                           CPU mode is applied in this batch. If you want to avoid this issue, \
-                           try to reduce the batch size or image size.')
-                    torch.cuda.empty_cache()
-                    (
-                        gt_matched_classes,
-                        fg_mask,
-                        pred_ious_this_matching,
-                        matched_gt_inds,
-                        num_fg_img,
-                    ) = self.get_assignments(  # noqa
-                        batch_idx,
-                        num_gt,
-                        total_num_anchors,
-                        gt_bboxes_per_image,
-                        gt_classes,
-                        bboxes_preds_per_image,
-                        expanded_strides,
-                        x_shifts,
-                        y_shifts,
-                        cls_preds,
-                        bbox_preds,
-                        obj_preds,
-                        labels,
-                        imgs,
-                        'cpu',
-                    )
-
-                torch.cuda.empty_cache()
-                num_fg += num_fg_img
-
-                cls_target = F.one_hot(
-                    gt_matched_classes.to(torch.int64),
-                    self.num_classes) * pred_ious_this_matching.unsqueeze(-1)
-                obj_target = fg_mask.unsqueeze(-1)
-                reg_target = gt_bboxes_per_image[matched_gt_inds]
-
-                if self.use_l1:
-                    l1_target = self.get_l1_target(
-                        outputs.new_zeros((num_fg_img, 4)),
-                        gt_bboxes_per_image[matched_gt_inds],
-                        expanded_strides[0][fg_mask],
-                        x_shifts=x_shifts[0][fg_mask],
-                        y_shifts=y_shifts[0][fg_mask],
-                    )
-
-            cls_targets.append(cls_target)
-            reg_targets.append(reg_target)
-            obj_targets.append(obj_target.to(dtype))
-            fg_masks.append(fg_mask)
-            if self.use_l1:
-                l1_targets.append(l1_target)
-
-        cls_targets = torch.cat(cls_targets, 0)
-        reg_targets = torch.cat(reg_targets, 0)
-        obj_targets = torch.cat(obj_targets, 0)
-        fg_masks = torch.cat(fg_masks, 0)
-
-        if self.use_l1:
-            l1_targets = torch.cat(l1_targets, 0)
-
-        num_fg = max(num_fg, 1)
-
-        loss_iou = (self.iou_loss(
-            bbox_preds.view(-1, 4)[fg_masks], reg_targets)).sum() / num_fg
-
-        if self.obj_loss_type == 'focal':
-            loss_obj = (self.focal_loss(obj_preds.sigmoid().view(-1, 1),
-                                        obj_targets)).sum() / num_fg
-        else:
-            loss_obj = (self.obj_loss(obj_preds.view(-1, 1),
-                                      obj_targets)).sum() / num_fg
-        loss_cls = (self.bcewithlog_loss(
-            cls_preds.view(-1, self.num_classes)[fg_masks],
-            cls_targets)).sum() / num_fg
-
-        if self.use_l1:
-            loss_l1 = (self.l1_loss(
-                origin_preds.view(-1, 4)[fg_masks], l1_targets)).sum() / num_fg
-        else:
-            loss_l1 = 0.0
-
-        reg_weight = 5.0
-        loss = reg_weight * loss_iou + loss_obj + loss_cls + loss_l1
-
-        return (
-            loss,
-            reg_weight * loss_iou,
-            loss_obj,
-            loss_cls,
-            loss_l1,
-            num_fg / max(num_gts, 1),
-        )
-
-    def focal_loss(self, pred, gt):
-        pos_inds = gt.eq(1).float()
-        neg_inds = gt.eq(0).float()
-        pos_loss = torch.log(pred + 1e-5) * torch.pow(1 - pred,
-                                                      2) * pos_inds * 0.75
-        neg_loss = torch.log(1 - pred + 1e-5) * torch.pow(pred,
-                                                          2) * neg_inds * 0.25
-        loss = -(pos_loss + neg_loss)
-        return loss
-
-    def get_l1_target(self,
-                      l1_target,
-                      gt,
-                      stride,
-                      x_shifts,
-                      y_shifts,
-                      eps=1e-8):
-        l1_target[:, 0] = gt[:, 0] / stride - x_shifts
-        l1_target[:, 1] = gt[:, 1] / stride - y_shifts
-        l1_target[:, 2] = torch.log(gt[:, 2] / stride + eps)
-        l1_target[:, 3] = torch.log(gt[:, 3] / stride + eps)
-        return l1_target
-
-    @torch.no_grad()
-    def get_assignments(
-        self,
-        batch_idx,
-        num_gt,
-        total_num_anchors,
-        gt_bboxes_per_image,
-        gt_classes,
-        bboxes_preds_per_image,
-        expanded_strides,
-        x_shifts,
-        y_shifts,
-        cls_preds,
-        bbox_preds,
-        obj_preds,
-        labels,
-        imgs,
-        mode='gpu',
-    ):
-
-        if mode == 'cpu':
-            print('------------CPU Mode for This Batch-------------')
-            gt_bboxes_per_image = gt_bboxes_per_image.cpu().float()
-            bboxes_preds_per_image = bboxes_preds_per_image.cpu().float()
-            gt_classes = gt_classes.cpu().float()
-            expanded_strides = expanded_strides.cpu().float()
-            x_shifts = x_shifts.cpu()
-            y_shifts = y_shifts.cpu()
-
-        fg_mask, is_in_boxes_and_center = self.get_in_boxes_info(
-            gt_bboxes_per_image,
-            expanded_strides,
-            x_shifts,
-            y_shifts,
-            total_num_anchors,
-            num_gt,
-        )
-        # reference to: https://github.com/Megvii-BaseDetection/YOLOX/pull/811
-        # NOTE: Fix `selected index k out of range`
-        npa: int = fg_mask.sum().item()  # number of positive anchors
-
-        if npa == 0:
-            gt_matched_classes = torch.zeros(0, device=fg_mask.device).long()
-            pred_ious_this_matching = torch.rand(0, device=fg_mask.device)
-            matched_gt_inds = gt_matched_classes
-            num_fg = npa
-
-            if mode == 'cpu':
-                gt_matched_classes = gt_matched_classes.cuda()
-                fg_mask = fg_mask.cuda()
-                pred_ious_this_matching = pred_ious_this_matching.cuda()
-                matched_gt_inds = matched_gt_inds.cuda()
-                num_fg = num_fg.cuda()
-
-            return (
-                gt_matched_classes,
-                fg_mask,
-                pred_ious_this_matching,
-                matched_gt_inds,
-                num_fg,
-            )
-
-        bboxes_preds_per_image = bboxes_preds_per_image[fg_mask]
-        cls_preds_ = cls_preds[batch_idx][fg_mask]
-        obj_preds_ = obj_preds[batch_idx][fg_mask]
-        num_in_boxes_anchor = bboxes_preds_per_image.shape[0]
-
-        if mode == 'cpu':
-            gt_bboxes_per_image = gt_bboxes_per_image.cpu()
-            bboxes_preds_per_image = bboxes_preds_per_image.cpu()
-
-        pair_wise_ious = bboxes_iou(gt_bboxes_per_image,
-                                    bboxes_preds_per_image, False)
-
-        if (torch.isnan(pair_wise_ious.max())):
-            pair_wise_ious = bboxes_iou(gt_bboxes_per_image,
-                                        bboxes_preds_per_image, False)
-
-        gt_cls_per_image = (
-            F.one_hot(gt_classes.to(torch.int64),
-                      self.num_classes).float().unsqueeze(1).repeat(
-                          1, num_in_boxes_anchor, 1))
-        pair_wise_ious_loss = -torch.log(pair_wise_ious + 1e-8)
-
-        if mode == 'cpu':
-            cls_preds_, obj_preds_ = cls_preds_.cpu(), obj_preds_.cpu()
-
-        if LooseVersion(torch.__version__) >= LooseVersion('1.6.0'):
-            with torch.cuda.amp.autocast(enabled=False):
-                cls_preds_ = (
-                    cls_preds_.float().unsqueeze(0).repeat(num_gt, 1,
-                                                           1).sigmoid_() *
-                    obj_preds_.float().unsqueeze(0).repeat(num_gt, 1,
-                                                           1).sigmoid_())
-                pair_wise_cls_loss = F.binary_cross_entropy(
-                    cls_preds_.sqrt_(), gt_cls_per_image,
-                    reduction='none').sum(-1)
-        else:
-            cls_preds_ = (
-                cls_preds_.float().unsqueeze(0).repeat(num_gt, 1,
-                                                       1).sigmoid_() *
-                obj_preds_.float().unsqueeze(0).repeat(num_gt, 1,
-                                                       1).sigmoid_())
-            pair_wise_cls_loss = F.binary_cross_entropy(
-                cls_preds_.sqrt_(), gt_cls_per_image, reduction='none').sum(-1)
-
-        del cls_preds_
-
-        cost = (
-            pair_wise_cls_loss + 3.0 * pair_wise_ious_loss + 100000.0 *
-            (~is_in_boxes_and_center))
-
-        (
-            num_fg,
-            gt_matched_classes,
-            pred_ious_this_matching,
-            matched_gt_inds,
-        ) = self.dynamic_k_matching(cost, pair_wise_ious, gt_classes, num_gt,
-                                    fg_mask)
-
-        del pair_wise_cls_loss, cost, pair_wise_ious, pair_wise_ious_loss
-
-        if mode == 'cpu':
-            gt_matched_classes = gt_matched_classes.cuda()
-            fg_mask = fg_mask.cuda()
-            pred_ious_this_matching = pred_ious_this_matching.cuda()
-            matched_gt_inds = matched_gt_inds.cuda()
-
-        return (
-            gt_matched_classes,
-            fg_mask,
-            pred_ious_this_matching,
-            matched_gt_inds,
-            num_fg,
-        )
-
-    def get_in_boxes_info(
-        self,
-        gt_bboxes_per_image,
-        expanded_strides,
-        x_shifts,
-        y_shifts,
-        total_num_anchors,
-        num_gt,
-    ):
-        expanded_strides_per_image = expanded_strides[0]
-        x_shifts_per_image = x_shifts[0] * expanded_strides_per_image
-        y_shifts_per_image = y_shifts[0] * expanded_strides_per_image
-        x_centers_per_image = (
-            (x_shifts_per_image +
-             0.5 * expanded_strides_per_image).unsqueeze(0).repeat(num_gt, 1)
-        )  # [n_anchor] -> [n_gt, n_anchor]
-        y_centers_per_image = (
-            (y_shifts_per_image +
-             0.5 * expanded_strides_per_image).unsqueeze(0).repeat(num_gt, 1))
-
-        gt_bboxes_per_image_l = (
-            (gt_bboxes_per_image[:, 0] -
-             0.5 * gt_bboxes_per_image[:, 2]).unsqueeze(1).repeat(
-                 1, total_num_anchors))
-        gt_bboxes_per_image_r = (
-            (gt_bboxes_per_image[:, 0] +
-             0.5 * gt_bboxes_per_image[:, 2]).unsqueeze(1).repeat(
-                 1, total_num_anchors))
-        gt_bboxes_per_image_t = (
-            (gt_bboxes_per_image[:, 1] -
-             0.5 * gt_bboxes_per_image[:, 3]).unsqueeze(1).repeat(
-                 1, total_num_anchors))
-        gt_bboxes_per_image_b = (
-            (gt_bboxes_per_image[:, 1] +
-             0.5 * gt_bboxes_per_image[:, 3]).unsqueeze(1).repeat(
-                 1, total_num_anchors))
-
-        b_l = x_centers_per_image - gt_bboxes_per_image_l
-        b_r = gt_bboxes_per_image_r - x_centers_per_image
-        b_t = y_centers_per_image - gt_bboxes_per_image_t
-        b_b = gt_bboxes_per_image_b - y_centers_per_image
-        bbox_deltas = torch.stack([b_l, b_t, b_r, b_b], 2)
-
-        is_in_boxes = bbox_deltas.min(dim=-1).values > 0.0
-        is_in_boxes_all = is_in_boxes.sum(dim=0) > 0
-        # in fixed center
-
-        center_radius = 2.5
-
-        gt_bboxes_per_image_l = (
-            gt_bboxes_per_image[:, 0]).unsqueeze(1).repeat(
-                1, total_num_anchors
-            ) - center_radius * expanded_strides_per_image.unsqueeze(0)
-        gt_bboxes_per_image_r = (
-            gt_bboxes_per_image[:, 0]).unsqueeze(1).repeat(
-                1, total_num_anchors
-            ) + center_radius * expanded_strides_per_image.unsqueeze(0)
-        gt_bboxes_per_image_t = (
-            gt_bboxes_per_image[:, 1]).unsqueeze(1).repeat(
-                1, total_num_anchors
-            ) - center_radius * expanded_strides_per_image.unsqueeze(0)
-        gt_bboxes_per_image_b = (
-            gt_bboxes_per_image[:, 1]).unsqueeze(1).repeat(
-                1, total_num_anchors
-            ) + center_radius * expanded_strides_per_image.unsqueeze(0)
-
-        c_l = x_centers_per_image - gt_bboxes_per_image_l
-        c_r = gt_bboxes_per_image_r - x_centers_per_image
-        c_t = y_centers_per_image - gt_bboxes_per_image_t
-        c_b = gt_bboxes_per_image_b - y_centers_per_image
-        center_deltas = torch.stack([c_l, c_t, c_r, c_b], 2)
-        is_in_centers = center_deltas.min(dim=-1).values > 0.0
-        is_in_centers_all = is_in_centers.sum(dim=0) > 0
-
-        # in boxes and in centers
-        is_in_boxes_anchor = is_in_boxes_all | is_in_centers_all
-
-        is_in_boxes_and_center = (
-            is_in_boxes[:, is_in_boxes_anchor]
-            & is_in_centers[:, is_in_boxes_anchor])
-        return is_in_boxes_anchor, is_in_boxes_and_center
-
-    def dynamic_k_matching(self, cost, pair_wise_ious, gt_classes, num_gt,
-                           fg_mask):
-
-        # Dynamic K
-        # ---------------------------------------------------------------
-        matching_matrix = torch.zeros_like(cost, dtype=torch.uint8)
-
-        ious_in_boxes_matrix = pair_wise_ious
-        n_candidate_k = min(10, ious_in_boxes_matrix.size(1))
-
-        topk_ious, _ = torch.topk(ious_in_boxes_matrix, n_candidate_k, dim=1)
-        dynamic_ks = torch.clamp(topk_ious.sum(1).int(), min=1)
-        dynamic_ks = dynamic_ks.tolist()
-
-        for gt_idx in range(num_gt):
-            _, pos_idx = torch.topk(
-                cost[gt_idx], k=dynamic_ks[gt_idx], largest=False)
-            matching_matrix[gt_idx][pos_idx] = 1
-
-        del topk_ious, dynamic_ks, pos_idx
-
-        anchor_matching_gt = matching_matrix.sum(0)
-        if (anchor_matching_gt > 1).sum() > 0:
-            _, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0)
-            matching_matrix[:, anchor_matching_gt > 1] *= 0
-            matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1
-        fg_mask_inboxes = matching_matrix.sum(0) > 0
-        num_fg = fg_mask_inboxes.sum().item()
-
-        fg_mask[fg_mask.clone()] = fg_mask_inboxes
-
-        matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0)
-        gt_matched_classes = gt_classes[matched_gt_inds]
-
-        pred_ious_this_matching = (matching_matrix *
-                                   pair_wise_ious).sum(0)[fg_mask_inboxes]
-
-        return num_fg, gt_matched_classes, pred_ious_this_matching, matched_gt_inds
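For readers of this series, the dynamic-k (SimOTA) assignment that closes the removed head above, and that reappears in the template below, is easier to follow in isolation. The sketch below is illustrative only: the helper name toy_dynamic_k and the toy tensor sizes are invented and are not part of this patch.

import torch

def toy_dynamic_k(cost, ious, n_candidate_k=10):
    # cost, ious: [num_gt, num_anchors]; lower cost and higher IoU are better.
    matching = torch.zeros_like(cost, dtype=torch.uint8)
    topk_ious, _ = torch.topk(ious, min(n_candidate_k, ious.size(1)), dim=1)
    # each gt gets k roughly equal to the sum of its top IoUs, at least 1
    dynamic_ks = torch.clamp(topk_ious.sum(1).int(), min=1)
    for gt_idx in range(cost.size(0)):
        _, pos = torch.topk(cost[gt_idx], k=int(dynamic_ks[gt_idx]), largest=False)
        matching[gt_idx][pos] = 1
    # an anchor matched to several gts keeps only the cheapest one
    multi = matching.sum(0) > 1
    if multi.any():
        cheapest = cost[:, multi].argmin(0)
        matching[:, multi] = 0
        matching[cheapest, multi] = 1
    return matching

print(toy_dynamic_k(torch.rand(3, 8), torch.rand(3, 8)))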
diff --git a/easycv/models/detection/detectors/yolox/yolo_head_template.py b/easycv/models/detection/detectors/yolox/yolo_head_template.py
new file mode 100644
index 00000000..1d7e5981
--- /dev/null
+++ b/easycv/models/detection/detectors/yolox/yolo_head_template.py
@@ -0,0 +1,656 @@
+# Copyright (c) 2014-2021 Megvii Inc And Alibaba PAI-Teams. All rights reserved.
+import logging
+import math
+from distutils.version import LooseVersion
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from abc import abstractmethod
+from easycv.models.builder import HEADS
+from easycv.models.backbones.network_blocks import BaseConv, DWConv
+from easycv.models.detection.utils import bboxes_iou
+from easycv.models.loss import FocalLoss, IOUloss, VarifocalLoss
+
+
+class YOLOXHead_Template(nn.Module):
+    param_map = {
+        'nano': [0.33, 0.25],
+        'tiny': [0.33, 0.375],
+        's': [0.33, 0.5],
+        'm': [0.67, 0.75],
+        'l': [1.0, 1.0],
+        'x': [1.33, 1.25]
+    }
+
+    def __init__(self,
+                 num_classes=80,
+                 model_type='s',
+                 strides=[8, 16, 32],
+                 in_channels=[256, 512, 1024],
+                 act='silu',
+                 depthwise=False,
+                 stage='CLOUD',
+                 obj_loss_type='BCE',
+                 reg_loss_type='giou',
+                 decode_in_inference=True
+        ):
+        """
+        Args:
+            num_classes (int): number of detection classes.
+            model_type (str): model size ('nano', 'tiny', 's', 'm', 'l', 'x'),
+                mapped to depth/width multipliers via param_map. Default value: 's'.
+            strides (list): expanded strides. Default value: [8, 16, 32].
+            in_channels (list): model conv channels set. Default value: [256, 512, 1024].
+            act (str): activation type of conv. Default value: "silu".
+            depthwise (bool): whether to apply depthwise conv in the conv branch. Default value: False.
+            stage (str): model stage, distinguishing the edge head from the cloud head. Default value: CLOUD.
+            obj_loss_type (str): loss function for the objectness confidence. Default value: BCE.
+            reg_loss_type (str): loss function for the box regression. Default value: giou.
+            decode_in_inference (bool): whether to decode outputs in inference; set False for deploy/export. Default value: True.
+        """
+        super().__init__()
+        width = self.param_map[model_type][1]
+        self.width = width
+        self.n_anchors = 1
+        self.num_classes = num_classes
+        self.stage = stage
+        self.decode_in_inference = decode_in_inference  # for deploy, set to False
+
+        self.cls_convs = nn.ModuleList()
+        self.reg_convs = nn.ModuleList()
+        self.cls_preds = nn.ModuleList()
+        self.reg_preds = nn.ModuleList()
+        self.obj_preds = nn.ModuleList()
+        self.stems = nn.ModuleList()
+
+        Conv = DWConv if depthwise else BaseConv
+
+        for i in range(len(in_channels)):
+            self.stems.append(
+                BaseConv(
+                    in_channels=int(in_channels[i] * width),
+                    out_channels=int(256 * width),
+                    ksize=1,
+                    stride=1,
+                    act=act,
+                ))
+            self.cls_convs.append(
+                nn.Sequential(*[
+                    Conv(
+                        in_channels=int(256 * width),
+                        out_channels=int(256 * width),
+                        ksize=3,
+                        stride=1,
+                        act=act,
+                    ),
+                    Conv(
+                        in_channels=int(256 * width),
+                        out_channels=int(256 * width),
+                        ksize=3,
+                        stride=1,
+                        act=act,
+                    ),
+                ]))
+            self.reg_convs.append(
+                nn.Sequential(*[
+                    Conv(
+                        in_channels=int(256 * width),
+                        out_channels=int(256 * width),
+                        ksize=3,
+                        stride=1,
+                        act=act,
+                    ),
+                    Conv(
+                        in_channels=int(256 * width),
+                        out_channels=int(256 * width),
+                        ksize=3,
+                        stride=1,
+                        act=act,
+                    ),
+                ]))
+
+            self.cls_preds.append(
+                nn.Conv2d(
+                    in_channels=int(256 * width),
+                    out_channels=self.n_anchors * self.num_classes,
+                    kernel_size=1,
+                    stride=1,
+                    padding=0,
+                ))
+            self.reg_preds.append(
+                nn.Conv2d(
+                    in_channels=int(256 * width),
+                    out_channels=4,
+                    kernel_size=1,
+                    stride=1,
+                    padding=0,
+                ))
+            self.obj_preds.append(
+                nn.Conv2d(
+                    in_channels=int(256 * width),
+                    out_channels=self.n_anchors * 1,
+                    kernel_size=1,
+                    stride=1,
+                    padding=0,
+                ))
+        self.bcewithlog_loss = nn.BCEWithLogitsLoss(reduction='none')
+
+        self.use_l1 = False
+        self.l1_loss = nn.L1Loss(reduction='none')
+
+        self.iou_loss = IOUloss(reduction='none', loss_type=reg_loss_type)
+
+        self.obj_loss_type = obj_loss_type
+        if obj_loss_type == 'BCE':
+            self.obj_loss = nn.BCEWithLogitsLoss(reduction='none')
+        elif obj_loss_type == 'focal':
+            self.obj_loss = FocalLoss(reduction='none')
+
+        elif obj_loss_type == 'v_focal':
+            self.obj_loss = VarifocalLoss(reduction='none')
+        else:
+            raise ValueError('Undefined loss type: {}'.format(obj_loss_type))
+
+        self.strides = strides
+        self.grids = [torch.zeros(1)] * len(in_channels)
+
+    def initialize_biases(self, prior_prob):
+        for conv in self.cls_preds:
+            b = conv.bias.view(self.n_anchors, -1)
+            b.data.fill_(-math.log((1 - prior_prob) / prior_prob))
+            conv.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
+
+        for conv in self.obj_preds:
+            b = conv.bias.view(self.n_anchors, -1)
+            b.data.fill_(-math.log((1 - prior_prob) / prior_prob))
+            conv.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
+
+    @abstractmethod
+    def forward(self, xin, labels=None, imgs=None):
+        pass
+
+    def get_output_and_grid(self, output, k, stride, dtype):
+        grid = self.grids[k]
+
+        batch_size = output.shape[0]
+        n_ch = 5 + self.num_classes
+        hsize, wsize = output.shape[-2:]
+        if grid.shape[2:4] != output.shape[2:4]:
+            yv, xv = torch.meshgrid([torch.arange(hsize), torch.arange(wsize)])
+            grid = torch.stack((xv, yv), 2).view(1, 1, hsize, wsize,
+                                                 2).type(dtype)
+            self.grids[k] = grid
+
+        output = output.view(batch_size, self.n_anchors, n_ch, hsize, wsize)
+        output = output.permute(0, 1, 3, 4,
+                                2).reshape(batch_size,
+                                           self.n_anchors * hsize * wsize, -1)
+        grid = grid.view(1, -1, 2)
+        output[..., :2] = (output[..., :2] + grid) * stride
+        output[..., 2:4] = torch.exp(output[..., 2:4]) * stride
+        return output, grid
+
+    def decode_outputs(self, outputs, dtype):
+        grids = []
+        strides = []
+        for (hsize, wsize), stride in zip(self.hw, self.strides):
+            yv, xv = torch.meshgrid([torch.arange(hsize), torch.arange(wsize)])
+            grid = torch.stack((xv, yv), 2).view(1, -1, 2)
+            grids.append(grid)
+            shape = grid.shape[:2]
+            strides.append(torch.full((*shape, 1), stride, dtype=torch.int))
+
+        grids = torch.cat(grids, dim=1).type(dtype)
+        strides = torch.cat(strides, dim=1).type(dtype)
+
+        outputs[..., :2] = (outputs[..., :2] + grids) * strides
+        outputs[..., 2:4] = torch.exp(outputs[..., 2:4]) * strides
+        return outputs
+
+    def get_losses(
+        self,
+        imgs,
+        x_shifts,
+        y_shifts,
+        expanded_strides,
+        labels,
+        outputs,
+        origin_preds,
+        dtype,
+    ):
+        bbox_preds = outputs[:, :, :4]  # [batch, n_anchors_all, 4]
+        obj_preds = outputs[:, :, 4].unsqueeze(-1)  # [batch, n_anchors_all, 1]
+        cls_preds = outputs[:, :, 5:]  # [batch, n_anchors_all, n_cls]
+
+        # calculate targets
+        nlabel = (labels.sum(dim=2) > 0).sum(dim=1)  # number of objects
+
+        total_num_anchors = outputs.shape[1]
+        x_shifts = torch.cat(x_shifts, 1)  # [1, n_anchors_all]
+        y_shifts = torch.cat(y_shifts, 1)  # [1, n_anchors_all]
+        expanded_strides = torch.cat(expanded_strides, 1)
+        if self.use_l1:
+            origin_preds = torch.cat(origin_preds, 1)
+
+        cls_targets = []
+        reg_targets = []
+        l1_targets = []
+        obj_targets = []
+        fg_masks = []
+
+        num_fg = 0.0
+        num_gts = 0.0
+
+        for batch_idx in range(outputs.shape[0]):
+            num_gt = int(nlabel[batch_idx])
+
+            num_gts += num_gt
+            if num_gt == 0:
+                cls_target = outputs.new_zeros((0, self.num_classes))
+                reg_target = outputs.new_zeros((0, 4))
+                l1_target = outputs.new_zeros((0, 4))
+                obj_target = outputs.new_zeros((total_num_anchors, 1))
+                fg_mask = outputs.new_zeros(total_num_anchors).bool()
+            else:
+                gt_bboxes_per_image = labels[batch_idx, :num_gt, 1:5]
+                gt_classes = labels[batch_idx, :num_gt, 0]
+                bboxes_preds_per_image = bbox_preds[batch_idx]
+
+                try:
+                    (
+                        gt_matched_classes,
+                        fg_mask,
+                        pred_ious_this_matching,
+                        matched_gt_inds,
+                        num_fg_img,
+                    ) = self.get_assignments(  # noqa
+                        batch_idx,
+                        num_gt,
+                        total_num_anchors,
+                        gt_bboxes_per_image,
+                        gt_classes,
+                        bboxes_preds_per_image,
+                        expanded_strides,
+                        x_shifts,
+                        y_shifts,
+                        cls_preds,
+                        bbox_preds,
+                        obj_preds,
+                        labels,
+                        imgs,
+                    )
+
+                except RuntimeError:
+                    logging.error(
+                        'OOM RuntimeError is raised due to the huge memory cost during label assignment. '
+                        'CPU mode is applied in this batch. If you want to avoid this issue, '
+                        'try to reduce the batch size or image size.')
+                    torch.cuda.empty_cache()
+                    (
+                        gt_matched_classes,
+                        fg_mask,
+                        pred_ious_this_matching,
+                        matched_gt_inds,
+                        num_fg_img,
+                    ) = self.get_assignments(  # noqa
+                        batch_idx,
+                        num_gt,
+                        total_num_anchors,
+                        gt_bboxes_per_image,
+                        gt_classes,
+                        bboxes_preds_per_image,
+                        expanded_strides,
+                        x_shifts,
+                        y_shifts,
+                        cls_preds,
+                        bbox_preds,
+                        obj_preds,
+                        labels,
+                        imgs,
+                        'cpu',
+                    )
+
+                torch.cuda.empty_cache()
+                num_fg += num_fg_img
+
+                cls_target = F.one_hot(
+                    gt_matched_classes.to(torch.int64),
+                    self.num_classes) * pred_ious_this_matching.unsqueeze(-1)
+                obj_target = fg_mask.unsqueeze(-1)
+                reg_target = gt_bboxes_per_image[matched_gt_inds]
+
+                if self.use_l1:
+                    l1_target = self.get_l1_target(
+                        outputs.new_zeros((num_fg_img, 4)),
+                        gt_bboxes_per_image[matched_gt_inds],
+                        expanded_strides[0][fg_mask],
+                        x_shifts=x_shifts[0][fg_mask],
+                        y_shifts=y_shifts[0][fg_mask],
+                    )
+
+            cls_targets.append(cls_target)
+            reg_targets.append(reg_target)
+            obj_targets.append(obj_target.to(dtype))
+            fg_masks.append(fg_mask)
+            if self.use_l1:
+                l1_targets.append(l1_target)
+
+        cls_targets = torch.cat(cls_targets, 0)
+        reg_targets = torch.cat(reg_targets, 0)
+        obj_targets = torch.cat(obj_targets, 0)
+        fg_masks = torch.cat(fg_masks, 0)
+
+        if self.use_l1:
+            l1_targets = torch.cat(l1_targets, 0)
+
+        num_fg = max(num_fg, 1)
+
+        loss_iou = (self.iou_loss(
+            bbox_preds.view(-1, 4)[fg_masks], reg_targets)).sum() / num_fg
+
+        if self.obj_loss_type == 'focal':
+            loss_obj = (self.focal_loss(obj_preds.sigmoid().view(-1, 1),
+                                        obj_targets)).sum() / num_fg
+        else:
+            loss_obj = (self.obj_loss(obj_preds.view(-1, 1),
+                                      obj_targets)).sum() / num_fg
+        loss_cls = (self.bcewithlog_loss(
+            cls_preds.view(-1, self.num_classes)[fg_masks],
+            cls_targets)).sum() / num_fg
+
+        if self.use_l1:
+            loss_l1 = (self.l1_loss(
+                origin_preds.view(-1, 4)[fg_masks], l1_targets)).sum() / num_fg
+        else:
+            loss_l1 = 0.0
+
+        reg_weight = 5.0
+        loss = reg_weight * loss_iou + loss_obj + loss_cls + loss_l1
+
+        return (
+            loss,
+            reg_weight * loss_iou,
+            loss_obj,
+            loss_cls,
+            loss_l1,
+            num_fg / max(num_gts, 1),
+        )
+
+    def focal_loss(self, pred, gt):
+        pos_inds = gt.eq(1).float()
+        neg_inds = gt.eq(0).float()
+        pos_loss = torch.log(pred + 1e-5) * torch.pow(1 - pred,
+                                                      2) * pos_inds * 0.75
+        neg_loss = torch.log(1 - pred + 1e-5) * torch.pow(pred,
+                                                          2) * neg_inds * 0.25
+        loss = -(pos_loss + neg_loss)
+        return loss
+
+    def get_l1_target(self,
+                      l1_target,
+                      gt,
+                      stride,
+                      x_shifts,
+                      y_shifts,
+                      eps=1e-8):
+        l1_target[:, 0] = gt[:, 0] / stride - x_shifts
+        l1_target[:, 1] = gt[:, 1] / stride - y_shifts
+        l1_target[:, 2] = torch.log(gt[:, 2] / stride + eps)
+        l1_target[:, 3] = torch.log(gt[:, 3] / stride + eps)
+        return l1_target
+
+    @torch.no_grad()
+    def get_assignments(
+        self,
+        batch_idx,
+        num_gt,
+        total_num_anchors,
+        gt_bboxes_per_image,
+        gt_classes,
+        bboxes_preds_per_image,
+        expanded_strides,
+        x_shifts,
+        y_shifts,
+        cls_preds,
+        bbox_preds,
+        obj_preds,
+        labels,
+        imgs,
+        mode='gpu',
+    ):
+
+        if mode == 'cpu':
+            print('------------CPU Mode for This Batch-------------')
+            gt_bboxes_per_image = gt_bboxes_per_image.cpu().float()
+            bboxes_preds_per_image = bboxes_preds_per_image.cpu().float()
+            gt_classes = gt_classes.cpu().float()
+            expanded_strides = expanded_strides.cpu().float()
+            x_shifts = x_shifts.cpu()
+            y_shifts = y_shifts.cpu()
+
+        fg_mask, is_in_boxes_and_center = self.get_in_boxes_info(
+            gt_bboxes_per_image,
+            expanded_strides,
+            x_shifts,
+            y_shifts,
+            total_num_anchors,
+            num_gt,
+        )
+        # reference to: https://github.com/Megvii-BaseDetection/YOLOX/pull/811
+        # NOTE: Fix `selected index k out of range`
+        npa: int = fg_mask.sum().item()  # number of positive anchors
+
+        if npa == 0:
+            gt_matched_classes = torch.zeros(0, device=fg_mask.device).long()
+            pred_ious_this_matching = torch.rand(0, device=fg_mask.device)
+            matched_gt_inds = gt_matched_classes
+            num_fg = npa
+
+            if mode == 'cpu':
+                gt_matched_classes = gt_matched_classes.cuda()
+                fg_mask = fg_mask.cuda()
+                pred_ious_this_matching = pred_ious_this_matching.cuda()
+                matched_gt_inds = matched_gt_inds.cuda()
+                num_fg = num_fg.cuda()
+
+            return (
+                gt_matched_classes,
+                fg_mask,
+                pred_ious_this_matching,
+                matched_gt_inds,
+                num_fg,
+            )
+
+        bboxes_preds_per_image = bboxes_preds_per_image[fg_mask]
+        cls_preds_ = cls_preds[batch_idx][fg_mask]
+        obj_preds_ = obj_preds[batch_idx][fg_mask]
+        num_in_boxes_anchor = bboxes_preds_per_image.shape[0]
+
+        if mode == 'cpu':
+            gt_bboxes_per_image = gt_bboxes_per_image.cpu()
+            bboxes_preds_per_image = bboxes_preds_per_image.cpu()
+
+        pair_wise_ious = bboxes_iou(gt_bboxes_per_image,
+                                    bboxes_preds_per_image, False)
+
+        if (torch.isnan(pair_wise_ious.max())):
+            pair_wise_ious = bboxes_iou(gt_bboxes_per_image,
+                                        bboxes_preds_per_image, False)
+
+        gt_cls_per_image = (
+            F.one_hot(gt_classes.to(torch.int64),
+                      self.num_classes).float().unsqueeze(1).repeat(
+                          1, num_in_boxes_anchor, 1))
+        pair_wise_ious_loss = -torch.log(pair_wise_ious + 1e-8)
+
+        if mode == 'cpu':
+            cls_preds_, obj_preds_ = cls_preds_.cpu(), obj_preds_.cpu()
+
+        if LooseVersion(torch.__version__) >= LooseVersion('1.6.0'):
+            with torch.cuda.amp.autocast(enabled=False):
+                cls_preds_ = (
+                    cls_preds_.float().unsqueeze(0).repeat(num_gt, 1,
+                                                           1).sigmoid_() *
+                    obj_preds_.float().unsqueeze(0).repeat(num_gt, 1,
+                                                           1).sigmoid_())
+                pair_wise_cls_loss = F.binary_cross_entropy(
+                    cls_preds_.sqrt_(), gt_cls_per_image,
+                    reduction='none').sum(-1)
+        else:
+            cls_preds_ = (
+                cls_preds_.float().unsqueeze(0).repeat(num_gt, 1,
+                                                       1).sigmoid_() *
+                obj_preds_.float().unsqueeze(0).repeat(num_gt, 1,
+                                                       1).sigmoid_())
+            pair_wise_cls_loss = F.binary_cross_entropy(
+                cls_preds_.sqrt_(), gt_cls_per_image, reduction='none').sum(-1)
+
+        del cls_preds_
+
+        cost = (
+            pair_wise_cls_loss + 3.0 * pair_wise_ious_loss + 100000.0 *
+            (~is_in_boxes_and_center))
+
+        (
+            num_fg,
+            gt_matched_classes,
+            pred_ious_this_matching,
+            matched_gt_inds,
+        ) = self.dynamic_k_matching(cost, pair_wise_ious, gt_classes, num_gt,
+                                    fg_mask)
+
+        del pair_wise_cls_loss, cost, pair_wise_ious, pair_wise_ious_loss
+
+        if mode == 'cpu':
+            gt_matched_classes = gt_matched_classes.cuda()
+            fg_mask = fg_mask.cuda()
+            pred_ious_this_matching = pred_ious_this_matching.cuda()
+            matched_gt_inds = matched_gt_inds.cuda()
+
+        return (
+            gt_matched_classes,
+            fg_mask,
+            pred_ious_this_matching,
+            matched_gt_inds,
+            num_fg,
+        )
+
+    def get_in_boxes_info(
+        self,
+        gt_bboxes_per_image,
+        expanded_strides,
+        x_shifts,
+        y_shifts,
+        total_num_anchors,
+        num_gt,
+    ):
+        expanded_strides_per_image = expanded_strides[0]
+        x_shifts_per_image = x_shifts[0] * expanded_strides_per_image
+        y_shifts_per_image = y_shifts[0] * expanded_strides_per_image
+        x_centers_per_image = (
+            (x_shifts_per_image +
+             0.5 * expanded_strides_per_image).unsqueeze(0).repeat(num_gt, 1)
+        )  # [n_anchor] -> [n_gt, n_anchor]
+        y_centers_per_image = (
+            (y_shifts_per_image +
+             0.5 * expanded_strides_per_image).unsqueeze(0).repeat(num_gt, 1))
+
+        gt_bboxes_per_image_l = (
+            (gt_bboxes_per_image[:, 0] -
+             0.5 * gt_bboxes_per_image[:, 2]).unsqueeze(1).repeat(
+                 1, total_num_anchors))
+        gt_bboxes_per_image_r = (
+            (gt_bboxes_per_image[:, 0] +
+             0.5 * gt_bboxes_per_image[:, 2]).unsqueeze(1).repeat(
+                 1, total_num_anchors))
+        gt_bboxes_per_image_t = (
+            (gt_bboxes_per_image[:, 1] -
+             0.5 * gt_bboxes_per_image[:, 3]).unsqueeze(1).repeat(
+                 1, total_num_anchors))
+        gt_bboxes_per_image_b = (
+            (gt_bboxes_per_image[:, 1] +
+             0.5 * gt_bboxes_per_image[:, 3]).unsqueeze(1).repeat(
+                 1, total_num_anchors))
+
+        b_l = x_centers_per_image - gt_bboxes_per_image_l
+        b_r = gt_bboxes_per_image_r - x_centers_per_image
+        b_t = y_centers_per_image - gt_bboxes_per_image_t
+        b_b = gt_bboxes_per_image_b - y_centers_per_image
+        bbox_deltas = torch.stack([b_l, b_t, b_r, b_b], 2)
+
+        is_in_boxes = bbox_deltas.min(dim=-1).values > 0.0
+        is_in_boxes_all = is_in_boxes.sum(dim=0) > 0
+        # in fixed center
+
+        center_radius = 2.5
+
+        gt_bboxes_per_image_l = (
+            gt_bboxes_per_image[:, 0]).unsqueeze(1).repeat(
+                1, total_num_anchors
+            ) - center_radius * expanded_strides_per_image.unsqueeze(0)
+        gt_bboxes_per_image_r = (
+            gt_bboxes_per_image[:, 0]).unsqueeze(1).repeat(
+                1, total_num_anchors
+            ) + center_radius * expanded_strides_per_image.unsqueeze(0)
+        gt_bboxes_per_image_t = (
+            gt_bboxes_per_image[:, 1]).unsqueeze(1).repeat(
+                1, total_num_anchors
+            ) - center_radius * expanded_strides_per_image.unsqueeze(0)
+        gt_bboxes_per_image_b = (
+            gt_bboxes_per_image[:, 1]).unsqueeze(1).repeat(
+                1, total_num_anchors
+            ) + center_radius * expanded_strides_per_image.unsqueeze(0)
+
+        c_l = x_centers_per_image - gt_bboxes_per_image_l
+        c_r = gt_bboxes_per_image_r - x_centers_per_image
+        c_t = y_centers_per_image - gt_bboxes_per_image_t
+        c_b = gt_bboxes_per_image_b - y_centers_per_image
+        center_deltas = torch.stack([c_l, c_t, c_r, c_b], 2)
+        is_in_centers = center_deltas.min(dim=-1).values > 0.0
+        is_in_centers_all = is_in_centers.sum(dim=0) > 0
+
+        # in boxes and in centers
+        is_in_boxes_anchor = is_in_boxes_all | is_in_centers_all
+
+        is_in_boxes_and_center = (
+            is_in_boxes[:, is_in_boxes_anchor]
+            & is_in_centers[:, is_in_boxes_anchor])
+        return is_in_boxes_anchor, is_in_boxes_and_center
+
+    def dynamic_k_matching(self, cost, pair_wise_ious, gt_classes, num_gt,
+                           fg_mask):
+
+        # Dynamic K
+        # ---------------------------------------------------------------
+        matching_matrix = torch.zeros_like(cost, dtype=torch.uint8)
+
+        ious_in_boxes_matrix = pair_wise_ious
+        n_candidate_k = min(10, ious_in_boxes_matrix.size(1))
+
+        topk_ious, _ = torch.topk(ious_in_boxes_matrix, n_candidate_k, dim=1)
+        dynamic_ks = torch.clamp(topk_ious.sum(1).int(), min=1)
+        dynamic_ks = dynamic_ks.tolist()
+
+        for gt_idx in range(num_gt):
+            _, pos_idx = torch.topk(
+                cost[gt_idx], k=dynamic_ks[gt_idx], largest=False)
+            matching_matrix[gt_idx][pos_idx] = 1
+
+        del topk_ious, dynamic_ks, pos_idx
+
+        anchor_matching_gt = matching_matrix.sum(0)
+        if (anchor_matching_gt > 1).sum() > 0:
+            _, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0)
+            matching_matrix[:, anchor_matching_gt > 1] *= 0
+            matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1
+        fg_mask_inboxes = matching_matrix.sum(0) > 0
+        num_fg = fg_mask_inboxes.sum().item()
+
+        fg_mask[fg_mask.clone()] = fg_mask_inboxes
+
+        matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0)
+        gt_matched_classes = gt_classes[matched_gt_inds]
+
+        pred_ious_this_matching = (matching_matrix *
+                                   pair_wise_ious).sum(0)[fg_mask_inboxes]
+
+        return num_fg, gt_matched_classes, pred_ious_this_matching, matched_gt_inds
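The decoding helpers above (get_output_and_grid / decode_outputs) turn per-cell offsets into absolute pixel boxes: centers are (offset + grid cell) * stride and sizes are exp(pred) * stride. A minimal standalone sketch of the same arithmetic follows; toy_decode and the toy shapes are assumptions for illustration, not part of the patch.

import torch

def toy_decode(outputs, hsize, wsize, stride):
    # outputs: [batch, hsize * wsize, >=4] with (dx, dy, log_w, log_h) in grid units
    yv, xv = torch.meshgrid([torch.arange(hsize), torch.arange(wsize)])
    grid = torch.stack((xv, yv), 2).view(1, -1, 2).float()
    outputs = outputs.clone()
    outputs[..., :2] = (outputs[..., :2] + grid) * stride      # centers in pixels
    outputs[..., 2:4] = torch.exp(outputs[..., 2:4]) * stride  # w/h in pixels
    return outputs

preds = torch.zeros(1, 4 * 4, 5)                   # one image, a 4x4 grid
decoded = toy_decode(preds, hsize=4, wsize=4, stride=8)
print(decoded[0, :3, :4])  # centers land at x = 0, 8, 16 on the first row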
diff --git a/easycv/models/detection/detectors/yolox/yolo_pafpn.py b/easycv/models/detection/detectors/yolox/yolo_pafpn.py
index 2f16faef..e8ae8ec7 100644
--- a/easycv/models/detection/detectors/yolox/yolo_pafpn.py
+++ b/easycv/models/detection/detectors/yolox/yolo_pafpn.py
@@ -4,40 +4,50 @@
 
 import torch
 import torch.nn as nn
-
+from easycv.models.registry import BACKBONES
 from easycv.models.backbones.darknet import CSPDarknet
 from easycv.models.backbones.network_blocks import (BaseConv, CSPLayer, DWConv,
                                                     GSConv, VoVGSCSP)
 from easycv.models.backbones.repvgg_yolox_backbone import RepVGGYOLOX
-
+from .ASFF import ASFF
 
 def make_divisible(x, divisor):
     # Round x up so that it is evenly divisible by the divisor.
     return math.ceil(x / divisor) * divisor
 
 
+
+@BACKBONES.register_module
 class YOLOPAFPN(nn.Module):
     """
     YOLOv3 model. Darknet 53 is the default backbone of this model.
     """
+    param_map = {
+        'nano': [0.33, 0.25],
+        'tiny': [0.33, 0.375],
+        's': [0.33, 0.5],
+        'm': [0.67, 0.75],
+        'l': [1.0, 1.0],
+        'x': [1.33, 1.25]
+    }
 
     def __init__(self,
-                 depth=1.0,
-                 width=1.0,
+                 model_type='s',
                  in_features=('dark3', 'dark4', 'dark5'),
                  in_channels=[256, 512, 1024],
                  depthwise=False,
                  act='silu',
-                 asff_channel=16,
+                 asff_channel=2,
                  use_att=None,
                  expand_kernel=3,
-                 down_rate=32,
-                 use_dconv=False,
-                 use_expand=True,
                  backbone='CSPDarknet',
                  neck='yolo',
                  neck_mode='all'):
         super().__init__()
+
+        depth = self.param_map[model_type][0]
+        width = self.param_map[model_type][1]
+
         # build backbone
         if backbone == 'CSPDarknet':
             self.backbone = CSPDarknet(
@@ -51,11 +61,13 @@ def __init__(self,
             )
             self.backbone = RepVGGYOLOX(
                 in_channels=3, depth=depth, width=width)
+
         self.backbone_name = backbone
 
         # build neck
         self.in_features = in_features
         self.in_channels = in_channels
+
         Conv = DWConv if depthwise else BaseConv
         self.neck = neck
         self.neck_mode = neck_mode
@@ -230,52 +242,31 @@ def __init__(self,
             )
 
         if self.use_att == 'ASFF' or self.use_att == 'ASFF_sim':
-            if self.use_att == 'ASFF':
-                from .ASFF import ASFF
-                self.asff_1 = ASFF(
-                    level=0,
-                    multiplier=width,
-                    asff_channel=asff_channel,
-                    act=act)
-                self.asff_2 = ASFF(
-                    level=1,
-                    multiplier=width,
-                    asff_channel=asff_channel,
-                    act=act)
-                self.asff_3 = ASFF(
-                    level=2,
-                    multiplier=width,
-                    asff_channel=asff_channel,
-                    act=act)
-            else:
-                from .ASFF_sim import ASFF
-                self.asff_1 = ASFF(
-                    level=0,
-                    multiplier=width,
-                    asff_channel=asff_channel,
-                    act=act,
-                    expand_kernel=expand_kernel,
-                    down_rate=down_rate,
-                    use_dconv=use_dconv,
-                    use_expand=use_expand)
-                self.asff_2 = ASFF(
-                    level=1,
-                    multiplier=width,
-                    asff_channel=asff_channel,
-                    act=act,
-                    expand_kernel=expand_kernel,
-                    down_rate=down_rate,
-                    use_dconv=use_dconv,
-                    use_expand=use_expand)
-                self.asff_3 = ASFF(
-                    level=2,
-                    multiplier=width,
-                    asff_channel=asff_channel,
-                    act=act,
-                    expand_kernel=expand_kernel,
-                    down_rate=down_rate,
-                    use_dconv=use_dconv,
-                    use_expand=use_expand)
+            self.asff_1 = ASFF(
+                level=0,
+                type=self.use_att,
+                asff_channel=asff_channel,
+                expand_kernel=expand_kernel,
+                multiplier=width,
+                act=act,
+                )
+            self.asff_2 = ASFF(
+                level=1,
+                type=self.use_att,
+                asff_channel=asff_channel,
+                expand_kernel=expand_kernel,
+                multiplier=width,
+                act=act,
+            )
+            self.asff_3 = ASFF(
+                level=2,
+                type=self.use_att,
+                asff_channel=asff_channel,
+                expand_kernel=expand_kernel,
+                multiplier=width,
+                act=act,
+            )
+
 
     def forward(self, input):
         """
@@ -325,6 +316,7 @@ def forward(self, input):
             fpn_out1 = self.gsconv2(f_out0)  # 512->256/16
             f_out1 = self.upsample(fpn_out1)  # 256/8
             f_out1 = torch.cat([f_out1, x2], 1)  # 256->512/8
+
             if self.neck_mode == 'all':
                 f_out1 = self.gsconv3(f_out1)
                 pan_out2 = self.vovGSCSP2(f_out1)  # 512->256/8
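With the refactor above, YOLOPAFPN no longer takes explicit depth/width arguments; both are looked up from model_type via param_map, and the ASFF and ASFF_sim variants now share a single ASFF entry point selected by the type argument. A hedged construction sketch (it assumes the easycv package from this series is importable; the shapes are illustrative):

import torch
from easycv.models.detection.detectors.yolox.yolo_pafpn import YOLOPAFPN

# 's' maps to depth=0.33, width=0.5 via param_map; pass use_att='ASFF' to
# enable the attention-based fusion instead of the plain PAFPN outputs.
neck = YOLOPAFPN(model_type='s', backbone='CSPDarknet', use_att=None)
features = neck(torch.randn(1, 3, 640, 640))
print([f.shape for f in features])  # dark3 / dark4 / dark5 level features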
diff --git a/easycv/models/detection/detectors/yolox/yolox.py b/easycv/models/detection/detectors/yolox/yolox.py
index 34f5aa52..0c12336e 100644
--- a/easycv/models/detection/detectors/yolox/yolox.py
+++ b/easycv/models/detection/detectors/yolox/yolox.py
@@ -6,13 +6,11 @@
 import torch
 import torch.nn as nn
 from torch import Tensor
-
+from easycv.utils.config_tools import mmcv_config_fromfile
 from easycv.models.base import BaseModel
-from easycv.models.builder import MODELS
+from easycv.models.builder import (MODELS, build_model, build_backbone, build_head,
+                                   build_neck)
 from easycv.models.detection.utils import postprocess
-from .tood_head import TOODHead
-from .yolo_head import YOLOXHead
-from .yolo_pafpn import YOLOPAFPN
 
 
 def init_yolo(M):
@@ -22,14 +20,6 @@ def init_yolo(M):
             m.momentum = 0.03
 
 
-def cxcywh2xyxy(bboxes):
-    bboxes[..., 0] = bboxes[..., 0] - bboxes[..., 2] * 0.5  # x1
-    bboxes[..., 1] = bboxes[..., 1] - bboxes[..., 3] * 0.5
-    bboxes[..., 2] = bboxes[..., 0] + bboxes[..., 2]
-    bboxes[..., 3] = bboxes[..., 1] + bboxes[..., 3]
-    return bboxes
-
-
 @MODELS.register_module
 class YOLOX(BaseModel):
     """
@@ -37,105 +27,21 @@ class YOLOX(BaseModel):
     The network returns loss values from three YOLO layers during training
     and detection results during test.
     """
-    param_map = {
-        'nano': [0.33, 0.25],
-        'tiny': [0.33, 0.375],
-        's': [0.33, 0.5],
-        'm': [0.67, 0.75],
-        'l': [1.0, 1.0],
-        'x': [1.33, 1.25]
-    }
-
-    # TODO configs support more params
-    # backbone(Darknet)、neck(YOLOXPAFPN)、head(YOLOXHead)
-    def __init__(self,
-                 model_type: str = 's',
-                 num_classes: int = 80,
-                 test_size: tuple = (640, 640),
-                 test_conf: float = 0.01,
-                 nms_thre: float = 0.65,
-                 use_att: str = None,
-                 obj_loss_type: str = 'l1',
-                 reg_loss_type: str = 'l1',
-                 spp_type: str = 'spp',
-                 head_type: str = 'yolox',
-                 neck: str = 'yolo',
-                 neck_mode: str = 'all',
-                 act: str = 'silu',
-                 asff_channel: int = 16,
-                 stacked_convs: int = 6,
-                 la_down_rate: int = 8,
-                 conv_layers: int = 2,
-                 decode_in_inference: bool = True,
-                 backbone='CSPDarknet',
-                 expand_kernel=3,
-                 down_rate=32,
-                 use_dconv=False,
-                 use_expand=True,
-                 pretrained: str = None):
+    def __init__(self, backbone, test_conf, nms_thre, head=None, neck=None, pretrained=True):
         super(YOLOX, self).__init__()
-        assert model_type in self.param_map, f'invalid model_type for yolox {model_type}, valid ones are {list(self.param_map.keys())}'
-
-        in_channels = [256, 512, 1024]
-        depth = self.param_map[model_type][0]
-        width = self.param_map[model_type][1]
-
-        self.backbone = YOLOPAFPN(
-            depth,
-            width,
-            in_channels=in_channels,
-            asff_channel=asff_channel,
-            act=act,
-            use_att=use_att,
-            backbone=backbone,
-            neck=neck,
-            neck_mode=neck_mode,
-            expand_kernel=expand_kernel,
-            down_rate=down_rate,
-            use_dconv=use_dconv,
-            use_expand=use_expand)
-
-        self.head_type = head_type
-        if head_type == 'yolox':
-            self.head = YOLOXHead(
-                num_classes,
-                width,
-                in_channels=in_channels,
-                act=act,
-                obj_loss_type=obj_loss_type,
-                reg_loss_type=reg_loss_type)
-            self.head.initialize_biases(1e-2)
-        elif head_type == 'tood':
-            self.head = TOODHead(
-                num_classes,
-                width,
-                in_channels=in_channels,
-                act=act,
-                obj_loss_type=obj_loss_type,
-                reg_loss_type=reg_loss_type,
-                stacked_convs=stacked_convs,
-                la_down_rate=la_down_rate,
-                conv_layers=conv_layers,
-                decode_in_inference=decode_in_inference)
-            self.head.initialize_biases(1e-2)
-
-        self.decode_in_inference = decode_in_inference
-        # use decode, we will use post process as default
-        if not self.decode_in_inference:
-            logging.warning(
-                'YOLOX-PAI head decode_in_inference close for speed test, post process will be close at same time!'
-            )
-            self.ignore_postprocess = True
-            logging.warning('YOLOX-PAI ignore_postprocess set to be True')
-        else:
-            self.ignore_postprocess = False
+
+        self.pretrained = pretrained
+        self.backbone = build_backbone(backbone)
+        if neck is not None:
+            self.neck = build_neck(neck)
+        self.head = build_head(head)
 
         self.apply(init_yolo)  # init_yolo(self)
-        self.num_classes = num_classes
+        self.num_classes = head.num_classes
         self.test_conf = test_conf
         self.nms_thre = nms_thre
-        self.test_size = test_size
-        self.epoch_counter = 0
+
+
 
     def forward_train(self,
                       img: Tensor,
@@ -157,53 +63,26 @@ def forward_train(self,
 
         targets = torch.cat([gt_labels, gt_bboxes], dim=2)
 
-        if self.head_type != 'ppyoloe':
-            loss, iou_loss, conf_loss, cls_loss, l1_loss, num_fg = self.head(
-                fpn_outs, targets, img)
-
-            outputs = {
-                'total_loss':
-                loss,
-                'iou_l':
-                iou_loss,
-                'conf_l':
-                conf_loss,
-                'cls_l':
-                cls_loss,
-                'img_h':
-                torch.tensor(img_metas[0]['img_shape'][0],
-                             device=loss.device).float(),
-                'img_w':
-                torch.tensor(img_metas[0]['img_shape'][1],
-                             device=loss.device).float()
-            }
+        loss, iou_loss, conf_loss, cls_loss, l1_loss, num_fg = self.head(
+            fpn_outs, targets, img)
+
+        outputs = {
+            'total_loss':
+            loss,
+            'iou_l':
+            iou_loss,
+            'conf_l':
+            conf_loss,
+            'cls_l':
+            cls_loss,
+            'img_h':
+            torch.tensor(img_metas[0]['img_shape'][0],
+                         device=loss.device).float(),
+            'img_w':
+            torch.tensor(img_metas[0]['img_shape'][1],
+                         device=loss.device).float()
+        }
 
-        else:
-            targets[..., 1:] = cxcywh2xyxy(targets[..., 1:])
-            extra_info = {}
-            extra_info['epoch'] = self.epoch_counter
-
-            print(extra_info['epoch'])
-            yolo_losses = self.head(fpn_outs, targets, extra_info)
-
-            outputs = {
-                'total_loss':
-                yolo_losses['total_loss'],
-                'iou_l':
-                yolo_losses['loss_iou'],
-                'conf_l':
-                yolo_losses['loss_dfl'],
-                'cls_l':
-                yolo_losses['loss_cls'],
-                'img_h':
-                torch.tensor(
-                    img_metas[0]['img_shape'][0],
-                    device=yolo_losses['total_loss'].device).float(),
-                'img_w':
-                torch.tensor(
-                    img_metas[0]['img_shape'][1],
-                    device=yolo_losses['total_loss'].device).float()
-            }
 
         return outputs
 
@@ -278,3 +157,12 @@ def forward_export(self, img):
                                       self.test_conf, self.nms_thre)
 
         return outputs
+
+if __name__ == '__main__':
+    config_path = '/apsara/xinyi.zxy/code/pr154/configs/detection/yolox/yolox_s_8xb16_300e_coco.py'
+    cfg = mmcv_config_fromfile(config_path)
+
+    print(cfg)
+
+    model = build_model(cfg.model)
+    print(model)
\ No newline at end of file
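The constructor rewrite above makes YOLOX fully config-driven: the backbone, neck, and head are plain dicts resolved through the registry builders, as the size-specific configs later in this series do. A hedged reconstruction of such a config fragment follows; the type keys are assumptions based on the modules registered in this patch, not lines copied from a config file.

# hypothetical config fragment; the type names are assumed
model = dict(
    type='YOLOX',
    backbone=dict(type='YOLOPAFPN', model_type='s', use_att=None),
    head=dict(
        type='YOLOXHead',
        model_type='s',
        num_classes=80,
        obj_loss_type='BCE',
        reg_loss_type='giou'),
    test_conf=0.01,
    nms_thre=0.65)

# built inside the framework roughly as:
#   from easycv.models.builder import build_model
#   detector = build_model(model)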
diff --git a/easycv/models/detection/detectors/yolox/yolox_bak.py b/easycv/models/detection/detectors/yolox/yolox_bak.py
new file mode 100644
index 00000000..5806fd8d
--- /dev/null
+++ b/easycv/models/detection/detectors/yolox/yolox_bak.py
@@ -0,0 +1,239 @@
+# Copyright (c) 2014-2021 Megvii Inc And Alibaba PAI-Teams. All rights reserved.
+import logging
+from typing import Dict
+
+import numpy as np
+import torch
+import torch.nn as nn
+from torch import Tensor
+
+
+from easycv.models.base import BaseModel
+from easycv.models.builder import MODELS
+from easycv.models.detection.utils import postprocess
+from .tood_head import TOODHead
+from .yolo_head import YOLOXHead
+from .yolo_pafpn import YOLOPAFPN
+
+
+def init_yolo(M):
+    for m in M.modules():
+        if isinstance(m, nn.BatchNorm2d):
+            m.eps = 1e-3
+            m.momentum = 0.03
+
+
+# @MODELS.register_module
+class YOLOX(BaseModel):
+    """
+    YOLOX model module. The module list is defined by create_yolov3_modules function.
+    The network returns loss values from three YOLO layers during training
+    and detection results during test.
+    """
+    param_map = {
+        'nano': [0.33, 0.25],
+        'tiny': [0.33, 0.375],
+        's': [0.33, 0.5],
+        'm': [0.67, 0.75],
+        'l': [1.0, 1.0],
+        'x': [1.33, 1.25]
+    }
+
+    # TODO configs support more params
+    # backbone(Darknet)、neck(YOLOXPAFPN)、head(YOLOXHead)
+    def __init__(self,
+                 model_type: str = 's',
+                 num_classes: int = 80,
+                 test_size: tuple = (640, 640),
+                 test_conf: float = 0.01,
+                 nms_thre: float = 0.65,
+                 use_att: str = None,
+                 obj_loss_type: str = 'l1',
+                 reg_loss_type: str = 'l1',
+                 head_type: str = 'yolox',
+                 neck: str = 'yolo',
+                 neck_mode: str = 'all',
+                 act: str = 'silu',
+                 asff_channel: int = 16,
+                 stacked_convs: int = 6,
+                 la_down_rate: int = 8,
+                 conv_layers: int = 2,
+                 decode_in_inference: bool = True,
+                 backbone='CSPDarknet',
+                 expand_kernel=3,
+                 pretrained: str = None):
+        super(YOLOX, self).__init__()
+        assert model_type in self.param_map, f'invalid model_type for yolox {model_type}, valid ones are {list(self.param_map.keys())}'
+
+        in_channels = [256, 512, 1024]
+        depth = self.param_map[model_type][0]
+        width = self.param_map[model_type][1]
+
+        self.backbone = YOLOPAFPN(
+            depth,
+            width,
+            in_channels=in_channels,
+            asff_channel=asff_channel,
+            act=act,
+            use_att=use_att,
+            backbone=backbone,
+            neck=neck,
+            neck_mode=neck_mode,
+            expand_kernel=expand_kernel)
+
+        self.head_type = head_type
+        if head_type == 'yolox':
+            self.head = YOLOXHead(
+                num_classes,
+                width,
+                in_channels=in_channels,
+                act=act,
+                obj_loss_type=obj_loss_type,
+                reg_loss_type=reg_loss_type)
+            self.head.initialize_biases(1e-2)
+        elif head_type == 'tood':
+            self.head = TOODHead(
+                num_classes,
+                width,
+                in_channels=in_channels,
+                act=act,
+                obj_loss_type=obj_loss_type,
+                reg_loss_type=reg_loss_type,
+                stacked_convs=stacked_convs,
+                la_down_rate=la_down_rate,
+                conv_layers=conv_layers,
+                decode_in_inference=decode_in_inference)
+            self.head.initialize_biases(1e-2)
+
+        self.decode_in_inference = decode_in_inference
+        # use decode, we will use post process as default
+        if not self.decode_in_inference:
+            logging.warning(
+                'YOLOX-PAI head decode_in_inference is disabled for speed test; post process will be disabled as well!'
+            )
+            self.ignore_postprocess = True
+            logging.warning('YOLOX-PAI ignore_postprocess set to True')
+        else:
+            self.ignore_postprocess = False
+
+        self.apply(init_yolo)  # init_yolo(self)
+        self.num_classes = num_classes
+        self.test_conf = test_conf
+        self.nms_thre = nms_thre
+        self.test_size = test_size
+        self.epoch_counter = 0
+
+    def forward_train(self,
+                      img: Tensor,
+                      gt_bboxes: Tensor,
+                      gt_labels: Tensor,
+                      img_metas=None,
+                      scale=None) -> Dict[str, Tensor]:
+        """ Abstract interface for model forward in training
+
+        Args:
+            img (Tensor): image tensor, NxCxHxW
+            target (List[Tensor]): list of target tensor, NTx5 [class,x_c,y_c,w,h]
+        """
+
+        # gt_bboxes = gt_bboxes.to(torch.float16)
+        # gt_labels = gt_labels.to(torch.float16)
+
+        fpn_outs = self.backbone(img)
+
+        targets = torch.cat([gt_labels, gt_bboxes], dim=2)
+
+        loss, iou_loss, conf_loss, cls_loss, l1_loss, num_fg = self.head(
+            fpn_outs, targets, img)
+
+        outputs = {
+            'total_loss':
+            loss,
+            'iou_l':
+            iou_loss,
+            'conf_l':
+            conf_loss,
+            'cls_l':
+            cls_loss,
+            'img_h':
+            torch.tensor(img_metas[0]['img_shape'][0],
+                         device=loss.device).float(),
+            'img_w':
+            torch.tensor(img_metas[0]['img_shape'][1],
+                         device=loss.device).float()
+        }
+
+
+        return outputs
+
+    def forward_test(self, img: Tensor, img_metas=None) -> Tensor:
+        """ Abstract interface for model forward in testing
+
+        Args:
+            img (Tensor): image tensor, NxCxHxW
+            target (List[Tensor]): list of target tensor, NTx5 [class,x_c,y_c,w,h]
+        """
+        with torch.no_grad():
+            fpn_outs = self.backbone(img)
+            outputs = self.head(fpn_outs)
+
+            outputs = postprocess(outputs, self.num_classes, self.test_conf,
+                                  self.nms_thre)
+
+            detection_boxes = []
+            detection_scores = []
+            detection_classes = []
+            img_metas_list = []
+
+            for i in range(len(outputs)):
+                if img_metas:
+                    img_metas_list.append(img_metas[i])
+                if outputs[i] is not None:
+                    bboxes = outputs[i][:,
+                                        0:4] if outputs[i] is not None else None
+                    if img_metas:
+                        bboxes /= img_metas[i]['scale_factor'][0]
+                    detection_boxes.append(bboxes.cpu().numpy())
+                    detection_scores.append(
+                        (outputs[i][:, 4] * outputs[i][:, 5]).cpu().numpy())
+                    detection_classes.append(
+                        outputs[i][:, 6].cpu().numpy().astype(np.int32))
+                else:
+                    detection_boxes.append(None)
+                    detection_scores.append(None)
+                    detection_classes.append(None)
+
+            test_outputs = {
+                'detection_boxes': detection_boxes,
+                'detection_scores': detection_scores,
+                'detection_classes': detection_classes,
+                'img_metas': img_metas_list
+            }
+
+        return test_outputs
+
+    def forward(self, img, mode='compression', **kwargs):
+        if mode == 'train':
+            return self.forward_train(img, **kwargs)
+        elif mode == 'test':
+            return self.forward_test(img, **kwargs)
+        elif mode == 'compression':
+            return self.forward_compression(img, **kwargs)
+
+    def forward_compression(self, x):
+        # fpn output content features of [dark3, dark4, dark5]
+        fpn_outs = self.backbone(x)
+        outputs = self.head(fpn_outs)
+
+        return outputs
+
+    def forward_export(self, img):
+        with torch.no_grad():
+            fpn_outs = self.backbone(img)
+            outputs = self.head(fpn_outs)
+
+            if self.decode_in_inference:
+                outputs = postprocess(outputs, self.num_classes,
+                                      self.test_conf, self.nms_thre)
+
+        return outputs

From e1e9e8a283e306498930e9555bd770b105bf5d88 Mon Sep 17 00:00:00 2001
From: zouxinyi0625 <zouxinyi.zxy@alibaba-inc.com>
Date: Sun, 14 Aug 2022 16:04:11 +0800
Subject: [PATCH 36/69] fix bugs mentioned in code review

---
 .../detection/yolox/yolox_l_8xb8_300e_coco.py | 11 ++-
 .../yolox/yolox_m_8xb16_300e_coco.py          |  9 +-
 .../yolox/yolox_nano_8xb16_300e_coco.py       |  9 +-
 .../yolox/yolox_s_8xb16_300e_coco.py          |  3 +-
 .../yolox_s_8xb16_300e_coco_asff_tood3.py     |  9 +-
 .../yolox/yolox_s_8xb16_300e_coco_rep.py      | 10 +-
 .../yolox/yolox_s_8xb16_300e_coco_tood3.py    |  3 +-
 .../yolox/yolox_tiny_8xb16_300e_coco.py       |  9 +-
 easycv/apis/export.py                         |  1 +
 easycv/hooks/yolox_mode_switch_hook.py        |  2 -
 easycv/models/backbones/darknet.py            | 37 ++-----
 easycv/models/backbones/network_blocks.py     | 35 +++----
 .../models/backbones/repvgg_yolox_backbone.py | 14 +--
 .../models/detection/detectors/convert2new.py | 97 +++++++++++++++++++
 .../detectors/yolox/{ASFF.py => asff.py}      | 31 +-----
 .../detectors/yolox/yolo_head_template.py     | 26 ++---
 .../detection/detectors/yolox/yolo_pafpn.py   |  5 +-
 easycv/models/detection/utils/misc.py         | 30 +++++-
 easycv/models/detection/utils/utils.py        | 66 -------------
 easycv/models/loss/focal_loss.py              |  2 +
 easycv/models/loss/iou_loss.py                | 13 ++-
 easycv/models/utils/ops.py                    |  6 +-
 easycv/utils/checkpoint.py                    |  3 +-
 tools/eval.py                                 |  1 -
 24 files changed, 234 insertions(+), 198 deletions(-)
 create mode 100644 easycv/models/detection/detectors/convert2new.py
 rename easycv/models/detection/detectors/yolox/{ASFF.py => asff.py} (88%)
 delete mode 100644 easycv/models/detection/utils/utils.py

diff --git a/configs/detection/yolox/yolox_l_8xb8_300e_coco.py b/configs/detection/yolox/yolox_l_8xb8_300e_coco.py
index 93b3ce33..4a84d96a 100644
--- a/configs/detection/yolox/yolox_l_8xb8_300e_coco.py
+++ b/configs/detection/yolox/yolox_l_8xb8_300e_coco.py
@@ -1,7 +1,14 @@
-_base_ = 'configs/detection/yolox/yolox_s_8xb16_300e_coco.py'
+_base_ = './yolox_s_8xb16_300e_coco.py'
 
 # model settings
-model = dict(model_type='l')
+model = dict(
+    backbone=dict(
+        model_type='l',  # s m l x tiny nano
+    ),
+    head=dict(
+        model_type='l',
+    )
+)
 
 data = dict(imgs_per_gpu=8, workers_per_gpu=4)
 
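As a side note on the backbone/head `model_type` fields introduced above: a minimal sketch of the usual YOLOX size-to-multiplier mapping. The exact table lives in EasyCV's backbone and head implementations, so the values below follow the upstream YOLOX convention and should be read as an assumption rather than a quote of this repository.

# Assumed model_type -> (depth_multiplier, width_multiplier) table, following
# upstream YOLOX defaults; EasyCV's own lookup may differ in detail.
PARAM_MAP = {
    'nano': (0.33, 0.25),
    'tiny': (0.33, 0.375),
    's': (0.33, 0.50),
    'm': (0.67, 0.75),
    'l': (1.00, 1.00),
    'x': (1.33, 1.25),
}

def scaled_channels(base_channels, model_type):
    """Scale a base channel count by the width multiplier of the chosen size."""
    _, width_mult = PARAM_MAP[model_type]
    return int(base_channels * width_mult)

assert scaled_channels(256, 'l') == 256
assert scaled_channels(256, 's') == 128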
diff --git a/configs/detection/yolox/yolox_m_8xb16_300e_coco.py b/configs/detection/yolox/yolox_m_8xb16_300e_coco.py
index 3386d4d4..2fb74503 100644
--- a/configs/detection/yolox/yolox_m_8xb16_300e_coco.py
+++ b/configs/detection/yolox/yolox_m_8xb16_300e_coco.py
@@ -1,4 +1,11 @@
 _base_ = './yolox_s_8xb16_300e_coco.py'
 
 # model settings
-model = dict(model_type='m')
+model = dict(
+    backbone=dict(
+        model_type='m',  # s m l x tiny nano
+    ),
+    head=dict(
+        model_type='m',
+    )
+)
diff --git a/configs/detection/yolox/yolox_nano_8xb16_300e_coco.py b/configs/detection/yolox/yolox_nano_8xb16_300e_coco.py
index f942ec47..8a8d9592 100644
--- a/configs/detection/yolox/yolox_nano_8xb16_300e_coco.py
+++ b/configs/detection/yolox/yolox_nano_8xb16_300e_coco.py
@@ -1,4 +1,11 @@
 _base_ = './yolox_tiny_8xb16_300e_coco.py'
 
 # model settings
-model = dict(model_type='nano')
+model = dict(
+    backbone=dict(
+        model_type='nano',  # s m l x tiny nano
+    ),
+    head=dict(
+        model_type='nano',
+    )
+)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
index 68ccada3..99c4465e 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
@@ -97,7 +97,7 @@
             dict(type='LoadAnnotations', with_bbox=True)
         ],
         classes=CLASSES,
-        filter_empty_gt=False,
+        filter_empty_gt=True,
         iscrowd=False),
     pipeline=train_pipeline,
     dynamic_scale=img_scale)
@@ -115,6 +115,7 @@
         ],
         classes=CLASSES,
         filter_empty_gt=False,
+        test_mode=True,
         iscrowd=True),
     pipeline=test_pipeline,
     dynamic_scale=None,
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood3.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood3.py
index 73787692..13164511 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood3.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood3.py
@@ -22,7 +22,7 @@
 )
 
 # s m l x
-img_scale = (640, 640)
+img_scale = (672, 672)
 random_size = (14, 26)
 scale_ratio = (0.1, 2)
 
@@ -48,9 +48,9 @@
 ]
 
 # dataset settings
-# data_root = '/apsarapangu/disk2/xinyi.zxy/data/coco/'
+data_root = '/apsarapangu/disk2/xinyi.zxy/data/coco/'
 # data_root = '/mnt/data/nas/data/detection/coco/'
-data_root = '/root/workspace/data/coco/'
+# data_root = '/root/workspace/data/coco/'
 
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
@@ -98,7 +98,7 @@
             dict(type='LoadAnnotations', with_bbox=True)
         ],
         classes=CLASSES,
-        filter_empty_gt=False,
+        filter_empty_gt=True,
         iscrowd=False),
     pipeline=train_pipeline,
     dynamic_scale=img_scale)
@@ -116,6 +116,7 @@
         ],
         classes=CLASSES,
         filter_empty_gt=False,
+        test_mode=True,
         iscrowd=True),
     pipeline=test_pipeline,
     dynamic_scale=None,
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_rep.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_rep.py
index bb113bf7..af05aa82 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_rep.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_rep.py
@@ -97,7 +97,7 @@
             dict(type='LoadAnnotations', with_bbox=True)
         ],
         classes=CLASSES,
-        filter_empty_gt=False,
+        filter_empty_gt=True,
         iscrowd=False),
     pipeline=train_pipeline,
     dynamic_scale=img_scale)
@@ -115,6 +115,7 @@
         ],
         classes=CLASSES,
         filter_empty_gt=False,
+        test_mode=True,
         iscrowd=True),
     pipeline=test_pipeline,
     dynamic_scale=None,
@@ -194,4 +195,9 @@
         # dict(type='WandbLoggerHookV2'),
     ])
 
-export = dict(use_jit=False, export_blade=False, end2end=False)
+export = dict(use_jit=True,
+              export_blade=True,  # whether to also export a blade-optimized model
+              end2end=False,      # whether to wrap NMS into the exported jit + blade model
+              batch_size=1,       # batch size used when static_opt=True
+              fp16_fallback_ratio=0.05,   # ratio of layers allowed to fall back from fp16 to fp32
+              static_opt=True)    # whether to use static-shape optimization; defaults to True
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_tood3.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_tood3.py
index 9c20fee9..6e992292 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_tood3.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_tood3.py
@@ -97,7 +97,7 @@
             dict(type='LoadAnnotations', with_bbox=True)
         ],
         classes=CLASSES,
-        filter_empty_gt=False,
+        filter_empty_gt=True,
         iscrowd=False),
     pipeline=train_pipeline,
     dynamic_scale=img_scale)
@@ -115,6 +115,7 @@
         ],
         classes=CLASSES,
         filter_empty_gt=False,
+        test_mode=True,
         iscrowd=True),
     pipeline=test_pipeline,
     dynamic_scale=None,
diff --git a/configs/detection/yolox/yolox_tiny_8xb16_300e_coco.py b/configs/detection/yolox/yolox_tiny_8xb16_300e_coco.py
index 436244d9..52c3d171 100644
--- a/configs/detection/yolox/yolox_tiny_8xb16_300e_coco.py
+++ b/configs/detection/yolox/yolox_tiny_8xb16_300e_coco.py
@@ -1,7 +1,14 @@
 _base_ = './yolox_s_8xb16_300e_coco.py'
 
 # model settings
-model = dict(model_type='tiny')
+model = dict(
+    backbone=dict(
+        model_type='tiny',  # s m l x tiny nano
+    ),
+    head=dict(
+        model_type='tiny',
+    )
+)
 
 CLASSES = [
     'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
diff --git a/easycv/apis/export.py b/easycv/apis/export.py
index 3b22e168..c451af0a 100644
--- a/easycv/apis/export.py
+++ b/easycv/apis/export.py
@@ -37,6 +37,7 @@ def reparameterize_models(model):
     logging.info(
         'export : PAI-export reparameterize_count(RepVGGBlock, ) switch to deploy with {} blocks'
         .format(reparameterize_count))
+    print(reparameterize_count)
     return model
 
 
diff --git a/easycv/hooks/yolox_mode_switch_hook.py b/easycv/hooks/yolox_mode_switch_hook.py
index c723cab9..d8ab6da7 100644
--- a/easycv/hooks/yolox_mode_switch_hook.py
+++ b/easycv/hooks/yolox_mode_switch_hook.py
@@ -41,5 +41,3 @@ def before_train_epoch(self, runner):
             runner.logger.info('Add additional L1 loss now!')
             model.head.use_l1 = True
 
-        if hasattr(runner.model.module, 'epoch_counter'):
-            runner.model.module.epoch_counter = epoch
diff --git a/easycv/models/backbones/darknet.py b/easycv/models/backbones/darknet.py
index 49c1dc2b..82b571c2 100644
--- a/easycv/models/backbones/darknet.py
+++ b/easycv/models/backbones/darknet.py
@@ -15,8 +15,7 @@ def __init__(self,
                  depth,
                  in_channels=3,
                  stem_out_channels=32,
-                 out_features=('dark3', 'dark4', 'dark5'),
-                 spp_type='spp'):
+                 out_features=('dark3', 'dark4', 'dark5')):
         """
         Args:
             depth (int): depth of darknet used in model, usually use [21, 53] for this param.
@@ -49,18 +48,12 @@ def __init__(self,
             *self.make_group_layer(in_channels, num_blocks[2], stride=2))
         in_channels *= 2  # 512
 
-        if spp_type == 'spp':
-            self.dark5 = nn.Sequential(
-                *self.make_group_layer(in_channels, num_blocks[3], stride=2),
-                *self.make_spp_block([in_channels, in_channels * 2],
-                                     in_channels * 2),
-            )
-        elif spp_type == 'sppf':
-            self.dark5 = nn.Sequential(
-                *self.make_group_layer(in_channels, num_blocks[3], stride=2),
-                *self.make_sppf_block([in_channels, in_channels * 2],
-                                      in_channels * 2),
-            )
+        self.dark5 = nn.Sequential(
+            *self.make_group_layer(in_channels, num_blocks[3], stride=2),
+            *self.make_spp_block([in_channels, in_channels * 2],
+                                 in_channels * 2),
+        )
+
 
     def make_group_layer(self,
                          in_channels: int,
@@ -94,22 +87,6 @@ def make_spp_block(self, filters_list, in_filters):
         ])
         return m
 
-    def make_sppf_block(self, filters_list, in_filters):
-        m = nn.Sequential(*[
-            BaseConv(in_filters, filters_list[0], 1, stride=1, act='lrelu'),
-            BaseConv(
-                filters_list[0], filters_list[1], 3, stride=1, act='lrelu'),
-            SPPBottleneck(
-                in_channels=filters_list[1],
-                out_channels=filters_list[0],
-                activation='lrelu',
-            ),
-            BaseConv(
-                filters_list[0], filters_list[1], 3, stride=1, act='lrelu'),
-            BaseConv(
-                filters_list[1], filters_list[0], 1, stride=1, act='lrelu'),
-        ])
-        return m
 
     def forward(self, x):
         outputs = {}
diff --git a/easycv/models/backbones/network_blocks.py b/easycv/models/backbones/network_blocks.py
index 5bbfaec8..ad6922cd 100644
--- a/easycv/models/backbones/network_blocks.py
+++ b/easycv/models/backbones/network_blocks.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2014-2021 Megvii Inc And Alibaba PAI Team. All rights reserved.
+# Copyright (c) 2014-2021 Megvii Inc, AlanLi And Alibaba PAI Team. All rights reserved.
 
 import torch
 import torch.nn as nn
@@ -59,8 +59,8 @@ class BaseConv(nn.Module):
     def __init__(self,
                  in_channels,
                  out_channels,
-                 ksize=1,
-                 stride=1,
+                 ksize,
+                 stride,
                  groups=1,
                  bias=False,
                  act='silu'):
@@ -161,10 +161,7 @@ def __init__(self,
         hidden_channels = in_channels // 2
         self.conv1 = BaseConv(
             in_channels, hidden_channels, 1, stride=1, act=activation)
-        # self.m = nn.ModuleList([
-        #     nn.MaxPool2d(kernel_size=ks, stride=1, padding=ks // 2)
-        #     for ks in kernel_sizes
-        # ])
+
         self.m = nn.MaxPool2d(
             kernel_size=kernel_size, stride=1, padding=kernel_size // 2)
 
@@ -177,8 +174,7 @@ def forward(self, x):
         x1 = self.m(x)
         x2 = self.m(x1)
         x = self.conv2(torch.cat([x, x1, x2, self.m(x2)], 1))
-        # x = torch.cat([x] + [m(x) for m in self.m], dim=1)
-        # x = self.conv2(x)
+
         return x
 
 
@@ -288,7 +284,10 @@ def forward(self, x):
 
 
 class GSConv(nn.Module):
-    # GSConv https://github.com/AlanLi1997/slim-neck-by-gsconv
+    """
+        GSConv is used to merge the channel information of DSConv and BaseConv
+        You can refer to https://github.com/AlanLi1997/slim-neck-by-gsconv for more details
+    """
     def __init__(self, c1, c2, k=1, s=1, g=1, act='silu'):
         super().__init__()
         c_ = c2 // 2
@@ -309,24 +308,26 @@ def forward(self, x):
 
 
 class GSBottleneck(nn.Module):
-    # GS Bottleneck https://github.com/AlanLi1997/slim-neck-by-gsconv
+    """
+        GSBottleneck stacks GSConv layers to form a lightweight bottleneck block.
+        You can refer to https://github.com/AlanLi1997/slim-neck-by-gsconv for more details
+    """
     def __init__(self, c1, c2, k=3, s=1):
         super().__init__()
         c_ = c2 // 2
-        # for lighting
+
         self.conv_lighting = nn.Sequential(
             GSConv(c1, c_, 1, 1), GSConv(c_, c2, 1, 1, act='identity'))
-        # for receptive field
-        self.conv = nn.Sequential(
-            GSConv(c1, c_, 3, 1), GSConv(c_, c2, 3, 1, act='identity'))
-        self.shortcut = nn.Identity()
 
     def forward(self, x):
         return self.conv_lighting(x)
 
 
 class VoVGSCSP(nn.Module):
-    # VoV-GSCSP https://github.com/AlanLi1997/slim-neck-by-gsconv
+    """
+        VoVGSCSP is a new neck structure used in CSPNet
+        You can refer to https://github.com/AlanLi1997/slim-neck-by-gsconv for more details
+    """
     def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
         super().__init__()
         c_ = int(c2 * e)
diff --git a/easycv/models/backbones/repvgg_yolox_backbone.py b/easycv/models/backbones/repvgg_yolox_backbone.py
index e20f8c6c..aef3ffe8 100644
--- a/easycv/models/backbones/repvgg_yolox_backbone.py
+++ b/easycv/models/backbones/repvgg_yolox_backbone.py
@@ -1,16 +1,11 @@
 # borrow some code from https://github.com/DingXiaoH/RepVGG/repvgg.py MIT2.0
-import copy
-import math
 import warnings
 
 import numpy as np
 import torch
 import torch.nn as nn
 
-
-def make_divisible(x, divisor):
-    # Upward revision the value x to make it evenly divisible by the divisor.
-    return math.ceil(x / divisor) * divisor
+from easycv.models.utils.ops import make_divisible
 
 
 def conv_bn(in_channels, out_channels, kernel_size, stride, padding, groups=1):
@@ -31,7 +26,7 @@ def conv_bn(in_channels, out_channels, kernel_size, stride, padding, groups=1):
 
 
 class RepVGGBlock(nn.Module):
-
+    """Basic Block of RepVGG"""
     def __init__(self,
                  in_channels,
                  out_channels,
@@ -108,6 +103,7 @@ def forward(self, inputs):
     #           loss += weight_decay_coefficient * 0.5 * blk.get_cust_L2()
     #       optimizer.zero_grad()
     #       loss.backward()
+
     def get_custom_L2(self):
         K3 = self.rbr_dense.conv.weight
         K1 = self.rbr_1x1.conv.weight
@@ -201,7 +197,6 @@ def switch_to_deploy(self):
 
 class ConvBNAct(nn.Module):
     '''Normal Conv with SiLU activation'''
-
     def __init__(self,
                  in_channels,
                  out_channels,
@@ -363,6 +358,7 @@ def forward(self, x):
         return tuple(outputs)
 
 
+
 if __name__ == '__main__':
 
     from torchsummaryX import summary
@@ -390,4 +386,4 @@ def forward(self, x):
     model = model.cuda()
 
     a = torch.randn(1, 3, 640, 640).cuda()
-    summary(model, a)
+    summary(model, a)
\ No newline at end of file
diff --git a/easycv/models/detection/detectors/convert2new.py b/easycv/models/detection/detectors/convert2new.py
new file mode 100644
index 00000000..176c4876
--- /dev/null
+++ b/easycv/models/detection/detectors/convert2new.py
@@ -0,0 +1,97 @@
+from easycv.utils.checkpoint import load_checkpoint
+from easycv.utils.config_tools import (CONFIG_TEMPLATE_ZOO,
+                                       mmcv_config_fromfile, rebuild_config)
+import torch
+from easycv.models import build_model
+
+
+if __name__=='__main__':
+    cfg_path = '/apsara/xinyi.zxy/code/pr154/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood3.py'
+    cfg = mmcv_config_fromfile(cfg_path)
+    model = build_model(cfg.model)
+    print(model)
+
+    ckpt_path = '/apsara/xinyi.zxy/pretrain/asff_tood3/epoch_300_new.pth'
+    model_ckpt = torch.load(ckpt_path)
+    pretrain_model_state = model_ckpt['state_dict']
+
+    model.load_state_dict(pretrain_model_state)
+
+    exit()
+
+    model_state_dict = model.state_dict()  # keys of the newly built model
+
+    # of1 = open('new.txt','w')
+    # for key in model_state_dict.keys():
+    #     of1.writelines(key+'\n')
+    #
+    # of2 = open('pre.txt', 'w')
+    # for key in pretrain_model_state.keys():
+    #     of2.writelines(key + '\n')
+
+    key_ori = [
+        'backbone.stem',
+        'ERBlock_2.0',
+        'ERBlock_2.1.conv1',
+        'ERBlock_2.1.block.0',
+        'ERBlock_3.0',
+        'ERBlock_3.1.conv1',
+        'ERBlock_3.1.block.0',
+        'ERBlock_3.1.block.1',
+        'ERBlock_3.1.block.2',
+        'ERBlock_4.0',
+        'ERBlock_4.1.conv1',
+        'ERBlock_4.1.block.0',
+        'ERBlock_4.1.block.1',
+        'ERBlock_4.1.block.2',
+        'ERBlock_4.1.block.3',
+        'ERBlock_4.1.block.4',
+        'ERBlock_5.0',
+        'ERBlock_5.1.conv1',
+        'ERBlock_5.1.block.0',
+        'ERBlock_5.2'
+    ]
+
+    key_new = [
+        'backbone.stage0',
+        'stage1.0',
+        'stage1.1',
+        'stage1.2',
+        'stage2.0',
+        'stage2.1',
+        'stage2.2',
+        'stage2.3',
+        'stage2.4',
+        'stage3.0',
+        'stage3.1',
+        'stage3.2',
+        'stage3.3',
+        'stage3.4',
+        'stage3.5',
+        'stage3.6',
+        'stage4.0',
+        'stage4.1',
+        'stage4.2',
+        'stage4.3'
+    ]
+
+    print(len(key_ori)==len(key_new))
+
+    for i, key in enumerate(pretrain_model_state):
+        find = False
+        for t_i, t_k in enumerate(key_ori):
+            if t_k in key:
+                find = True
+                break
+        if find:
+            model_state_dict[key.replace(t_k,key_new[t_i])] = pretrain_model_state[key]
+        else:
+            model_state_dict[key] = pretrain_model_state[key]
+
+    model.load_state_dict(model_state_dict)
+
+    model_ckpt['state_dict'] = model_state_dict
+    ckpt_path_new = '/apsara/xinyi.zxy/pretrain/asff_tood3/epoch_300_new.pth'
+    torch.save(model_ckpt, ckpt_path_new)
+
+
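To make the key-renaming loop in convert2new.py easier to follow, here is a small self-contained sketch of the same substring-replacement idea; the checkpoint keys below are invented for illustration and are not taken from a real EasyCV checkpoint.

# Hypothetical old-style keys (ERBlock naming) remapped to new-style stage naming,
# using the same "first matching substring wins" rule as the loop above.
key_ori = ['backbone.stem', 'ERBlock_2.0', 'ERBlock_2.1.conv1']
key_new = ['backbone.stage0', 'stage1.0', 'stage1.1']

old_state = {
    'backbone.stem.conv.weight': 0,
    'backbone.ERBlock_2.0.rbr_dense.conv.weight': 1,
    'backbone.ERBlock_2.1.conv1.conv.weight': 2,
}

new_state = {}
for key, value in old_state.items():
    for old, new in zip(key_ori, key_new):
        if old in key:
            key = key.replace(old, new)
            break
    new_state[key] = value

assert 'backbone.stage1.0.rbr_dense.conv.weight' in new_state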
diff --git a/easycv/models/detection/detectors/yolox/ASFF.py b/easycv/models/detection/detectors/yolox/asff.py
similarity index 88%
rename from easycv/models/detection/detectors/yolox/ASFF.py
rename to easycv/models/detection/detectors/yolox/asff.py
index 56448502..2aa40680 100644
--- a/easycv/models/detection/detectors/yolox/ASFF.py
+++ b/easycv/models/detection/detectors/yolox/asff.py
@@ -1,3 +1,4 @@
+# Copyright (c) 2014-2021 Alibaba PAI-Teams and GOATmessi7. All rights reserved.
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
@@ -52,6 +53,8 @@ def __init__(self,
                     int(1024 * multiplier), self.inter_dim, 1, 1, act=act)
                 self.compress_level_1 = Conv(
                     int(512 * multiplier), self.inter_dim, 1, 1, act=act)
+            else:
+                raise ValueError('Invalid level {}'.format(level))
 
         # add expand layer
         self.expand = Conv(
@@ -167,31 +170,3 @@ def forward(self, x):  # l,m,s
 
         return out
 
-if __name__=='__main__':
-
-    width = 0.5
-    num_classes = 80
-    in_channels = [256, 512, 1024]
-
-    asff_channel = 2
-    act = 'silu'
-    type = 'ASFF_sim'
-    expand_kernel = 1
-
-    asff_1 = ASFF(
-        level=0, type = type, multiplier=width, asff_channel=asff_channel, act=act,expand_kernel=expand_kernel).cuda()
-    asff_2 = ASFF(
-        level=1, type = type, multiplier=width, asff_channel=asff_channel, act=act,expand_kernel=expand_kernel).cuda()
-    asff_3 = ASFF(
-        level=2, type = type, multiplier=width, asff_channel=asff_channel, act=act,expand_kernel=expand_kernel).cuda()
-
-    input = (torch.rand(1, 128, 80, 80).cuda(), torch.rand(1, 256, 40,
-                                                           40).cuda(),
-             torch.rand(1, 512, 20, 20).cuda())
-
-    from torchsummaryX import summary
-
-    summary(asff_1, input)
-    summary(asff_2, input)
-    summary(asff_3, input)
-
diff --git a/easycv/models/detection/detectors/yolox/yolo_head_template.py b/easycv/models/detection/detectors/yolox/yolo_head_template.py
index 1d7e5981..cc0c7b54 100644
--- a/easycv/models/detection/detectors/yolox/yolo_head_template.py
+++ b/easycv/models/detection/detectors/yolox/yolo_head_template.py
@@ -7,10 +7,10 @@
 import torch.nn as nn
 import torch.nn.functional as F
 from abc import abstractmethod
-from easycv.models.builder import HEADS
+
 from easycv.models.backbones.network_blocks import BaseConv, DWConv
 from easycv.models.detection.utils import bboxes_iou
-from easycv.models.loss import FocalLoss, IOUloss, VarifocalLoss
+from easycv.models.loss import IOUloss
 
 
 class YOLOXHead_Template(nn.Module):
@@ -142,13 +142,8 @@ def __init__(self,
         self.obj_loss_type = obj_loss_type
         if obj_loss_type == 'BCE':
             self.obj_loss = nn.BCEWithLogitsLoss(reduction='none')
-        elif obj_loss_type == 'focal':
-            self.obj_loss = FocalLoss(reduction='none')
-
-        elif obj_loss_type == 'v_focal':
-            self.obj_loss = VarifocalLoss(reduction='none')
         else:
-            assert 'Undefined loss type: {}'.format(obj_loss_type)
+            raise KeyError('Undefined loss type: {}'.format(obj_loss_type))
 
         self.strides = strides
         self.grids = [torch.zeros(1)] * len(in_channels)
@@ -347,12 +342,9 @@ def get_losses(
         loss_iou = (self.iou_loss(
             bbox_preds.view(-1, 4)[fg_masks], reg_targets)).sum() / num_fg
 
-        if self.obj_loss_type == 'focal':
-            loss_obj = (self.focal_loss(obj_preds.sigmoid().view(-1, 1),
-                                        obj_targets)).sum() / num_fg
-        else:
-            loss_obj = (self.obj_loss(obj_preds.view(-1, 1),
-                                      obj_targets)).sum() / num_fg
+        loss_obj = (self.obj_loss(obj_preds.view(-1, 1),
+                                  obj_targets)).sum() / num_fg
+
         loss_cls = (self.bcewithlog_loss(
             cls_preds.view(-1, self.num_classes)[fg_masks],
             cls_targets)).sum() / num_fg
@@ -437,13 +429,13 @@ def get_assignments(
         )
         # reference to: https://github.com/Megvii-BaseDetection/YOLOX/pull/811
         # NOTE: Fix `selected index k out of range`
-        npa: int = fg_mask.sum().item()  # number of positive anchors
+        num_pos_anchors: int = fg_mask.sum().item()  # number of positive anchors
 
-        if npa == 0:
+        if num_pos_anchors == 0:
             gt_matched_classes = torch.zeros(0, device=fg_mask.device).long()
             pred_ious_this_matching = torch.rand(0, device=fg_mask.device)
             matched_gt_inds = gt_matched_classes
-            num_fg = npa
+            num_fg = num_pos_anchors
 
             if mode == 'cpu':
                 gt_matched_classes = gt_matched_classes.cuda()
diff --git a/easycv/models/detection/detectors/yolox/yolo_pafpn.py b/easycv/models/detection/detectors/yolox/yolo_pafpn.py
index e8ae8ec7..af9e1b7c 100644
--- a/easycv/models/detection/detectors/yolox/yolo_pafpn.py
+++ b/easycv/models/detection/detectors/yolox/yolo_pafpn.py
@@ -9,11 +9,8 @@
 from easycv.models.backbones.network_blocks import (BaseConv, CSPLayer, DWConv,
                                                     GSConv, VoVGSCSP)
 from easycv.models.backbones.repvgg_yolox_backbone import RepVGGYOLOX
-from .ASFF import ASFF
+from .asff import ASFF
 
-def make_divisible(x, divisor):
-    # Upward revision the value x to make it evenly divisible by the divisor.
-    return math.ceil(x / divisor) * divisor
 
 
 
diff --git a/easycv/models/detection/utils/misc.py b/easycv/models/detection/utils/misc.py
index 3bb72050..8cf3a006 100644
--- a/easycv/models/detection/utils/misc.py
+++ b/easycv/models/detection/utils/misc.py
@@ -1,6 +1,6 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 from typing import List, Optional
-
+from torch.autograd import Function
 import numpy as np
 import torch
 import torchvision
@@ -181,3 +181,31 @@ def inverse_sigmoid(x, eps=1e-3):
     x1 = x.clamp(min=eps)
     x2 = (1 - x).clamp(min=eps)
     return torch.log(x1 / x2)
+
+class SigmoidGeometricMean(Function):
+    """Forward and backward function of geometric mean of two sigmoid
+    functions.
+
+    This implementation with analytical gradient function substitutes
+    the autograd function of (x.sigmoid() * y.sigmoid()).sqrt(). The
+    original implementation incurs NaN during gradient backpropagation
+    if both x and y are very small values.
+    """
+
+    @staticmethod
+    def forward(ctx, x, y):
+        x_sigmoid = x.sigmoid()
+        y_sigmoid = y.sigmoid()
+        z = (x_sigmoid * y_sigmoid).sqrt()
+        ctx.save_for_backward(x_sigmoid, y_sigmoid, z)
+        return z
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        x_sigmoid, y_sigmoid, z = ctx.saved_tensors
+        grad_x = grad_output * z * (1 - x_sigmoid) / 2
+        grad_y = grad_output * z * (1 - y_sigmoid) / 2
+        return grad_x, grad_y
+
+
+sigmoid_geometric_mean = SigmoidGeometricMean.apply
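A quick numerical sanity check (not part of the patch) for the analytical gradient above: on well-conditioned inputs the plain autograd formulation and the closed-form gradient z * (1 - sigmoid(x)) / 2 agree; the custom Function only differs by staying finite when both inputs are extremely negative.

import torch

def naive_geometric_mean(x, y):
    return (x.sigmoid() * y.sigmoid()).sqrt()

x = torch.tensor([0.3, -1.2], requires_grad=True)
y = torch.tensor([1.5, 0.7], requires_grad=True)

naive_geometric_mean(x, y).sum().backward()

# Closed-form gradient used by SigmoidGeometricMean.backward: dz/dx = z * (1 - sigmoid(x)) / 2
z = naive_geometric_mean(x, y).detach()
expected_grad_x = z * (1 - x.sigmoid().detach()) / 2
assert torch.allclose(x.grad, expected_grad_x, atol=1e-6)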
diff --git a/easycv/models/detection/utils/utils.py b/easycv/models/detection/utils/utils.py
deleted file mode 100644
index 9e7ba775..00000000
--- a/easycv/models/detection/utils/utils.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# Copyright (c) Alibaba, Inc. and its affiliates.
-import numpy as np
-from torch.autograd import Function
-from torch.nn import functional as F
-
-
-class SigmoidGeometricMean(Function):
-    """Forward and backward function of geometric mean of two sigmoid
-    functions.
-
-    This implementation with analytical gradient function substitutes
-    the autograd function of (x.sigmoid() * y.sigmoid()).sqrt(). The
-    original implementation incurs NaN during gradient backpropagation
-    if both x and y are very small values.
-    """
-
-    @staticmethod
-    def forward(ctx, x, y):
-        x_sigmoid = x.sigmoid()
-        y_sigmoid = y.sigmoid()
-        z = (x_sigmoid * y_sigmoid).sqrt()
-        ctx.save_for_backward(x_sigmoid, y_sigmoid, z)
-        return z
-
-    @staticmethod
-    def backward(ctx, grad_output):
-        x_sigmoid, y_sigmoid, z = ctx.saved_tensors
-        grad_x = grad_output * z * (1 - x_sigmoid) / 2
-        grad_y = grad_output * z * (1 - y_sigmoid) / 2
-        return grad_x, grad_y
-
-
-sigmoid_geometric_mean = SigmoidGeometricMean.apply
-
-
-def output_postprocess(outputs, img_metas=None):
-    detection_boxes = []
-    detection_scores = []
-    detection_classes = []
-    img_metas_list = []
-
-    for i in range(len(outputs)):
-        if img_metas:
-            img_metas_list.append(img_metas[i])
-        if outputs[i] is not None:
-            bboxes = outputs[i][:, 0:4] if outputs[i] is not None else None
-            if img_metas:
-                bboxes /= img_metas[i]['scale_factor'][0]
-            detection_boxes.append(bboxes.cpu().numpy())
-            detection_scores.append(
-                (outputs[i][:, 4] * outputs[i][:, 5]).cpu().numpy())
-            detection_classes.append(outputs[i][:, 6].cpu().numpy().astype(
-                np.int32))
-        else:
-            detection_boxes.append(None)
-            detection_scores.append(None)
-            detection_classes.append(None)
-
-    test_outputs = {
-        'detection_boxes': detection_boxes,
-        'detection_scores': detection_scores,
-        'detection_classes': detection_classes,
-        'img_metas': img_metas_list
-    }
-
-    return test_outputs
diff --git a/easycv/models/loss/focal_loss.py b/easycv/models/loss/focal_loss.py
index 6adb4630..2e58cfd5 100644
--- a/easycv/models/loss/focal_loss.py
+++ b/easycv/models/loss/focal_loss.py
@@ -24,6 +24,8 @@ def reduce_loss(loss, reduction):
         return loss.mean()
     elif reduction_enum == 2:
         return loss.sum()
+    else:
+        raise ValueError('reduction_enum should be 0 (none), 1 (mean) or 2 (sum)')
 
 
 def varifocal_loss(pred,
diff --git a/easycv/models/loss/iou_loss.py b/easycv/models/loss/iou_loss.py
index e11b812a..87a5062e 100644
--- a/easycv/models/loss/iou_loss.py
+++ b/easycv/models/loss/iou_loss.py
@@ -73,7 +73,6 @@ def giou_loss(pred, target, eps=1e-7):
 
 @LOSSES.register_module
 class IOUloss(nn.Module):
-
     def __init__(self, reduction='none', loss_type='iou'):
         super(IOUloss, self).__init__()
         self.reduction = reduction
@@ -144,11 +143,11 @@ def forward(self, pred, target):
         elif self.loss_type == 'diou':
             c_tl = torch.min(
                 (pred[:, :2] - pred[:, 2:] / 2),
-                (target[:, :2] - target[:, 2:] / 2)  # top-left corner of the enclosing box
+                (target[:, :2] - target[:, 2:] / 2)
             )
             c_br = torch.max(
                 (pred[:, :2] + pred[:, 2:] / 2),
-                (target[:, :2] + target[:, 2:] / 2)  # bottom-right corner of the enclosing box
+                (target[:, :2] + target[:, 2:] / 2)
             )
             convex_dis = torch.pow(c_br[:, 0] - c_tl[:, 0], 2) + torch.pow(
                 c_br[:, 1] - c_tl[:, 1], 2) + 1e-7  # convex diagonal squared
@@ -197,11 +196,11 @@ def forward(self, pred, target):
                           torch.pow(pred[:, 1] - target[:, 1], 2)
                           )  # center diagonal squared
 
-            dis_w = torch.pow(pred[:, 2] - target[:, 2], 2)  # squared distance between the two boxes' widths
-            dis_h = torch.pow(pred[:, 3] - target[:, 3], 2)  # squared distance between the two boxes' heights
+            dis_w = torch.pow(pred[:, 2] - target[:, 2], 2)
+            dis_h = torch.pow(pred[:, 3] - target[:, 3], 2)
 
-            C_w = torch.pow(c_br[:, 0] - c_tl[:, 0], 2) + 1e-7  # squared width of the enclosing box
-            C_h = torch.pow(c_br[:, 1] - c_tl[:, 1], 2) + 1e-7  # squared height of the enclosing box
+            C_w = torch.pow(c_br[:, 0] - c_tl[:, 0], 2) + 1e-7
+            C_h = torch.pow(c_br[:, 1] - c_tl[:, 1], 2) + 1e-7
 
             eiou = iou - (center_dis / convex_dis) - (dis_w / C_w) - (
                 dis_h / C_h)
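To make the EIoU branch above easier to follow, a self-contained sketch of how its penalty terms combine for a single (cx, cy, w, h) box pair; the IoU value is a placeholder here because the overlap computation sits earlier in IOUloss.forward.

import torch

# Hypothetical single box pair in (cx, cy, w, h) format, matching the loss's inputs.
pred = torch.tensor([[50., 50., 20., 40.]])
target = torch.tensor([[55., 48., 24., 36.]])

# Enclosing (convex) box corners, as in the diou/eiou branches above.
c_tl = torch.min(pred[:, :2] - pred[:, 2:] / 2, target[:, :2] - target[:, 2:] / 2)
c_br = torch.max(pred[:, :2] + pred[:, 2:] / 2, target[:, :2] + target[:, 2:] / 2)

convex_dis = (c_br - c_tl).pow(2).sum(dim=1) + 1e-7            # squared convex diagonal
center_dis = (pred[:, :2] - target[:, :2]).pow(2).sum(dim=1)   # squared center distance
dis_w = (pred[:, 2] - target[:, 2]).pow(2)                     # squared width difference
dis_h = (pred[:, 3] - target[:, 3]).pow(2)                     # squared height difference
C_w = (c_br[:, 0] - c_tl[:, 0]).pow(2) + 1e-7                  # squared enclosing width
C_h = (c_br[:, 1] - c_tl[:, 1]).pow(2) + 1e-7                  # squared enclosing height

# iou would come from the overlap computation earlier in IOUloss.forward; a placeholder
# value is used here purely to show how the penalty terms are subtracted from it.
iou = torch.tensor([0.55])
eiou = iou - center_dis / convex_dis - dis_w / C_w - dis_h / C_h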
diff --git a/easycv/models/utils/ops.py b/easycv/models/utils/ops.py
index e8b73ef5..03c5277b 100644
--- a/easycv/models/utils/ops.py
+++ b/easycv/models/utils/ops.py
@@ -4,7 +4,7 @@
 
 import torch
 import torch.nn.functional as F
-
+import math
 
 def resize_tensor(input,
                   size=None,
@@ -46,3 +46,7 @@ def resize_tensor(input,
     if isinstance(size, torch.Size):
         size = tuple(int(x) for x in size)
     return F.interpolate(input, size, scale_factor, mode, align_corners)
+
+def make_divisible(x, divisor):
+    # Round x up to the nearest value that is evenly divisible by the divisor.
+    return math.ceil(x / divisor) * divisor
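A couple of worked values for the helper above, handy when sanity-checking the channel counts produced by the width multipliers:

import math

def make_divisible(x, divisor):
    # Round x up to the nearest multiple of divisor.
    return math.ceil(x / divisor) * divisor

assert make_divisible(67, 8) == 72      # 67 / 8 = 8.375 -> ceil -> 9 -> 9 * 8 = 72
assert make_divisible(64, 8) == 64      # already divisible, unchanged
assert make_divisible(0.5 * 256, 32) == 128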
diff --git a/easycv/utils/checkpoint.py b/easycv/utils/checkpoint.py
index 298219fc..a44fd1b6 100644
--- a/easycv/utils/checkpoint.py
+++ b/easycv/utils/checkpoint.py
@@ -4,8 +4,7 @@
 import torch
 from mmcv.parallel import is_module_wrapper
 from mmcv.runner import load_checkpoint as mmcv_load_checkpoint
-from mmcv.runner.checkpoint import (_save_to_state_dict, get_state_dict,
-                                    weights_to_cpu)
+from mmcv.runner.checkpoint import (get_state_dict,weights_to_cpu)
 from torch.optim import Optimizer
 
 from easycv.file import io
diff --git a/tools/eval.py b/tools/eval.py
index 20463ac6..ca2167b3 100644
--- a/tools/eval.py
+++ b/tools/eval.py
@@ -31,7 +31,6 @@
                                        mmcv_config_fromfile, rebuild_config)
 from easycv.utils.mmlab_utils import dynamic_adapt_for_mmlab
 
-from mmcv.runner.checkpoint import _load_checkpoint
 from easycv.utils.setup_env import setup_multi_processes
 
 

From 2f7534f07fc83ca674a94833eb1e874ff6112510 Mon Sep 17 00:00:00 2001
From: zouxinyi0625 <zouxinyi.zxy@alibaba-inc.com>
Date: Sun, 14 Aug 2022 16:05:37 +0800
Subject: [PATCH 37/69] remove useless file

---
 .../models/detection/detectors/convert2new.py | 97 -------------------
 1 file changed, 97 deletions(-)
 delete mode 100644 easycv/models/detection/detectors/convert2new.py

diff --git a/easycv/models/detection/detectors/convert2new.py b/easycv/models/detection/detectors/convert2new.py
deleted file mode 100644
index 176c4876..00000000
--- a/easycv/models/detection/detectors/convert2new.py
+++ /dev/null
@@ -1,97 +0,0 @@
-from easycv.utils.checkpoint import load_checkpoint
-from easycv.utils.config_tools import (CONFIG_TEMPLATE_ZOO,
-                                       mmcv_config_fromfile, rebuild_config)
-import torch
-from easycv.models import build_model
-
-
-if __name__=='__main__':
-    cfg_path = '/apsara/xinyi.zxy/code/pr154/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood3.py'
-    cfg = mmcv_config_fromfile(cfg_path)
-    model = build_model(cfg.model)
-    print(model)
-
-    ckpt_path = '/apsara/xinyi.zxy/pretrain/asff_tood3/epoch_300_new.pth'
-    model_ckpt = torch.load(ckpt_path)
-    pretrain_model_state = model_ckpt['state_dict']
-
-    model.load_state_dict(pretrain_model_state)
-
-    exit()
-
-    model_state_dict = model.state_dict()  # keys of the newly built model
-
-    # of1 = open('new.txt','w')
-    # for key in model_state_dict.keys():
-    #     of1.writelines(key+'\n')
-    #
-    # of2 = open('pre.txt', 'w')
-    # for key in pretrain_model_state.keys():
-    #     of2.writelines(key + '\n')
-
-    key_ori = [
-        'backbone.stem',
-        'ERBlock_2.0',
-        'ERBlock_2.1.conv1',
-        'ERBlock_2.1.block.0',
-        'ERBlock_3.0',
-        'ERBlock_3.1.conv1',
-        'ERBlock_3.1.block.0',
-        'ERBlock_3.1.block.1',
-        'ERBlock_3.1.block.2',
-        'ERBlock_4.0',
-        'ERBlock_4.1.conv1',
-        'ERBlock_4.1.block.0',
-        'ERBlock_4.1.block.1',
-        'ERBlock_4.1.block.2',
-        'ERBlock_4.1.block.3',
-        'ERBlock_4.1.block.4',
-        'ERBlock_5.0',
-        'ERBlock_5.1.conv1',
-        'ERBlock_5.1.block.0',
-        'ERBlock_5.2'
-    ]
-
-    key_new = [
-        'backbone.stage0',
-        'stage1.0',
-        'stage1.1',
-        'stage1.2',
-        'stage2.0',
-        'stage2.1',
-        'stage2.2',
-        'stage2.3',
-        'stage2.4',
-        'stage3.0',
-        'stage3.1',
-        'stage3.2',
-        'stage3.3',
-        'stage3.4',
-        'stage3.5',
-        'stage3.6',
-        'stage4.0',
-        'stage4.1',
-        'stage4.2',
-        'stage4.3'
-    ]
-
-    print(len(key_ori)==len(key_new))
-
-    for i, key in enumerate(pretrain_model_state):
-        find = False
-        for t_i, t_k in enumerate(key_ori):
-            if t_k in key:
-                find = True
-                break
-        if find:
-            model_state_dict[key.replace(t_k,key_new[t_i])] = pretrain_model_state[key]
-        else:
-            model_state_dict[key] = pretrain_model_state[key]
-
-    model.load_state_dict(model_state_dict)
-
-    model_ckpt['state_dict'] = model_state_dict
-    ckpt_path_new = '/apsara/xinyi.zxy/pretrain/asff_tood3/epoch_300_new.pth'
-    torch.save(model_ckpt, ckpt_path_new)
-
-

From a364d60a6692040f5f6ce9b6a250e6024141229b Mon Sep 17 00:00:00 2001
From: zouxinyi0625 <zouxinyi.zxy@alibaba-inc.com>
Date: Mon, 15 Aug 2022 11:29:46 +0800
Subject: [PATCH 38/69] fix cr bug

---
 .../detection/yolox/yolox_l_8xb8_300e_coco.py |   5 +-
 .../yolox/yolox_m_8xb16_300e_coco.py          |   5 +-
 .../yolox/yolox_nano_8xb16_300e_coco.py       |   5 +-
 .../yolox/yolox_s_8xb16_300e_coco.py          |  19 +-
 .../yolox_s_8xb16_300e_coco_asff_tood3.py     |  19 +-
 .../yolox/yolox_s_8xb16_300e_coco_rep.py      |   7 +-
 .../yolox/yolox_s_8xb16_300e_coco_tood3.py    |   7 +-
 .../yolox/yolox_tiny_8xb16_300e_coco.py       |   5 +-
 easycv/apis/export.py                         |  24 +-
 easycv/hooks/yolox_mode_switch_hook.py        |   1 -
 easycv/models/backbones/darknet.py            |   2 -
 easycv/models/backbones/network_blocks.py     |   3 +
 .../models/backbones/repvgg_yolox_backbone.py |   5 +-
 .../detectors/detr/detr_transformer.py        |   1 +
 .../detection/detectors/yolox/__init__.py     |   4 +-
 .../models/detection/detectors/yolox/asff.py  |  17 +-
 .../detection/detectors/yolox/postprocess.py  |  91 ++++---
 .../detection/detectors/yolox/tood_head.py    |   8 +-
 .../detection/detectors/yolox/yolo_head.py    |  50 ++--
 .../detectors/yolox/yolo_head_template.py     |  29 +--
 .../detection/detectors/yolox/yolo_pafpn.py   |   8 +-
 .../models/detection/detectors/yolox/yolox.py |  27 +-
 .../detection/detectors/yolox/yolox_bak.py    | 239 ------------------
 easycv/models/detection/utils/boxes.py        |  14 +-
 easycv/models/detection/utils/misc.py         |   4 +-
 easycv/models/detection/utils/tensorrt_nms.py |   0
 easycv/models/loss/iou_loss.py                | 197 ++++-----------
 easycv/models/utils/ops.py                    |   4 +-
 easycv/toolkit/blade/cv_blade_utils.py        |  15 +-
 easycv/utils/checkpoint.py                    |   2 +-
 easycv/utils/mmlab_utils.py                   |   2 +-
 test.py                                       |  64 ++---
 32 files changed, 280 insertions(+), 603 deletions(-)
 delete mode 100644 easycv/models/detection/detectors/yolox/yolox_bak.py
 create mode 100644 easycv/models/detection/utils/tensorrt_nms.py

diff --git a/configs/detection/yolox/yolox_l_8xb8_300e_coco.py b/configs/detection/yolox/yolox_l_8xb8_300e_coco.py
index 4a84d96a..0339f7ad 100644
--- a/configs/detection/yolox/yolox_l_8xb8_300e_coco.py
+++ b/configs/detection/yolox/yolox_l_8xb8_300e_coco.py
@@ -5,10 +5,7 @@
     backbone=dict(
         model_type='l',  # s m l x tiny nano
     ),
-    head=dict(
-        model_type='l',
-    )
-)
+    head=dict(model_type='l', ))
 
 data = dict(imgs_per_gpu=8, workers_per_gpu=4)
 
diff --git a/configs/detection/yolox/yolox_m_8xb16_300e_coco.py b/configs/detection/yolox/yolox_m_8xb16_300e_coco.py
index 2fb74503..1f0d2d90 100644
--- a/configs/detection/yolox/yolox_m_8xb16_300e_coco.py
+++ b/configs/detection/yolox/yolox_m_8xb16_300e_coco.py
@@ -5,7 +5,4 @@
     backbone=dict(
         model_type='m',  # s m l x tiny nano
     ),
-    head=dict(
-        model_type='m',
-    )
-)
+    head=dict(model_type='m', ))
diff --git a/configs/detection/yolox/yolox_nano_8xb16_300e_coco.py b/configs/detection/yolox/yolox_nano_8xb16_300e_coco.py
index 8a8d9592..92dc9d65 100644
--- a/configs/detection/yolox/yolox_nano_8xb16_300e_coco.py
+++ b/configs/detection/yolox/yolox_nano_8xb16_300e_coco.py
@@ -5,7 +5,4 @@
     backbone=dict(
         model_type='nano',  # s m l x tiny nano
     ),
-    head=dict(
-        model_type='nano',
-    )
-)
+    head=dict(model_type='nano', ))
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
index 99c4465e..c83c2910 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
@@ -10,16 +10,13 @@
         backbone='CSPDarknet',
         model_type='s',  # s m l x tiny nano
         use_att=None,
-        neck='yolo'
-    ),
+        neck='yolo'),
     head=dict(
         type='YOLOXHead',
         model_type='s',
         obj_loss_type='BCE',
         reg_loss_type='giou',
-        num_classes=80
-    )
-)
+        num_classes=80))
 
 # s m l x
 img_scale = (640, 640)
@@ -49,7 +46,7 @@
 
 # dataset settings
 # data_root = '/apsarapangu/disk2/xinyi.zxy/data/coco/'
-data_root = '/mnt/data/nas/data/detection/coco/'
+data_root = '/apsara/xinyi.zxy/data/coco/'
 
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
@@ -196,3 +193,13 @@
     ])
 
 export = dict(use_jit=False, export_blade=False, end2end=False)
+
+export = dict(use_jit=True,
+              export_blade=True,  # whether to also export a blade-optimized model
+              end2end=False,      # whether to wrap NMS into the exported jit + blade model
+              batch_size=32,      # batch size used when static_opt=True
+              blade_config=dict(
+                  enable_fp16=True,
+                  fp16_fallback_op_ratio=0.05  # ratio of layers allowed to fall back from fp16 to fp32
+              ),
+              static_opt=True)    # whether to use static-shape optimization; defaults to True
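For orientation, a simplified sketch of how the exporter elsewhere in this patch series (easycv/apis/export.py, _export_yolox) reads the keys set above; the real code accesses them through an mmcv Config object, so plain dictionary access is used here only to keep the sketch self-contained.

# Simplified illustration of the option lookup; defaults mirror the export.py hunks.
def read_export_options(export_cfg):
    use_jit = export_cfg.get('use_jit', False)
    export_blade = export_cfg.get('export_blade', False)
    end2end = export_cfg.get('end2end', False)
    batch_size = export_cfg.get('batch_size', 1)
    static_opt = export_cfg.get('static_opt', True)
    blade_config = export_cfg.get(
        'blade_config', dict(enable_fp16=True, fp16_fallback_op_ratio=0.05))
    return use_jit, export_blade, end2end, batch_size, static_opt, blade_config

export = dict(use_jit=True, export_blade=True, end2end=False, batch_size=32,
              blade_config=dict(enable_fp16=True, fp16_fallback_op_ratio=0.05),
              static_opt=True)
print(read_export_options(export))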
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood3.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood3.py
index 13164511..09b48005 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood3.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood3.py
@@ -10,19 +10,16 @@
         backbone='RepVGGYOLOX',
         model_type='s',  # s m l x tiny nano
         use_att='ASFF',
-        neck='yolo'
-    ),
+        neck='yolo'),
     head=dict(
         type='TOODHead',
         model_type='s',
         obj_loss_type='BCE',
         reg_loss_type='giou',
-        num_classes=80
-    )
-)
+        num_classes=80))
 
 # s m l x
-img_scale = (672, 672)
+img_scale = (640, 640)
 random_size = (14, 26)
 scale_ratio = (0.1, 2)
 
@@ -196,4 +193,12 @@
         # dict(type='WandbLoggerHookV2'),
     ])
 
-export = dict(use_jit=False, export_blade=False, end2end=False)
+export = dict(use_jit=True,
+              export_blade=True,  # whether to also export a blade-optimized model
+              end2end=False,      # whether to wrap NMS into the exported jit + blade model
+              batch_size=32,      # batch size used when static_opt=True
+              blade_config=dict(
+                  enable_fp16=True,
+                  fp16_fallback_op_ratio=0.05  # ratio of layers allowed to fall back from fp16 to fp32
+              ),
+              static_opt=True)    # whether to use static-shape optimization; defaults to True
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_rep.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_rep.py
index af05aa82..e9f4646f 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_rep.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_rep.py
@@ -10,16 +10,13 @@
         backbone='RepVGGYOLOX',
         model_type='s',  # s m l x tiny nano
         use_att=None,
-        neck='yolo'
-    ),
+        neck='yolo'),
     head=dict(
         type='YOLOXHead',
         model_type='s',
         obj_loss_type='BCE',
         reg_loss_type='giou',
-        num_classes=80
-    )
-)
+        num_classes=80))
 
 # s m l x
 img_scale = (640, 640)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_tood3.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_tood3.py
index 6e992292..e534092f 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_tood3.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_tood3.py
@@ -10,16 +10,13 @@
         backbone='RepVGGYOLOX',
         model_type='s',  # s m l x tiny nano
         use_att=None,
-        neck='yolo'
-    ),
+        neck='yolo'),
     head=dict(
         type='TOODHead',
         model_type='s',
         obj_loss_type='BCE',
         reg_loss_type='giou',
-        num_classes=80
-    )
-)
+        num_classes=80))
 
 # s m l x
 img_scale = (640, 640)
diff --git a/configs/detection/yolox/yolox_tiny_8xb16_300e_coco.py b/configs/detection/yolox/yolox_tiny_8xb16_300e_coco.py
index 52c3d171..76ab0b45 100644
--- a/configs/detection/yolox/yolox_tiny_8xb16_300e_coco.py
+++ b/configs/detection/yolox/yolox_tiny_8xb16_300e_coco.py
@@ -5,10 +5,7 @@
     backbone=dict(
         model_type='tiny',  # s m l x tiny nano
     ),
-    head=dict(
-        model_type='tiny',
-    )
-)
+    head=dict(model_type='tiny', ))
 
 CLASSES = [
     'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
diff --git a/easycv/apis/export.py b/easycv/apis/export.py
index c451af0a..b7f828b6 100644
--- a/easycv/apis/export.py
+++ b/easycv/apis/export.py
@@ -31,13 +31,13 @@ def reparameterize_models(model):
     """
     reparameterize_count = 0
     for layer in model.modules():
-        reparameterize_count += 1
         if isinstance(layer, RepVGGBlock):
+            reparameterize_count += 1
             layer.switch_to_deploy()
     logging.info(
         'export : PAI-export reparameterize_count(RepVGGBlock, ) switch to deploy with {} blocks'
         .format(reparameterize_count))
-    print(reparameterize_count)
+    print('reparam:', reparameterize_count)
     return model
 
 
@@ -194,6 +194,7 @@ def _export_yolox(model, cfg, filename):
         batch_size = cfg.export.get('batch_size', 1)
         static_opt = cfg.export.get('static_opt', True)
         img_scale = cfg.get('img_scale', (640, 640))
+
         assert (
             len(img_scale) == 2
         ), 'Export YoloX predictor config contains img_scale must be (int, int) tuple!'
@@ -220,8 +221,9 @@ def _export_yolox(model, cfg, filename):
             yolox_trace = torch.jit.trace(model_export, input.to(device))
 
         if getattr(cfg.export, 'export_blade', False):
-            blade_config = cfg.export.get('blade_config',
-                                          dict(enable_fp16=True))
+            blade_config = cfg.export.get(
+                'blade_config',
+                dict(enable_fp16=True, fp16_fallback_op_ratio=0.05))
 
             from easycv.toolkit.blade import blade_env_assert, blade_optimize
 
@@ -669,15 +671,11 @@ def __init__(self,
 
         self.example_inputs = example_inputs
         self.preprocess_fn = preprocess_fn
-        self.ignore_postprocess = getattr(self.model, 'ignore_postprocess',
-                                          False)
-        if not self.ignore_postprocess:
-            self.postprocess_fn = postprocess_fn
-        else:
-            self.postprocess_fn = None
-        logging.warning(
-            'Model {} ignore_postprocess set to be {} during export !'.format(
-                type(model), self.ignore_postprocess))
+        self.postprocess_fn = postprocess_fn
+
+        if postprocess_fn is None:
+            self.model.head.decode_in_inference = False
+
         self.trace_model = trace_model
         if self.trace_model:
             self.trace_module()
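For context on the RepVGG reparameterization counter fixed above, a minimal sketch of the intended export-time order of operations, assuming reparameterize_models is importable as shown; the real exporter additionally wraps the model with preprocess/postprocess logic before tracing, which is omitted here.

import torch
from easycv.apis.export import reparameterize_models

def prepare_for_jit_export(model, img_scale=(640, 640)):
    # Fuse every RepVGGBlock into a single conv branch before tracing.
    model = reparameterize_models(model)
    model.eval()
    dummy = torch.randn(1, 3, *img_scale)
    with torch.no_grad():
        return torch.jit.trace(model, dummy)  # mirrors the exporter's use_jit path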
diff --git a/easycv/hooks/yolox_mode_switch_hook.py b/easycv/hooks/yolox_mode_switch_hook.py
index d8ab6da7..9d396773 100644
--- a/easycv/hooks/yolox_mode_switch_hook.py
+++ b/easycv/hooks/yolox_mode_switch_hook.py
@@ -40,4 +40,3 @@ def before_train_epoch(self, runner):
             train_loader.dataset.update_skip_type_keys(self.skip_type_keys)
             runner.logger.info('Add additional L1 loss now!')
             model.head.use_l1 = True
-
diff --git a/easycv/models/backbones/darknet.py b/easycv/models/backbones/darknet.py
index 82b571c2..ec8ca458 100644
--- a/easycv/models/backbones/darknet.py
+++ b/easycv/models/backbones/darknet.py
@@ -54,7 +54,6 @@ def __init__(self,
                                  in_channels * 2),
         )
 
-
     def make_group_layer(self,
                          in_channels: int,
                          num_blocks: int,
@@ -87,7 +86,6 @@ def make_spp_block(self, filters_list, in_filters):
         ])
         return m
 
-
     def forward(self, x):
         outputs = {}
         x = self.stem(x)
diff --git a/easycv/models/backbones/network_blocks.py b/easycv/models/backbones/network_blocks.py
index ad6922cd..e5d60e60 100644
--- a/easycv/models/backbones/network_blocks.py
+++ b/easycv/models/backbones/network_blocks.py
@@ -288,6 +288,7 @@ class GSConv(nn.Module):
         GSConv is used to merge the channel information of DSConv and BaseConv
         You can refer to https://github.com/AlanLi1997/slim-neck-by-gsconv for more details
     """
+
     def __init__(self, c1, c2, k=1, s=1, g=1, act='silu'):
         super().__init__()
         c_ = c2 // 2
@@ -312,6 +313,7 @@ class GSBottleneck(nn.Module):
         The use of GSBottleneck is to stack the GSConv layer
         You can refer to https://github.com/AlanLi1997/slim-neck-by-gsconv for more details
     """
+
     def __init__(self, c1, c2, k=3, s=1):
         super().__init__()
         c_ = c2 // 2
@@ -328,6 +330,7 @@ class VoVGSCSP(nn.Module):
         VoVGSCSP is a new neck structure used in CSPNet
         You can refer to https://github.com/AlanLi1997/slim-neck-by-gsconv for more details
     """
+
     def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):
         super().__init__()
         c_ = int(c2 * e)
diff --git a/easycv/models/backbones/repvgg_yolox_backbone.py b/easycv/models/backbones/repvgg_yolox_backbone.py
index aef3ffe8..843858cf 100644
--- a/easycv/models/backbones/repvgg_yolox_backbone.py
+++ b/easycv/models/backbones/repvgg_yolox_backbone.py
@@ -27,6 +27,7 @@ def conv_bn(in_channels, out_channels, kernel_size, stride, padding, groups=1):
 
 class RepVGGBlock(nn.Module):
     """Basic Block of RepVGG"""
+
     def __init__(self,
                  in_channels,
                  out_channels,
@@ -197,6 +198,7 @@ def switch_to_deploy(self):
 
 class ConvBNAct(nn.Module):
     '''Normal Conv with SiLU activation'''
+
     def __init__(self,
                  in_channels,
                  out_channels,
@@ -358,7 +360,6 @@ def forward(self, x):
         return tuple(outputs)
 
 
-
 if __name__ == '__main__':
 
     from torchsummaryX import summary
@@ -386,4 +387,4 @@ def forward(self, x):
     model = model.cuda()
 
     a = torch.randn(1, 3, 640, 640).cuda()
-    summary(model, a)
\ No newline at end of file
+    summary(model, a)
diff --git a/easycv/models/detection/detectors/detr/detr_transformer.py b/easycv/models/detection/detectors/detr/detr_transformer.py
index 213ee8c0..f7a6dd8c 100644
--- a/easycv/models/detection/detectors/detr/detr_transformer.py
+++ b/easycv/models/detection/detectors/detr/detr_transformer.py
@@ -187,6 +187,7 @@ def forward(self, mask):
 
 
 class TransformerDecoder(nn.Module):
+
     def __init__(self,
                  decoder_layer,
                  num_layers,
diff --git a/easycv/models/detection/detectors/yolox/__init__.py b/easycv/models/detection/detectors/yolox/__init__.py
index 48fa177f..f2ce11ae 100644
--- a/easycv/models/detection/detectors/yolox/__init__.py
+++ b/easycv/models/detection/detectors/yolox/__init__.py
@@ -1,3 +1,3 @@
-from .yolox import YOLOX
 from .tood_head import TOODHead
-from .yolo_head import YOLOXHead
\ No newline at end of file
+from .yolo_head import YOLOXHead
+from .yolox import YOLOX
diff --git a/easycv/models/detection/detectors/yolox/asff.py b/easycv/models/detection/detectors/yolox/asff.py
index 2aa40680..d4c62c3c 100644
--- a/easycv/models/detection/detectors/yolox/asff.py
+++ b/easycv/models/detection/detectors/yolox/asff.py
@@ -2,12 +2,15 @@
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
+
 from easycv.models.backbones.network_blocks import BaseConv
 
+
 class ASFF(nn.Module):
+
     def __init__(self,
                  level,
-                 type = 'ASFF',
+                 type='ASFF',
                  asff_channel=2,
                  expand_kernel=3,
                  multiplier=1,
@@ -58,11 +61,7 @@ def __init__(self,
 
         # add expand layer
         self.expand = Conv(
-            self.inter_dim,
-            self.inter_dim,
-            expand_kernel,
-            1,
-            act=act)
+            self.inter_dim, self.inter_dim, expand_kernel, 1, act=act)
 
         self.weight_level_0 = Conv(self.inter_dim, asff_channel, 1, 1, act=act)
         self.weight_level_1 = Conv(self.inter_dim, asff_channel, 1, 1, act=act)
@@ -87,14 +86,12 @@ def expand_channel(self, x):
         )
         return x
 
-
     def mean_channel(self, x):
         # [b,c,h,w]->[b,c/4,h*2,w*2]
         x1 = x[:, ::2, :, :]
         x2 = x[:, 1::2, :, :]
         return (x1 + x2) / 2
 
-
     def forward(self, x):  # l,m,s
         """
         #
@@ -111,7 +108,8 @@ def forward(self, x):  # l,m,s
                 level_1_resized = self.stride_level_1(x_level_1)
                 level_2_downsampled_inter = F.max_pool2d(
                     x_level_2, 3, stride=2, padding=1)
-                level_2_resized = self.stride_level_2(level_2_downsampled_inter)
+                level_2_resized = self.stride_level_2(
+                    level_2_downsampled_inter)
             elif self.level == 1:
                 level_0_compressed = self.compress_level_0(x_level_0)
                 level_0_resized = F.interpolate(
@@ -169,4 +167,3 @@ def forward(self, x):  # l,m,s
         out = self.expand(fused_out_reduced)
 
         return out
-
diff --git a/easycv/models/detection/detectors/yolox/postprocess.py b/easycv/models/detection/detectors/yolox/postprocess.py
index 27e9b76a..4d33c7ce 100644
--- a/easycv/models/detection/detectors/yolox/postprocess.py
+++ b/easycv/models/detection/detectors/yolox/postprocess.py
@@ -1,8 +1,10 @@
-from torch import nn
 import torch
+from torch import nn
+
 
 class TRT8_NMS(torch.autograd.Function):
     '''TensorRT NMS operation'''
+
     @staticmethod
     def forward(
         ctx,
@@ -12,15 +14,17 @@ def forward(
         box_coding=1,
         iou_threshold=0.45,
         max_output_boxes=100,
-        plugin_version="1",
+        plugin_version='1',
         score_activation=0,
         score_threshold=0.25,
     ):
         batch_size, num_boxes, num_classes = scores.shape
-        num_det = torch.randint(0, max_output_boxes, (batch_size, 1), dtype=torch.int32)
+        num_det = torch.randint(
+            0, max_output_boxes, (batch_size, 1), dtype=torch.int32)
         det_boxes = torch.randn(batch_size, max_output_boxes, 4)
         det_scores = torch.randn(batch_size, max_output_boxes)
-        det_classes = torch.randint(0, num_classes, (batch_size, max_output_boxes), dtype=torch.int32)
+        det_classes = torch.randint(
+            0, num_classes, (batch_size, max_output_boxes), dtype=torch.int32)
         return num_det, det_boxes, det_scores, det_classes
 
     @staticmethod
@@ -31,26 +35,34 @@ def symbolic(g,
                  box_coding=1,
                  iou_threshold=0.45,
                  max_output_boxes=100,
-                 plugin_version="1",
+                 plugin_version='1',
                  score_activation=0,
                  score_threshold=0.25):
-        out = g.op("TRT::EfficientNMS_TRT",
-                   boxes,
-                   scores,
-                   background_class_i=background_class,
-                   box_coding_i=box_coding,
-                   iou_threshold_f=iou_threshold,
-                   max_output_boxes_i=max_output_boxes,
-                   plugin_version_s=plugin_version,
-                   score_activation_i=score_activation,
-                   score_threshold_f=score_threshold,
-                   outputs=4)
+        out = g.op(
+            'TRT::EfficientNMS_TRT',
+            boxes,
+            scores,
+            background_class_i=background_class,
+            box_coding_i=box_coding,
+            iou_threshold_f=iou_threshold,
+            max_output_boxes_i=max_output_boxes,
+            plugin_version_s=plugin_version,
+            score_activation_i=score_activation,
+            score_threshold_f=score_threshold,
+            outputs=4)
         nums, boxes, scores, classes = out
         return nums, boxes, scores, classes
 
+
 class ONNX_TRT8(nn.Module):
     '''onnx module with TensorRT NMS operation.'''
-    def __init__(self, max_obj=100, iou_thres=0.45, score_thres=0.25, max_wh=None ,device=None):
+
+    def __init__(self,
+                 max_obj=100,
+                 iou_thres=0.45,
+                 score_thres=0.25,
+                 max_wh=None,
+                 device=None):
         super().__init__()
         assert max_wh is None
         self.device = device if device else torch.device('cpu')
@@ -67,22 +79,29 @@ def forward(self, x):
         conf = x[:, :, 4:5]
         score = x[:, :, 5:]
         score *= conf
-        num_det, det_boxes, det_scores, det_classes = TRT8_NMS.apply(box, score, self.background_class, self.box_coding,
-                                                                    self.iou_threshold, self.max_obj,
-                                                                    self.plugin_version, self.score_activation,
-                                                                    self.score_threshold)
+        num_det, det_boxes, det_scores, det_classes = TRT8_NMS.apply(
+            box, score, self.background_class, self.box_coding,
+            self.iou_threshold, self.max_obj, self.plugin_version,
+            self.score_activation, self.score_threshold)
         return num_det, det_boxes, det_scores, det_classes
 
-def create_tensorrt_postprocess(example_scores, iou_thres, score_thres):
+
+def create_tensorrt_postprocess(example_scores,
+                                iou_thres=0.45,
+                                score_thres=0.25):
     from torch_blade import tensorrt
     import torch_blade._torch_blade._backends as backends
     import io
 
-    model = torch.jit.trace(ONNX_TRT8(iou_thres=iou_thres, score_thres=score_thres), example_scores)
+    model = torch.jit.trace(
+        ONNX_TRT8(iou_thres=iou_thres, score_thres=score_thres),
+        example_scores)
     example_outputs = model(example_scores)
 
-    input_names=['input']
-    output_names=['num_det', 'det_boxes', 'det_example_scores', 'det_classes']
+    input_names = ['input']
+    output_names = [
+        'num_det', 'det_boxes', 'det_example_scores', 'det_classes'
+    ]
     with io.BytesIO() as onnx_proto_f:
         torch.onnx.export(
             model,
@@ -90,22 +109,22 @@ def create_tensorrt_postprocess(example_scores, iou_thres, score_thres):
             onnx_proto_f,
             input_names=input_names,
             output_names=output_names,
-            example_outputs=example_outputs
-        )
+            example_outputs=example_outputs)
         onnx_proto = onnx_proto_f.getvalue()
 
     def _copy_meta(data, name, dtype, sizes):
         data.name = name
         if dtype.is_floating_point:
-            data.dtype = "Float"
+            data.dtype = 'Float'
         else:
-            data.dtype = "Int"
+            data.dtype = 'Int'
         data.sizes = sizes
         return data
 
     state = backends.EngineState()
     state.inputs = [
-        _copy_meta(backends.TensorInfo(), name, tensor.dtype, list(tensor.shape))
+        _copy_meta(backends.TensorInfo(), name, tensor.dtype,
+                   list(tensor.shape))
         for name, tensor in zip(input_names, [example_scores])
     ]
     state.outputs = [
@@ -113,22 +132,26 @@ def _copy_meta(data, name, dtype, sizes):
         for name, tensor in zip(output_names, example_outputs)
     ]
     state = tensorrt.cvt_onnx_to_tensorrt(onnx_proto, state, [], dict())
+
     class Model(torch.nn.Module):
+
         def __init__(self, state):
             super().__init__()
             self._trt_engine_ext = backends.create_engine(state)
-    
+
         def forward(self, x):
             return self._trt_engine_ext.execute([x])
+
     trt_ext = torch.jit.script(Model(state))
     return trt_ext
 
 
-if __name__=="__main__":
+if __name__ == '__main__':
     bs = 32
     num_boxes = 100
     num_classes = 2
-    example_scores = torch.randn([bs, num_boxes, 4 + 1 + num_classes], dtype=torch.float32)
+    example_scores = torch.randn([bs, num_boxes, 4 + 1 + num_classes],
+                                 dtype=torch.float32)
     trt_ext = create_tensorrt_postprocess(example_scores)
     out = trt_ext.forward(example_scores)
-    print(out)
\ No newline at end of file
+    print(out)
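
For readers without a TensorRT/torch_blade build at hand, the step that TRT8_NMS / EfficientNMS_TRT replaces can be mimicked on CPU. The snippet below is only an illustrative sketch using plain torchvision NMS (function names are hypothetical), not the plugin path that create_tensorrt_postprocess builds:

    import torch
    from torchvision.ops import nms


    def cpu_nms_postprocess(pred, iou_thres=0.45, score_thres=0.25):
        # pred: [B, N, 4 + 1 + num_classes], boxes in (cx, cy, w, h)
        results = []
        for p in pred:
            boxes, obj, cls = p[:, :4], p[:, 4:5], p[:, 5:]
            scores, labels = (cls * obj).max(dim=1)   # fold objectness into class scores
            keep = scores > score_thres
            boxes, scores, labels = boxes[keep], scores[keep], labels[keep]
            # cxcywh -> xyxy, since torchvision NMS expects corner boxes
            xyxy = torch.cat([boxes[:, :2] - boxes[:, 2:] / 2,
                              boxes[:, :2] + boxes[:, 2:] / 2], dim=1)
            idx = nms(xyxy, scores, iou_thres)
            results.append((xyxy[idx], scores[idx], labels[idx]))
        return results


    example_scores = torch.rand([2, 100, 4 + 1 + 2])
    print([r[0].shape for r in cpu_nms_postprocess(example_scores)])
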
diff --git a/easycv/models/detection/detectors/yolox/tood_head.py b/easycv/models/detection/detectors/yolox/tood_head.py
index f6277f7f..ee0a0da9 100644
--- a/easycv/models/detection/detectors/yolox/tood_head.py
+++ b/easycv/models/detection/detectors/yolox/tood_head.py
@@ -2,6 +2,7 @@
 import torch.nn as nn
 import torch.nn.functional as F
 from mmcv.cnn import ConvModule, normal_init
+
 from easycv.models.builder import HEADS
 from .yolo_head_template import YOLOXHead_Template
 
@@ -80,6 +81,7 @@ def forward(self, feat, avg_feat=None):
 
 @HEADS.register_module
 class TOODHead(YOLOXHead_Template):
+
     def __init__(
             self,
             num_classes,
@@ -119,8 +121,7 @@ def __init__(
             stage=stage,
             obj_loss_type=obj_loss_type,
             reg_loss_type=reg_loss_type,
-            decode_in_inference=decode_in_inference
-        )
+            decode_in_inference=decode_in_inference)
 
         self.stacked_convs = stacked_convs
         self.conv_cfg = conv_cfg
@@ -156,7 +157,6 @@ def __init__(
                     conv_cfg=conv_cfg,
                     norm_cfg=self.norm_cfg))
 
-
     def forward(self, xin, labels=None, imgs=None):
         outputs = []
         origin_preds = []
@@ -243,5 +243,3 @@ def forward(self, xin, labels=None, imgs=None):
                 return self.decode_outputs(outputs, dtype=xin[0].type())
             else:
                 return outputs
-
-
diff --git a/easycv/models/detection/detectors/yolox/yolo_head.py b/easycv/models/detection/detectors/yolox/yolo_head.py
index 1c9b125e..28de2cad 100644
--- a/easycv/models/detection/detectors/yolox/yolo_head.py
+++ b/easycv/models/detection/detectors/yolox/yolo_head.py
@@ -8,18 +8,20 @@
 
 @HEADS.register_module
 class YOLOXHead(YOLOXHead_Template):
-    def __init__(self,
-                 num_classes = 80,
-                 model_type='s',
-                 strides=[8, 16, 32],
-                 in_channels=[256, 512, 1024],
-                 act='silu',
-                 depthwise=False,
-                 stage='CLOUD',
-                 obj_loss_type='BCE',
-                 reg_loss_type='giou',
-                 decode_in_inference=True,
-        ):
+
+    def __init__(
+        self,
+        num_classes=80,
+        model_type='s',
+        strides=[8, 16, 32],
+        in_channels=[256, 512, 1024],
+        act='silu',
+        depthwise=False,
+        stage='CLOUD',
+        obj_loss_type='BCE',
+        reg_loss_type='giou',
+        decode_in_inference=True,
+    ):
         """
         Args:
             num_classes (int): detection class numbers.
@@ -32,19 +34,17 @@ def __init__(self,
             obj_loss_type (str): the loss function of the obj conf. Default value: l1.
             reg_loss_type (str): the loss function of the box prediction. Default value: l1.
         """
-        super(YOLOXHead,self).__init__(
-                num_classes = num_classes,
-                model_type=model_type,
-                strides=strides,
-                in_channels=in_channels,
-                act=act,
-                depthwise=depthwise,
-                stage=stage,
-                obj_loss_type=obj_loss_type,
-                reg_loss_type=reg_loss_type,
-                decode_in_inference=decode_in_inference
-        )
-
+        super(YOLOXHead, self).__init__(
+            num_classes=num_classes,
+            model_type=model_type,
+            strides=strides,
+            in_channels=in_channels,
+            act=act,
+            depthwise=depthwise,
+            stage=stage,
+            obj_loss_type=obj_loss_type,
+            reg_loss_type=reg_loss_type,
+            decode_in_inference=decode_in_inference)
 
     def forward(self, xin, labels=None, imgs=None):
         outputs = []
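
The loss-type strings threaded through the config end up here as plain constructor arguments. A hedged sketch of what the model config carries (the values mirror the defaults in the diff; actually building it requires an easycv install):

    head_cfg = dict(
        type='YOLOXHead',
        num_classes=80,
        model_type='s',          # s / m / l / x / tiny / nano
        obj_loss_type='BCE',     # objectness loss; 'focal' is another branch handled by the template
        reg_loss_type='giou',    # one of the IOUloss branches: 'iou', 'giou', 'diou', 'ciou', 'eiou', 'siou'
    )

    # with easycv available, the registry resolves the 'type' key:
    # from easycv.models.builder import build_head
    # head = build_head(head_cfg)
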
diff --git a/easycv/models/detection/detectors/yolox/yolo_head_template.py b/easycv/models/detection/detectors/yolox/yolo_head_template.py
index cc0c7b54..4cf77d43 100644
--- a/easycv/models/detection/detectors/yolox/yolo_head_template.py
+++ b/easycv/models/detection/detectors/yolox/yolo_head_template.py
@@ -1,16 +1,16 @@
 # Copyright (c) 2014-2021 Megvii Inc And Alibaba PAI-Teams. All rights reserved.
 import logging
 import math
+from abc import abstractmethod
 from distutils.version import LooseVersion
 
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
-from abc import abstractmethod
 
 from easycv.models.backbones.network_blocks import BaseConv, DWConv
 from easycv.models.detection.utils import bboxes_iou
-from easycv.models.loss import IOUloss
+from easycv.models.loss import GIoULoss, IOUloss, IoULoss
 
 
 class YOLOXHead_Template(nn.Module):
@@ -24,7 +24,7 @@ class YOLOXHead_Template(nn.Module):
     }
 
     def __init__(self,
-                 num_classes = 80,
+                 num_classes=80,
                  model_type='s',
                  strides=[8, 16, 32],
                  in_channels=[256, 512, 1024],
@@ -33,8 +33,7 @@ def __init__(self,
                  stage='CLOUD',
                  obj_loss_type='BCE',
                  reg_loss_type='giou',
-                 decode_in_inference=True
-        ):
+                 decode_in_inference=True):
         """
         Args:
             num_classes (int): detection class numbers.
@@ -138,6 +137,10 @@ def __init__(self,
         self.l1_loss = nn.L1Loss(reduction='none')
 
         self.iou_loss = IOUloss(reduction='none', loss_type=reg_loss_type)
+        if reg_loss_type == 'iou':
+            self.iou_loss1 = IoULoss(reduction='none', mode='square')
+        elif reg_loss_type == 'giou':
+            self.iou_loss1 = GIoULoss(reduction='none')
 
         self.obj_loss_type = obj_loss_type
         if obj_loss_type == 'BCE':
@@ -342,6 +345,9 @@ def get_losses(
         loss_iou = (self.iou_loss(
             bbox_preds.view(-1, 4)[fg_masks], reg_targets)).sum() / num_fg
 
+        # loss_iou1 = (self.iou_loss1(
+        #     bbox_preds.view(-1, 4)[fg_masks], reg_targets,xyxy=False)).sum() / num_fg
+
         loss_obj = (self.obj_loss(obj_preds.view(-1, 1),
                                   obj_targets)).sum() / num_fg
 
@@ -367,16 +373,6 @@ def get_losses(
             num_fg / max(num_gts, 1),
         )
 
-    def focal_loss(self, pred, gt):
-        pos_inds = gt.eq(1).float()
-        neg_inds = gt.eq(0).float()
-        pos_loss = torch.log(pred + 1e-5) * torch.pow(1 - pred,
-                                                      2) * pos_inds * 0.75
-        neg_loss = torch.log(1 - pred + 1e-5) * torch.pow(pred,
-                                                          2) * neg_inds * 0.25
-        loss = -(pos_loss + neg_loss)
-        return loss
-
     def get_l1_target(self,
                       l1_target,
                       gt,
@@ -429,7 +425,8 @@ def get_assignments(
         )
         # reference to: https://github.com/Megvii-BaseDetection/YOLOX/pull/811
         # NOTE: Fix `selected index k out of range`
-        num_pos_anchors: int = fg_mask.sum().item()  # number of positive anchors
+        num_pos_anchors: int = fg_mask.sum().item(
+        )  # number of positive anchors
 
         if num_pos_anchors == 0:
             gt_matched_classes = torch.zeros(0, device=fg_mask.device).long()
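
For reference, the IoU/GIoU branches selected by reg_loss_type boil down to the tensor math below. This is a standalone sketch with plain functions (aligned boxes in (cx, cy, w, h)), not the registry classes imported above:

    import torch


    def _corners(b):
        return b[:, :2] - b[:, 2:] / 2, b[:, :2] + b[:, 2:] / 2


    def reg_loss(pred, target, loss_type='giou', eps=1e-7):
        # pred/target: [N, 4] aligned boxes in (cx, cy, w, h)
        p_tl, p_br = _corners(pred)
        t_tl, t_br = _corners(target)
        inter = (torch.min(p_br, t_br) - torch.max(p_tl, t_tl)).clamp(min=0).prod(dim=1)
        union = pred[:, 2:].prod(dim=1) + target[:, 2:].prod(dim=1) - inter
        iou = inter / (union + eps)
        if loss_type == 'iou':
            return 1 - iou ** 2                    # squared variant used by the 'iou' branch
        # GIoU: penalise the empty part of the smallest enclosing box
        area_c = (torch.max(p_br, t_br) - torch.min(p_tl, t_tl)).prod(dim=1).clamp(min=eps)
        giou = iou - (area_c - union) / area_c
        return 1 - giou.clamp(min=-1.0, max=1.0)


    pred = torch.tensor([[10., 10., 4., 4.]])
    target = torch.tensor([[12., 10., 4., 4.]])
    print(reg_loss(pred, target, 'giou'))   # tensor([0.6667])
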
diff --git a/easycv/models/detection/detectors/yolox/yolo_pafpn.py b/easycv/models/detection/detectors/yolox/yolo_pafpn.py
index af9e1b7c..741a4922 100644
--- a/easycv/models/detection/detectors/yolox/yolo_pafpn.py
+++ b/easycv/models/detection/detectors/yolox/yolo_pafpn.py
@@ -4,16 +4,15 @@
 
 import torch
 import torch.nn as nn
-from easycv.models.registry import BACKBONES
+
 from easycv.models.backbones.darknet import CSPDarknet
 from easycv.models.backbones.network_blocks import (BaseConv, CSPLayer, DWConv,
                                                     GSConv, VoVGSCSP)
 from easycv.models.backbones.repvgg_yolox_backbone import RepVGGYOLOX
+from easycv.models.registry import BACKBONES
 from .asff import ASFF
 
 
-
-
 @BACKBONES.register_module
 class YOLOPAFPN(nn.Module):
     """
@@ -246,7 +245,7 @@ def __init__(self,
                 expand_kernel=expand_kernel,
                 multiplier=width,
                 act=act,
-                )
+            )
             self.asff_2 = ASFF(
                 level=1,
                 type=self.use_att,
@@ -264,7 +263,6 @@ def __init__(self,
                 act=act,
             )
 
-
     def forward(self, input):
         """
         Args:
diff --git a/easycv/models/detection/detectors/yolox/yolox.py b/easycv/models/detection/detectors/yolox/yolox.py
index 0c12336e..fc2a231a 100644
--- a/easycv/models/detection/detectors/yolox/yolox.py
+++ b/easycv/models/detection/detectors/yolox/yolox.py
@@ -6,9 +6,9 @@
 import torch
 import torch.nn as nn
 from torch import Tensor
-from easycv.utils.config_tools import mmcv_config_fromfile
+
 from easycv.models.base import BaseModel
-from easycv.models.builder import (MODELS, build_model, build_backbone, build_head,
+from easycv.models.builder import (MODELS, build_backbone, build_head,
                                    build_neck)
 from easycv.models.detection.utils import postprocess
 
@@ -27,7 +27,14 @@ class YOLOX(BaseModel):
     The network returns loss values from three YOLO layers during training
     and detection results during test.
     """
-    def __init__(self, backbone, test_conf, nms_thre, head=None, neck=None, pretrained=True):
+
+    def __init__(self,
+                 backbone,
+                 test_conf,
+                 nms_thre,
+                 head=None,
+                 neck=None,
+                 pretrained=True):
         super(YOLOX, self).__init__()
 
         self.pretrained = pretrained
@@ -41,8 +48,6 @@ def __init__(self, backbone, test_conf, nms_thre, head=None, neck=None, pretrain
         self.test_conf = test_conf
         self.nms_thre = nms_thre
 
-
-
     def forward_train(self,
                       img: Tensor,
                       gt_bboxes: Tensor,
@@ -83,7 +88,6 @@ def forward_train(self,
                          device=loss.device).float()
         }
 
-
         return outputs
 
     def forward_test(self, img: Tensor, img_metas=None) -> Tensor:
@@ -152,17 +156,8 @@ def forward_export(self, img):
             fpn_outs = self.backbone(img)
             outputs = self.head(fpn_outs)
 
-            if self.decode_in_inference:
+            if self.head.decode_in_inference:
                 outputs = postprocess(outputs, self.num_classes,
                                       self.test_conf, self.nms_thre)
 
         return outputs
-
-if __name__=='__main__':
-    config_path = '/apsara/xinyi.zxy/code/pr154/configs/detection/yolox/yolox_s_8xb16_300e_coco.py'
-    cfg = mmcv_config_fromfile(config_path)
-
-    print(cfg)
-
-    model = build_model(cfg.model)
-    print(model)
\ No newline at end of file
diff --git a/easycv/models/detection/detectors/yolox/yolox_bak.py b/easycv/models/detection/detectors/yolox/yolox_bak.py
deleted file mode 100644
index 5806fd8d..00000000
--- a/easycv/models/detection/detectors/yolox/yolox_bak.py
+++ /dev/null
@@ -1,239 +0,0 @@
-# Copyright (c) 2014-2021 Megvii Inc And Alibaba PAI-Teams. All rights reserved.
-import logging
-from typing import Dict
-
-import numpy as np
-import torch
-import torch.nn as nn
-from torch import Tensor
-
-
-from easycv.models.base import BaseModel
-from easycv.models.builder import MODELS
-from easycv.models.detection.utils import postprocess
-from .tood_head import TOODHead
-from .yolo_head import YOLOXHead
-from .yolo_pafpn import YOLOPAFPN
-
-
-def init_yolo(M):
-    for m in M.modules():
-        if isinstance(m, nn.BatchNorm2d):
-            m.eps = 1e-3
-            m.momentum = 0.03
-
-
-# @MODELS.register_module
-class YOLOX(BaseModel):
-    """
-    YOLOX model module. The module list is defined by create_yolov3_modules function.
-    The network returns loss values from three YOLO layers during training
-    and detection results during test.
-    """
-    param_map = {
-        'nano': [0.33, 0.25],
-        'tiny': [0.33, 0.375],
-        's': [0.33, 0.5],
-        'm': [0.67, 0.75],
-        'l': [1.0, 1.0],
-        'x': [1.33, 1.25]
-    }
-
-    # TODO configs support more params
-    # backbone(Darknet)、neck(YOLOXPAFPN)、head(YOLOXHead)
-    def __init__(self,
-                 model_type: str = 's',
-                 num_classes: int = 80,
-                 test_size: tuple = (640, 640),
-                 test_conf: float = 0.01,
-                 nms_thre: float = 0.65,
-                 use_att: str = None,
-                 obj_loss_type: str = 'l1',
-                 reg_loss_type: str = 'l1',
-                 head_type: str = 'yolox',
-                 neck: str = 'yolo',
-                 neck_mode: str = 'all',
-                 act: str = 'silu',
-                 asff_channel: int = 16,
-                 stacked_convs: int = 6,
-                 la_down_rate: int = 8,
-                 conv_layers: int = 2,
-                 decode_in_inference: bool = True,
-                 backbone='CSPDarknet',
-                 expand_kernel=3,
-                 pretrained: str = None):
-        super(YOLOX, self).__init__()
-        assert model_type in self.param_map, f'invalid model_type for yolox {model_type}, valid ones are {list(self.param_map.keys())}'
-
-        in_channels = [256, 512, 1024]
-        depth = self.param_map[model_type][0]
-        width = self.param_map[model_type][1]
-
-        self.backbone = YOLOPAFPN(
-            depth,
-            width,
-            in_channels=in_channels,
-            asff_channel=asff_channel,
-            act=act,
-            use_att=use_att,
-            backbone=backbone,
-            neck=neck,
-            neck_mode=neck_mode,
-            expand_kernel=expand_kernel)
-
-        self.head_type = head_type
-        if head_type == 'yolox':
-            self.head = YOLOXHead(
-                num_classes,
-                width,
-                in_channels=in_channels,
-                act=act,
-                obj_loss_type=obj_loss_type,
-                reg_loss_type=reg_loss_type)
-            self.head.initialize_biases(1e-2)
-        elif head_type == 'tood':
-            self.head = TOODHead(
-                num_classes,
-                width,
-                in_channels=in_channels,
-                act=act,
-                obj_loss_type=obj_loss_type,
-                reg_loss_type=reg_loss_type,
-                stacked_convs=stacked_convs,
-                la_down_rate=la_down_rate,
-                conv_layers=conv_layers,
-                decode_in_inference=decode_in_inference)
-            self.head.initialize_biases(1e-2)
-
-        self.decode_in_inference = decode_in_inference
-        # use decode, we will use post process as default
-        if not self.decode_in_inference:
-            logging.warning(
-                'YOLOX-PAI head decode_in_inference close for speed test, post process will be close at same time!'
-            )
-            self.ignore_postprocess = True
-            logging.warning('YOLOX-PAI ignore_postprocess set to be True')
-        else:
-            self.ignore_postprocess = False
-
-        self.apply(init_yolo)  # init_yolo(self)
-        self.num_classes = num_classes
-        self.test_conf = test_conf
-        self.nms_thre = nms_thre
-        self.test_size = test_size
-        self.epoch_counter = 0
-
-    def forward_train(self,
-                      img: Tensor,
-                      gt_bboxes: Tensor,
-                      gt_labels: Tensor,
-                      img_metas=None,
-                      scale=None) -> Dict[str, Tensor]:
-        """ Abstract interface for model forward in training
-
-        Args:
-            img (Tensor): image tensor, NxCxHxW
-            target (List[Tensor]): list of target tensor, NTx5 [class,x_c,y_c,w,h]
-        """
-
-        # gt_bboxes = gt_bboxes.to(torch.float16)
-        # gt_labels = gt_labels.to(torch.float16)
-
-        fpn_outs = self.backbone(img)
-
-        targets = torch.cat([gt_labels, gt_bboxes], dim=2)
-
-        loss, iou_loss, conf_loss, cls_loss, l1_loss, num_fg = self.head(
-            fpn_outs, targets, img)
-
-        outputs = {
-            'total_loss':
-            loss,
-            'iou_l':
-            iou_loss,
-            'conf_l':
-            conf_loss,
-            'cls_l':
-            cls_loss,
-            'img_h':
-            torch.tensor(img_metas[0]['img_shape'][0],
-                         device=loss.device).float(),
-            'img_w':
-            torch.tensor(img_metas[0]['img_shape'][1],
-                         device=loss.device).float()
-        }
-
-
-        return outputs
-
-    def forward_test(self, img: Tensor, img_metas=None) -> Tensor:
-        """ Abstract interface for model forward in training
-
-        Args:
-            img (Tensor): image tensor, NxCxHxW
-            target (List[Tensor]): list of target tensor, NTx5 [class,x_c,y_c,w,h]
-        """
-        with torch.no_grad():
-            fpn_outs = self.backbone(img)
-            outputs = self.head(fpn_outs)
-
-            outputs = postprocess(outputs, self.num_classes, self.test_conf,
-                                  self.nms_thre)
-
-            detection_boxes = []
-            detection_scores = []
-            detection_classes = []
-            img_metas_list = []
-
-            for i in range(len(outputs)):
-                if img_metas:
-                    img_metas_list.append(img_metas[i])
-                if outputs[i] is not None:
-                    bboxes = outputs[i][:,
-                                        0:4] if outputs[i] is not None else None
-                    if img_metas:
-                        bboxes /= img_metas[i]['scale_factor'][0]
-                    detection_boxes.append(bboxes.cpu().numpy())
-                    detection_scores.append(
-                        (outputs[i][:, 4] * outputs[i][:, 5]).cpu().numpy())
-                    detection_classes.append(
-                        outputs[i][:, 6].cpu().numpy().astype(np.int32))
-                else:
-                    detection_boxes.append(None)
-                    detection_scores.append(None)
-                    detection_classes.append(None)
-
-            test_outputs = {
-                'detection_boxes': detection_boxes,
-                'detection_scores': detection_scores,
-                'detection_classes': detection_classes,
-                'img_metas': img_metas_list
-            }
-
-        return test_outputs
-
-    def forward(self, img, mode='compression', **kwargs):
-        if mode == 'train':
-            return self.forward_train(img, **kwargs)
-        elif mode == 'test':
-            return self.forward_test(img, **kwargs)
-        elif mode == 'compression':
-            return self.forward_compression(img, **kwargs)
-
-    def forward_compression(self, x):
-        # fpn output content features of [dark3, dark4, dark5]
-        fpn_outs = self.backbone(x)
-        outputs = self.head(fpn_outs)
-
-        return outputs
-
-    def forward_export(self, img):
-        with torch.no_grad():
-            fpn_outs = self.backbone(img)
-            outputs = self.head(fpn_outs)
-
-            if self.decode_in_inference:
-                outputs = postprocess(outputs, self.num_classes,
-                                      self.test_conf, self.nms_thre)
-
-        return outputs
diff --git a/easycv/models/detection/utils/boxes.py b/easycv/models/detection/utils/boxes.py
index e0c0f12f..9fb9771e 100644
--- a/easycv/models/detection/utils/boxes.py
+++ b/easycv/models/detection/utils/boxes.py
@@ -39,7 +39,8 @@ def bboxes_iou(bboxes_a, bboxes_b, xyxy=True):
 # refer to easycv/models/detection/detectors/yolox/postprocess.py and test.py to rebuild a torch-blade-trtplugin NMS, which is checked by zhoulou in test.py
 # infer docker images is : registry.cn-shanghai.aliyuncs.com/pai-ai-test/eas-service:easycv_blade_181_export
 def trtplugin_efficientnms_postprocess():
-    return 
+    return
+
 
 def postprocess(prediction, num_classes, conf_thre=0.7, nms_thre=0.45):
     box_corner = prediction.new(prediction.shape)
@@ -155,7 +156,12 @@ def generalized_box_iou(boxes1, boxes2):
     return iou - (area - union) / area
 
 
-def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6):
+def bbox_overlaps(bboxes1,
+                  bboxes2,
+                  mode='iou',
+                  is_aligned=False,
+                  eps=1e-6,
+                  xyxy=True):
     """Calculate overlap between two set of bboxes.
 
     FP16 Contributed by https://github.com/open-mmlab/mmdetection/pull/4889
@@ -277,6 +283,10 @@ def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6):
     assert (bboxes1.size(-1) == 4 or bboxes1.size(0) == 0)
     assert (bboxes2.size(-1) == 4 or bboxes2.size(0) == 0)
 
+    if not xyxy:
+        bboxes1 = box_cxcywh_to_xyxy(bboxes1)
+        bboxes2 = box_cxcywh_to_xyxy(bboxes2)
+
     # Batch dim must be the same
     # Batch dim: (B1, B2, ... Bn)
     assert bboxes1.shape[:-2] == bboxes2.shape[:-2]
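
The new xyxy flag simply converts (cx, cy, w, h) boxes to corner format before the overlap computation. A small standalone illustration (box_cxcywh_to_xyxy mirrors the helper already used in this file; this copy exists only so the snippet runs on its own):

    import torch


    def box_cxcywh_to_xyxy(b):
        cx, cy, w, h = b.unbind(-1)
        return torch.stack([cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2], dim=-1)


    def aligned_iou(b1, b2, xyxy=True, eps=1e-6):
        if not xyxy:                       # boxes arrive as (cx, cy, w, h)
            b1, b2 = box_cxcywh_to_xyxy(b1), box_cxcywh_to_xyxy(b2)
        lt = torch.max(b1[..., :2], b2[..., :2])
        rb = torch.min(b1[..., 2:], b2[..., 2:])
        inter = (rb - lt).clamp(min=0).prod(dim=-1)
        area1 = (b1[..., 2:] - b1[..., :2]).prod(dim=-1)
        area2 = (b2[..., 2:] - b2[..., :2]).prod(dim=-1)
        return inter / (area1 + area2 - inter + eps)


    boxes_cxcywh = torch.tensor([[10., 10., 4., 4.]])
    print(aligned_iou(boxes_cxcywh, boxes_cxcywh, xyxy=False))   # tensor([1.0000])
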
diff --git a/easycv/models/detection/utils/misc.py b/easycv/models/detection/utils/misc.py
index 8cf3a006..409ff258 100644
--- a/easycv/models/detection/utils/misc.py
+++ b/easycv/models/detection/utils/misc.py
@@ -1,11 +1,12 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 from typing import List, Optional
-from torch.autograd import Function
+
 import numpy as np
 import torch
 import torchvision
 from packaging import version
 from torch import Tensor
+from torch.autograd import Function
 
 if version.parse(torchvision.__version__) < version.parse('0.7'):
     from torchvision.ops import _new_empty_tensor
@@ -182,6 +183,7 @@ def inverse_sigmoid(x, eps=1e-3):
     x2 = (1 - x).clamp(min=eps)
     return torch.log(x1 / x2)
 
+
 class SigmoidGeometricMean(Function):
     """Forward and backward function of geometric mean of two sigmoid
     functions.
diff --git a/easycv/models/detection/utils/tensorrt_nms.py b/easycv/models/detection/utils/tensorrt_nms.py
new file mode 100644
index 00000000..e69de29b
diff --git a/easycv/models/loss/iou_loss.py b/easycv/models/loss/iou_loss.py
index 87a5062e..0206d8cf 100644
--- a/easycv/models/loss/iou_loss.py
+++ b/easycv/models/loss/iou_loss.py
@@ -14,7 +14,7 @@
 
 @mmcv.jit(derivate=True, coderize=True)
 @weighted_loss
-def iou_loss(pred, target, linear=False, mode='log', eps=1e-6):
+def iou_loss(pred, target, linear=False, mode='log', eps=1e-6, xyxy=True):
     """IoU loss.
 
     Computing the IoU loss between a set of predicted bboxes and target bboxes.
@@ -39,7 +39,8 @@ def iou_loss(pred, target, linear=False, mode='log', eps=1e-6):
         warnings.warn('DeprecationWarning: Setting "linear=True" in '
                       'iou_loss is deprecated, please use "mode=`linear`" '
                       'instead.')
-    ious = bbox_overlaps(pred, target, is_aligned=True).clamp(min=eps)
+    ious = bbox_overlaps(
+        pred, target, is_aligned=True, xyxy=xyxy).clamp(min=eps)
     if mode == 'linear':
         loss = 1 - ious
     elif mode == 'square':
@@ -53,7 +54,7 @@ def iou_loss(pred, target, linear=False, mode='log', eps=1e-6):
 
 @mmcv.jit(derivate=True, coderize=True)
 @weighted_loss
-def giou_loss(pred, target, eps=1e-7):
+def giou_loss(pred, target, eps=1e-7, xyxy=True):
     r"""`Generalized Intersection over Union: A Metric and A Loss for Bounding
     Box Regression <https://arxiv.org/abs/1902.09630>`_.
 
@@ -66,13 +67,15 @@ def giou_loss(pred, target, eps=1e-7):
     Return:
         Tensor: Loss tensor.
     """
-    gious = bbox_overlaps(pred, target, mode='giou', is_aligned=True, eps=eps)
+    gious = bbox_overlaps(
+        pred, target, mode='giou', is_aligned=True, eps=eps, xyxy=xyxy)
     loss = 1 - gious
     return loss
 
 
 @LOSSES.register_module
 class IOUloss(nn.Module):
+
     def __init__(self, reduction='none', loss_type='iou'):
         super(IOUloss, self).__init__()
         self.reduction = reduction
@@ -141,14 +144,10 @@ def forward(self, pred, target):
             loss = 1 - giou.clamp(min=-1.0, max=1.0)
 
         elif self.loss_type == 'diou':
-            c_tl = torch.min(
-                (pred[:, :2] - pred[:, 2:] / 2),
-                (target[:, :2] - target[:, 2:] / 2)
-            )
-            c_br = torch.max(
-                (pred[:, :2] + pred[:, 2:] / 2),
-                (target[:, :2] + target[:, 2:] / 2)
-            )
+            c_tl = torch.min((pred[:, :2] - pred[:, 2:] / 2),
+                             (target[:, :2] - target[:, 2:] / 2))
+            c_br = torch.max((pred[:, :2] + pred[:, 2:] / 2),
+                             (target[:, :2] + target[:, 2:] / 2))
             convex_dis = torch.pow(c_br[:, 0] - c_tl[:, 0], 2) + torch.pow(
                 c_br[:, 1] - c_tl[:, 1], 2) + 1e-7  # convex diagonal squared
 
@@ -256,6 +255,7 @@ def forward(self,
                 weight=None,
                 avg_factor=None,
                 reduction_override=None,
+                xyxy=True,
                 **kwargs):
         """Forward function.
 
@@ -292,6 +292,7 @@ def forward(self,
             eps=self.eps,
             reduction=reduction,
             avg_factor=avg_factor,
+            xyxy=xyxy,
             **kwargs)
         return loss
 
@@ -299,146 +300,40 @@ def forward(self,
 @LOSSES.register_module()
 class GIoULoss(nn.Module):
 
-    def __init__(self,
-                 eps=1e-6,
-                 reduction='mean',
-                 loss_type='giou',
-                 loss_weight=1.0):
+    def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):
         super(GIoULoss, self).__init__()
         self.eps = eps
         self.reduction = reduction
-        self.loss_type = loss_type
-
-    def forward(self, pred, target):
-        assert pred.shape[0] == target.shape[0]
-        if target.dtype != pred.dtype:
-            target = target.to(pred.dtype)
-        pred = pred.view(-1, 4)
-        target = target.view(-1, 4)
-        tl = torch.max((pred[:, :2] - pred[:, 2:] / 2),
-                       (target[:, :2] - target[:, 2:] / 2))
-        br = torch.min((pred[:, :2] + pred[:, 2:] / 2),
-                       (target[:, :2] + target[:, 2:] / 2))
-
-        area_p = torch.prod(pred[:, 2:], 1)
-        area_g = torch.prod(target[:, 2:], 1)
-
-        en = (tl < br).type(tl.type()).prod(dim=1)
-        area_i = torch.prod(br - tl, 1) * en
-        iou = (area_i) / (area_p + area_g - area_i + 1e-16)
-
-        if self.loss_type == 'iou':
-            loss = 1 - iou**2
-
-        elif self.loss_type == 'siou':
-            # angle cost
-            c_h = torch.max(pred[:, 1], target[:, 1]) - torch.min(
-                pred[:, 1], target[:, 1])
-            c_w = torch.max(pred[:, 0], target[:, 0]) - torch.min(
-                pred[:, 0], target[:, 0])
-            sigma = torch.sqrt(((pred[:, :2] - target[:, :2])**2).sum(dim=1))
-            # angle_cost = 1 - 2 * torch.pow(torch.sin(torch.arctan(c_h / c_w) - torch.tensor(math.pi / 4)),2)
-            angle_cost = 2 * (c_h * c_w) / (sigma**2)
-
-            # distance cost
-            gamma = 2 - angle_cost
-            # gamma = 1
-            c_dw = torch.max(pred[:, 0], target[:, 0]) - torch.min(
-                pred[:, 0], target[:, 0]) + (pred[:, 2] + target[:, 2]) / 2
-            c_dh = torch.max(pred[:, 1], target[:, 1]) - torch.min(
-                pred[:, 1], target[:, 1]) + (pred[:, 3] + target[:, 3]) / 2
-            p_x = ((target[:, 0] - pred[:, 0]) / c_dw)**2
-            p_y = ((target[:, 1] - pred[:, 1]) / c_dh)**2
-            dist_cost = 2 - torch.exp(-gamma * p_x) - torch.exp(-gamma * p_y)
-
-            # shape cost
-            theta = 4
-            w_w = torch.abs(pred[:, 2] - target[:, 2]) / torch.max(
-                pred[:, 2], target[:, 2])
-            w_h = torch.abs(pred[:, 3] - target[:, 3]) / torch.max(
-                pred[:, 3], target[:, 3])
-            shape_cost = torch.pow((1 - torch.exp(-w_w)), theta) + torch.pow(
-                (1 - torch.exp(-w_h)), theta)
-
-            loss = 1 - iou + (dist_cost + shape_cost) / 2
-
-        elif self.loss_type == 'giou':
-            c_tl = torch.min((pred[:, :2] - pred[:, 2:] / 2),
-                             (target[:, :2] - target[:, 2:] / 2))
-            c_br = torch.max((pred[:, :2] + pred[:, 2:] / 2),
-                             (target[:, :2] + target[:, 2:] / 2))
-            area_c = torch.prod(c_br - c_tl, 1)
-            giou = iou - (area_c - area_i) / area_c.clamp(1e-16)
-            loss = 1 - giou.clamp(min=-1.0, max=1.0)
-
-        elif self.loss_type == 'diou':
-            c_tl = torch.min(
-                (pred[:, :2] - pred[:, 2:] / 2),
-                (target[:, :2] - target[:, 2:] / 2)  # top-left point of the enclosing box
-                (target[:, :2] - target[:, 2:] / 2)  # top-left point of the enclosing box
-            )
-            c_br = torch.max(
-                (pred[:, :2] + pred[:, 2:] / 2),
-                (target[:, :2] + target[:, 2:] / 2)  # bottom-right point of the enclosing box
-            )
-            convex_dis = torch.pow(c_br[:, 0] - c_tl[:, 0], 2) + torch.pow(
-                c_br[:, 1] - c_tl[:, 1], 2) + 1e-7  # convex diagonal squared
-
-            center_dis = (torch.pow(pred[:, 0] - target[:, 0], 2) +
-                          torch.pow(pred[:, 1] - target[:, 1], 2)
-                          )  # center diagonal squared
-
-            diou = iou - (center_dis / convex_dis)
-            loss = 1 - diou.clamp(min=-1.0, max=1.0)
-
-        elif self.loss_type == 'ciou':
-            c_tl = torch.min((pred[:, :2] - pred[:, 2:] / 2),
-                             (target[:, :2] - target[:, 2:] / 2))
-            c_br = torch.max((pred[:, :2] + pred[:, 2:] / 2),
-                             (target[:, :2] + target[:, 2:] / 2))
-            convex_dis = torch.pow(c_br[:, 0] - c_tl[:, 0], 2) + torch.pow(
-                c_br[:, 1] - c_tl[:, 1], 2) + 1e-7  # convex diagonal squared
-
-            center_dis = (torch.pow(pred[:, 0] - target[:, 0], 2) +
-                          torch.pow(pred[:, 1] - target[:, 1], 2)
-                          )  # center diagonal squared
-
-            v = (4 / math.pi**2) * torch.pow(
-                torch.atan(target[:, 2] / torch.clamp(target[:, 3], min=1e-7))
-                - torch.atan(pred[:, 2] / torch.clamp(pred[:, 3], min=1e-7)),
-                2)
-
-            with torch.no_grad():
-                alpha = v / ((1 + 1e-7) - iou + v)
-
-            ciou = iou - (center_dis / convex_dis + alpha * v)
-
-            loss = 1 - ciou.clamp(min=-1.0, max=1.0)
-
-        elif self.loss_type == 'eiou':
-
-            c_tl = torch.min((pred[:, :2] - pred[:, 2:] / 2),
-                             (target[:, :2] - target[:, 2:] / 2))
-            c_br = torch.max((pred[:, :2] + pred[:, 2:] / 2),
-                             (target[:, :2] + target[:, 2:] / 2))
-            convex_dis = torch.pow(c_br[:, 0] - c_tl[:, 0], 2) + torch.pow(
-                c_br[:, 1] - c_tl[:, 1], 2) + 1e-7  # convex diagonal squared
-
-            center_dis = (torch.pow(pred[:, 0] - target[:, 0], 2) +
-                          torch.pow(pred[:, 1] - target[:, 1], 2)
-                          )  # center diagonal squared
-
-            dis_w = torch.pow(pred[:, 2] - target[:, 2], 2)  # squared difference of the two boxes' widths
-            dis_h = torch.pow(pred[:, 3] - target[:, 3], 2)  # squared difference of the two boxes' heights
-
-            C_w = torch.pow(c_br[:, 0] - c_tl[:, 0], 2) + 1e-7  # squared width of the enclosing box
-            C_h = torch.pow(c_br[:, 1] - c_tl[:, 1], 2) + 1e-7  # squared height of the enclosing box
-
-            eiou = iou - (center_dis / convex_dis) - (dis_w / C_w) - (
-                dis_h / C_h)
-
-            loss = 1 - eiou.clamp(min=-1.0, max=1.0)
+        self.loss_weight = loss_weight
 
-        if self.reduction == 'mean':
-            loss = loss.mean()
-        elif self.reduction == 'sum':
-            loss = loss.sum()
+    def forward(self,
+                pred,
+                target,
+                weight=None,
+                avg_factor=None,
+                reduction_override=None,
+                xyxy=True,
+                **kwargs):
+        if weight is not None and not torch.any(weight > 0):
+            if pred.dim() == weight.dim() + 1:
+                weight = weight.unsqueeze(1)
+            return (pred * weight).sum()  # 0
+        assert reduction_override in (None, 'none', 'mean', 'sum')
+        reduction = (
+            reduction_override if reduction_override else self.reduction)
+        if weight is not None and weight.dim() > 1:
+            # TODO: remove this in the future
+            # reduce the weight of shape (n, 4) to (n,) to match the
+            # giou_loss of shape (n,)
+            assert weight.shape == pred.shape
+            weight = weight.mean(-1)
+        loss = self.loss_weight * giou_loss(
+            pred,
+            target,
+            weight,
+            eps=self.eps,
+            reduction=reduction,
+            avg_factor=avg_factor,
+            xyxy=xyxy,
+            **kwargs)
+        return loss
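
The rewritten GIoULoss.forward follows the usual mmdet-style weighted-loss convention: an elementwise loss is optionally weighted, then reduced by 'none' | 'mean' | 'sum', with avg_factor overriding the mean denominator. A standalone sketch of that reduction (illustration only, not the decorator easycv actually uses):

    import torch


    def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
        if weight is not None:
            loss = loss * weight
        if reduction == 'none':
            return loss
        if reduction == 'sum':
            return loss.sum()
        # 'mean': divide by avg_factor when given (e.g. number of positive samples)
        return loss.sum() / avg_factor if avg_factor is not None else loss.mean()


    elem_loss = torch.tensor([0.2, 0.4, 0.6, 0.8])
    print(weight_reduce_loss(elem_loss, weight=torch.tensor([1., 1., 0., 0.]),
                             reduction='mean', avg_factor=2))   # tensor(0.3000)
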
diff --git a/easycv/models/utils/ops.py b/easycv/models/utils/ops.py
index 03c5277b..05cc1aa4 100644
--- a/easycv/models/utils/ops.py
+++ b/easycv/models/utils/ops.py
@@ -1,10 +1,11 @@
 # Copyright (c) OpenMMLab. All rights reserved.
 # Adapt from: https://github.com/open-mmlab/mmpose/blob/master/mmpose/models/utils/ops.py
+import math
 import warnings
 
 import torch
 import torch.nn.functional as F
-import math
+
 
 def resize_tensor(input,
                   size=None,
@@ -47,6 +48,7 @@ def resize_tensor(input,
         size = tuple(int(x) for x in size)
     return F.interpolate(input, size, scale_factor, mode, align_corners)
 
+
 def make_divisible(x, divisor):
     # Upward revision the value x to make it evenly divisible by the divisor.
     return math.ceil(x / divisor) * divisor
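
make_divisible just rounds a value (typically a channel count) up to the nearest multiple of the divisor; a couple of quick sanity checks of the behaviour:

    import math

    def make_divisible(x, divisor):          # same helper as above, repeated so the check runs standalone
        return math.ceil(x / divisor) * divisor

    assert make_divisible(100, 8) == 104     # 100 / 8 = 12.5 -> 13 * 8
    assert make_divisible(64, 8) == 64       # already a multiple, unchanged
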
diff --git a/easycv/toolkit/blade/cv_blade_utils.py b/easycv/toolkit/blade/cv_blade_utils.py
index 8191c646..80c33944 100644
--- a/easycv/toolkit/blade/cv_blade_utils.py
+++ b/easycv/toolkit/blade/cv_blade_utils.py
@@ -66,7 +66,8 @@ def blade_env_assert():
 
 
 @contextmanager
-def opt_trt_config(input_config=dict(enable_fp16=True)):
+def opt_trt_config(
+        input_config=dict(enable_fp16=True, fp16_fallback_op_ratio=0.05)):
     from torch_blade import tensorrt
     torch_config = torch_blade.Config()
 
@@ -239,9 +240,11 @@ def check_results(results0, results1):
 def blade_optimize(script_model,
                    model,
                    inputs,
-                   blade_config=dict(enable_fp16=True),
+                   blade_config=dict(
+                       enable_fp16=True, fp16_fallback_op_ratio=0.05),
                    backend='TensorRT',
                    batch=1,
+                   warm_up_time=10,
                    compute_cost=True,
                    static_opt=True):
 
@@ -289,24 +292,24 @@ def blade_optimize(script_model,
     print(opt_model.forward.graph)
     torch.cuda.empty_cache()
     # warm-up
-    for k in range(10):
+    for k in range(warm_up_time):
         test_result = opt_model(*inputs)
         torch.cuda.synchronize()
 
     torch.cuda.synchronize()
     cu_prof_start()
-    for k in range(10):
+    for k in range(warm_up_time):
         test_result = opt_model(*inputs)
         torch.cuda.synchronize()
     cu_prof_stop()
     import torch.autograd.profiler as profiler
     with profiler.profile(use_cuda=True) as prof:
-        for k in range(10):
+        for k in range(warm_up_time):
             test_result = opt_model(*inputs)
             torch.cuda.synchronize()
 
     with profiler.profile(use_cuda=True) as prof:
-        for k in range(10):
+        for k in range(warm_up_time):
             test_result = opt_model(*inputs)
             torch.cuda.synchronize()
 
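
blade_optimize now takes warm_up_time as a parameter instead of the hard-coded 10 iterations. A hedged, torch_blade-free sketch of the same warm-up-then-measure pattern (the names here are illustrative, not easycv API; CUDA synchronisation only matters on GPU):

    import time
    import torch


    def benchmark(model, inputs, warm_up_time=10, iters=50):
        # warm-up so JIT specialisation / kernel autotuning does not pollute the timing
        for _ in range(warm_up_time):
            model(*inputs)
        if torch.cuda.is_available():
            torch.cuda.synchronize()
        start = time.perf_counter()
        for _ in range(iters):
            model(*inputs)
        if torch.cuda.is_available():
            torch.cuda.synchronize()
        return (time.perf_counter() - start) / iters


    scripted = torch.jit.script(torch.nn.Linear(64, 64).eval())
    print('%.3f ms / iter' % (benchmark(scripted, (torch.randn(1, 64),)) * 1e3))
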
diff --git a/easycv/utils/checkpoint.py b/easycv/utils/checkpoint.py
index a44fd1b6..4bf0af60 100644
--- a/easycv/utils/checkpoint.py
+++ b/easycv/utils/checkpoint.py
@@ -4,7 +4,7 @@
 import torch
 from mmcv.parallel import is_module_wrapper
 from mmcv.runner import load_checkpoint as mmcv_load_checkpoint
-from mmcv.runner.checkpoint import (get_state_dict,weights_to_cpu)
+from mmcv.runner.checkpoint import get_state_dict, weights_to_cpu
 from torch.optim import Optimizer
 
 from easycv.file import io
diff --git a/easycv/utils/mmlab_utils.py b/easycv/utils/mmlab_utils.py
index bff3040a..db7e94e6 100644
--- a/easycv/utils/mmlab_utils.py
+++ b/easycv/utils/mmlab_utils.py
@@ -14,7 +14,7 @@
 
 try:
     from mmcv.runner.hooks import HOOKS
-    HOOKS._module_dict.pop('YOLOXLrUpdaterHook', None)
+    # HOOKS._module_dict.pop('YOLOXLrUpdaterHook', None)
     from mmdet.models.builder import MODELS as MMMODELS
     from mmdet.models.builder import BACKBONES as MMBACKBONES
     from mmdet.models.builder import NECKS as MMNECKS
diff --git a/test.py b/test.py
index a8de94c3..d1d37a6c 100755
--- a/test.py
+++ b/test.py
@@ -1,22 +1,21 @@
 # from easycv.models.detection.detectors.yolox import YOLOX
-from easycv.models.detection.detectors.yolox.postprocess import create_tensorrt_postprocess
+import sys
+
+import numpy as np
 import torch
+from PIL import Image
 from torchvision.transforms import Compose
 
+from easycv.datasets.registry import PIPELINES
 from easycv.models import build_model
+from easycv.models.detection.detectors.yolox.postprocess import \
+    create_tensorrt_postprocess
+from easycv.models.detection.utils import postprocess
 from easycv.utils.checkpoint import load_checkpoint
 from easycv.utils.config_tools import mmcv_config_fromfile
 from easycv.utils.registry import build_from_cfg
-from easycv.datasets.registry import PIPELINES
-from easycv.models.detection.utils import postprocess
-
 
-
-import sys
-import numpy as np
-from PIL import Image
-
-if __name__=='__main__':
+if __name__ == '__main__':
     #a = YOLOX(decode_in_inference=False).eval()
     cfg = sys.argv[1]
     ckpt_path = sys.argv[2]
@@ -32,9 +31,10 @@
     pipeline = [build_from_cfg(p, PIPELINES) for p in test_pipeline]
     pipeline = Compose(pipeline)
 
-    # 8400 ishard code, need to reimplement to  sum(img_w / stride_i + img_h /stride_i) 
+    # 8400 is hard-coded, need to reimplement as sum(img_w / stride_i * img_h / stride_i)
     example_scores = torch.randn([1, 8400, 85], dtype=torch.float32)
-    trt_ext = create_tensorrt_postprocess(example_scores, iou_thres=model.nms_thre, score_thres=model.test_conf)
+    trt_ext = create_tensorrt_postprocess(
+        example_scores, iou_thres=model.nms_thre, score_thres=model.test_conf)
 
     # img_path = '/apsara/xinyi.zxy/data/coco/val2017/000000129062.jpg'
     img_path = '/apsara/xinyi.zxy/data/coco/val2017/000000254016.jpg'
@@ -52,9 +52,10 @@
     model.decode_in_inference = False
     # print(type(model), model.decode_in_inference)
     c = model.forward_export(img)
-    
+
     # print(type(c), c.shape)
-    print(model.test_conf, model.nms_thre, model.num_classes, model.decode_in_inference)
+    print(model.test_conf, model.nms_thre, model.num_classes,
+          model.decode_in_inference)
     tc = model.head.decode_outputs(c, c[0].type())
     # print(type(tc))
     # print(tc.shape)
@@ -62,25 +63,26 @@
     import copy
     tcback = copy.deepcopy(tc)
 
-    tpa = postprocess(tc, model.num_classes, model.test_conf, model.nms_thre)[0]
+    tpa = postprocess(tc, model.num_classes, model.test_conf,
+                      model.nms_thre)[0]
     # print(tpa)
     tpa[:, 4] = tpa[:, 4] * tpa[:, 5]
     tpa[:, 5] = tpa[:, 6]
     tpa = tpa[:, :6]
     # print("fuck tpa:", len(tpa), tpa[0].shape)
-    box_a = tpa[:,:4]
-    score_a = tpa[:,4]
-    id_a = tpa[:,5]
+    box_a = tpa[:, :4]
+    score_a = tpa[:, 4]
+    id_a = tpa[:, 5]
     # print(tpa)
 
-    # trt_ext must be cuda 
+    # trt_ext must be cuda
     tcback = tcback
     tpb = trt_ext.forward(tcback)
     # print("fuck tpb:",len(tpb))
-     
+
     valid_length = min(len(tpa), tpb[2].shape[1])
     print(valid_length)
-    valid_length = min(valid_length,30)
+    valid_length = min(valid_length, 30)
 
     box_a = box_a[:valid_length]
     score_a = score_a[:valid_length]
@@ -90,16 +92,16 @@
     print(tpb[2].shape)
     print(tpb[3].shape)
 
-    box_b = tpb[1][:,:valid_length,:].cpu().view(box_a.shape)
-    score_b = tpb[2][:,:valid_length].cpu().view(score_a.shape)
-    id_b = tpb[3][:,:valid_length].cpu().view(id_a.shape)
-    
+    box_b = tpb[1][:, :valid_length, :].cpu().view(box_a.shape)
+    score_b = tpb[2][:, :valid_length].cpu().view(score_a.shape)
+    id_b = tpb[3][:, :valid_length].cpu().view(id_a.shape)
+
     def get_diff(input_a, input_b, name='score'):
-        print("name:", name)
-        print("shape:", input_a.shape)
-        print("max_diff  :",torch.max(input_a-input_b))
-        print("avg_diff  :",torch.mean(input_a-input_b))
-        print("totol_diff:",torch.sum(input_a-input_b))
+        print('name:', name)
+        print('shape:', input_a.shape)
+        print('max_diff  :', torch.max(input_a - input_b))
+        print('avg_diff  :', torch.mean(input_a - input_b))
+        print('total_diff:', torch.sum(input_a - input_b))
 
     get_diff(box_a, box_b, 'box')
     get_diff(score_a, score_b, 'score')
@@ -110,4 +112,4 @@ def get_diff(input_a, input_b, name='score'):
         img = Image.open(img_path)
         pred = TorchYoloXPredictor('models/predict.pt')
         m = pred.predict([img])
-        print(m)
\ No newline at end of file
+        print(m)
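
Note that get_diff above reports signed statistics, which can cancel out. A hedged variant for local debugging that also reports absolute error and a torch.allclose verdict (illustration only, not part of the patch):

    import torch


    def compare(a, b, name, rtol=1e-3, atol=1e-4):
        diff = (a - b).abs()
        print(name, 'shape:', tuple(a.shape),
              'max_abs: %.6f' % diff.max().item(),
              'mean_abs: %.6f' % diff.mean().item(),
              'allclose:', torch.allclose(a, b, rtol=rtol, atol=atol))


    a = torch.randn(30, 4)
    compare(a, a + 1e-5, 'box')
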

From 10f549e4f8e2561627c731511482b109743c0c5a Mon Sep 17 00:00:00 2001
From: zouxinyi0625 <zouxinyi.zxy@alibaba-inc.com>
Date: Mon, 15 Aug 2022 14:46:36 +0800
Subject: [PATCH 39/69] fix cr problem

---
 .../yolox_s_8xb16_300e_coco_asff_tood3.py     |   2 +-
 easycv/apis/export.py                         |  18 +--
 .../models/detection/detectors/yolox/test.py  | 115 ++++++++++++++++++
 .../detectors/yolox/yolo_head_template.py     |   8 +-
 easycv/models/loss/__init__.py                |   2 +-
 easycv/models/loss/iou_loss.py                |   7 +-
 6 files changed, 131 insertions(+), 21 deletions(-)
 create mode 100644 easycv/models/detection/detectors/yolox/test.py

diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood3.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood3.py
index 09b48005..ecd63a54 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood3.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood3.py
@@ -195,7 +195,7 @@
 
 export = dict(use_jit=True,
               export_blade=True,  # whether to also export a blade-optimized model
-              end2end=False,      # whether to pack the postprocess (nms) into the jit + blade model
+              end2end=True,      # whether to pack the postprocess (nms) into the jit + blade model
               batch_size=32,       # batch_size to trace with when static_opt=True
               blade_config=dict(
                     dict(enable_fp16=True,
diff --git a/easycv/apis/export.py b/easycv/apis/export.py
index b7f828b6..435e54d0 100644
--- a/easycv/apis/export.py
+++ b/easycv/apis/export.py
@@ -23,7 +23,7 @@
 
 
 def reparameterize_models(model):
-    """ reparameterize model for inference, especially for
+    """ reparameterize model for inference, especially forf
             1. rep conv block : merge 3x3 weight 1x1 weights
         call module switch_to_deploy recursively
     Args:
@@ -230,10 +230,8 @@ def _export_yolox(model, cfg, filename):
             assert blade_env_assert()
 
             if end2end:
-                if batch_size == 1:
-                    input = 255 * torch.rand(img_scale + (3, ))
-                else:
-                    input = 255 * torch.rand(img_scale + (3, batch_size))
+                # input [b,h,w,c]
+                input = 255 * torch.rand((batch_size,)+img_scale + (3,))
 
             yolox_blade = blade_optimize(
                 script_model=model,
@@ -511,15 +509,16 @@ def __call__(
         ) -> Tuple[torch.Tensor, Dict[str, Tuple[float, float]]]:
             """
             Args:
-                image (torch.Tensor): image format should be [H, W, C]
+                image (torch.Tensor): image format should be [b, H, W, C]
             """
             input_h, input_w = self.target_size
-            image = image.permute(2, 0, 1)
+            print('img.shape', image.shape)
+            image = image.permute(0, 3, 1, 2)
 
             # rgb2bgr
-            image = image[torch.tensor([2, 1, 0]), :, :]
+            image = image[:,torch.tensor([2, 1, 0]), :, :]
 
-            image = torch.unsqueeze(image, 0)
+            # image = torch.unsqueeze(image, 0)
             ori_h, ori_w = image.shape[-2:]
 
             mean = [123.675, 116.28, 103.53]
@@ -690,6 +689,7 @@ def forward(self, image):
 
         with torch.no_grad():
             if self.preprocess_fn is not None:
+                print('before', image.shape)
                 output = self.preprocess_fn(image)
                 # if multiple values are returned, the first one must be the image,
                 # and the others are optional and will all be passed into postprocess_fn
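
With end2end export, the preprocess function now receives a batched [B, H, W, C] tensor rather than a single image. A small standalone sketch of the layout and channel handling the edited code performs (mean/std normalisation then follows as in the export code):

    import torch


    def to_nchw_bgr(image):
        # image: [B, H, W, C] RGB batch, as fed to the end2end export path above
        image = image.permute(0, 3, 1, 2)                # [B, H, W, C] -> [B, C, H, W]
        return image[:, torch.tensor([2, 1, 0]), :, :]   # swap RGB -> BGR on the channel dim


    batch = 255 * torch.rand(2, 640, 640, 3)
    print(to_nchw_bgr(batch).shape)   # torch.Size([2, 3, 640, 640])
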
diff --git a/easycv/models/detection/detectors/yolox/test.py b/easycv/models/detection/detectors/yolox/test.py
new file mode 100644
index 00000000..3c97f3c6
--- /dev/null
+++ b/easycv/models/detection/detectors/yolox/test.py
@@ -0,0 +1,115 @@
+# from easycv.models.detection.detectors.yolox import YOLOX
+from easycv.models.detection.detectors.yolox.postprocess import create_tensorrt_postprocess
+import torch
+from torchvision.transforms import Compose
+
+from easycv.models import build_model
+from easycv.utils.checkpoint import load_checkpoint
+from easycv.utils.config_tools import mmcv_config_fromfile
+from easycv.utils.registry import build_from_cfg
+from easycv.datasets.registry import PIPELINES
+from easycv.models.detection.utils import postprocess
+
+import sys
+import numpy as np
+from PIL import Image
+
+if __name__ == '__main__':
+    # a = YOLOX(decode_in_inference=False).eval()
+    cfg = sys.argv[1]
+    ckpt_path = sys.argv[2]
+
+    cfg = mmcv_config_fromfile(cfg)
+    model = build_model(cfg.model)
+    load_checkpoint(model, ckpt_path, map_location='cpu')
+    model = model.eval()
+
+    test_pipeline = cfg.test_pipeline
+    CLASSES = cfg.CLASSES
+
+    pipeline = [build_from_cfg(p, PIPELINES) for p in test_pipeline]
+    pipeline = Compose(pipeline)
+
+    # 8400 is hard-coded, need to reimplement as sum(img_w / stride_i * img_h / stride_i)
+    example_scores = torch.randn([1, 8400, 85], dtype=torch.float32)
+    trt_ext = create_tensorrt_postprocess(example_scores, iou_thres=model.nms_thre, score_thres=model.test_conf)
+
+    # img_path = '/apsara/xinyi.zxy/data/coco/val2017/000000129062.jpg'
+    img_path = '/apsara/xinyi.zxy/data/coco/val2017/000000254016.jpg'
+    # img = cv2.imread(img_path)
+    img = Image.open(img_path)
+    if type(img) is not np.ndarray:
+        img = np.asarray(img)
+
+    # ori_img_shape = img.shape[:2]
+    data_dict = {'img': img}
+    data_dict = pipeline(data_dict)
+    img = data_dict['img']
+    img = torch.unsqueeze(img._data, 0)
+    # print(img.shape)
+    model.decode_in_inference = False
+    # print(type(model), model.decode_in_inference)
+    c = model.forward_export(img)
+
+    # print(type(c), c.shape)
+    print(model.test_conf, model.nms_thre, model.num_classes, model.decode_in_inference)
+    tc = model.head.decode_outputs(c, c[0].type())
+    # print(type(tc))
+    # print(tc.shape)
+
+    import copy
+
+    tcback = copy.deepcopy(tc)
+
+    tpa = postprocess(tc, model.num_classes, model.test_conf, model.nms_thre)[0]
+    # print(tpa)
+    tpa[:, 4] = tpa[:, 4] * tpa[:, 5]
+    tpa[:, 5] = tpa[:, 6]
+    tpa = tpa[:, :6]
+    # print("fuck tpa:", len(tpa), tpa[0].shape)
+    box_a = tpa[:, :4]
+    score_a = tpa[:, 4]
+    id_a = tpa[:, 5]
+    # print(tpa)
+
+    # trt_ext must be cuda
+    tcback = tcback
+    tpb = trt_ext.forward(tcback)
+    # print("fuck tpb:",len(tpb))
+
+    valid_length = min(len(tpa), tpb[2].shape[1])
+    print(valid_length)
+    valid_length = min(valid_length, 30)
+
+    box_a = box_a[:valid_length]
+    score_a = score_a[:valid_length]
+    id_a = id_a[:valid_length]
+
+    print(tpb[1].shape)
+    print(tpb[2].shape)
+    print(tpb[3].shape)
+
+    box_b = tpb[1][:, :valid_length, :].cpu().view(box_a.shape)
+    score_b = tpb[2][:, :valid_length].cpu().view(score_a.shape)
+    id_b = tpb[3][:, :valid_length].cpu().view(id_a.shape)
+
+
+    def get_diff(input_a, input_b, name='score'):
+        print("name:", name)
+        print("shape:", input_a.shape)
+        print("max_diff  :", torch.max(input_a - input_b))
+        print("avg_diff  :", torch.mean(input_a - input_b))
+        print("totol_diff:", torch.sum(input_a - input_b))
+
+
+    get_diff(box_a, box_b, 'box')
+    get_diff(score_a, score_b, 'score')
+    get_diff(id_a, id_a, 'id')
+
+    if 0:
+        from easycv.predictors import TorchYoloXPredictor
+
+        img = Image.open(img_path)
+        pred = TorchYoloXPredictor('models/predict.pt')
+        m = pred.predict([img])
+        print(m)
\ No newline at end of file
diff --git a/easycv/models/detection/detectors/yolox/yolo_head_template.py b/easycv/models/detection/detectors/yolox/yolo_head_template.py
index 4cf77d43..c19d09cc 100644
--- a/easycv/models/detection/detectors/yolox/yolo_head_template.py
+++ b/easycv/models/detection/detectors/yolox/yolo_head_template.py
@@ -10,7 +10,7 @@
 
 from easycv.models.backbones.network_blocks import BaseConv, DWConv
 from easycv.models.detection.utils import bboxes_iou
-from easycv.models.loss import GIoULoss, IOUloss, IoULoss
+from easycv.models.loss import YOLOX_IOULoss
 
 
 class YOLOXHead_Template(nn.Module):
@@ -136,11 +136,7 @@ def __init__(self,
         self.use_l1 = False
         self.l1_loss = nn.L1Loss(reduction='none')
 
-        self.iou_loss = IOUloss(reduction='none', loss_type=reg_loss_type)
-        if reg_loss_type == 'iou':
-            self.iou_loss1 = IoULoss(reduction='none', mode='square')
-        elif reg_loss_type == 'giou':
-            self.iou_loss1 = GIoULoss(reduction='none')
+        self.iou_loss = YOLOX_IOULoss(reduction='none', loss_type=reg_loss_type)
 
         self.obj_loss_type = obj_loss_type
         if obj_loss_type == 'BCE':
diff --git a/easycv/models/loss/__init__.py b/easycv/models/loss/__init__.py
index 3d991a8c..c6cec7f6 100644
--- a/easycv/models/loss/__init__.py
+++ b/easycv/models/loss/__init__.py
@@ -1,6 +1,6 @@
 # Copyright (c) Alibaba, Inc. and its affiliates.
 from .cross_entropy_loss import CrossEntropyLoss
 from .focal_loss import FocalLoss, VarifocalLoss
-from .iou_loss import GIoULoss, IoULoss, IOUloss
+from .iou_loss import GIoULoss, IoULoss, YOLOX_IOULoss
 from .mse_loss import JointsMSELoss
 from .pytorch_metric_learning import *
diff --git a/easycv/models/loss/iou_loss.py b/easycv/models/loss/iou_loss.py
index 0206d8cf..eb1af973 100644
--- a/easycv/models/loss/iou_loss.py
+++ b/easycv/models/loss/iou_loss.py
@@ -74,10 +74,9 @@ def giou_loss(pred, target, eps=1e-7, xyxy=True):
 
 
 @LOSSES.register_module
-class IOUloss(nn.Module):
-
+class YOLOX_IOULoss(nn.Module):
     def __init__(self, reduction='none', loss_type='iou'):
-        super(IOUloss, self).__init__()
+        super(YOLOX_IOULoss, self).__init__()
         self.reduction = reduction
         self.loss_type = loss_type
 
@@ -109,7 +108,7 @@ def forward(self, pred, target):
             c_w = torch.max(pred[:, 0], target[:, 0]) - torch.min(
                 pred[:, 0], target[:, 0])
             sigma = torch.sqrt(((pred[:, :2] - target[:, :2])**2).sum(dim=1))
-            # angle_cost = 1 - 2 * torch.pow(torch.sin(torch.arctan(c_h / c_w) - torch.tensor(math.pi / 4)),2)
+
             angle_cost = 2 * (c_h * c_w) / (sigma**2)
 
             # distance cost

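
For reference, the angle-cost term kept in the siou branch is just sin(2α) of the line joining the two box centres, written as 2·c_h·c_w / σ². A tiny numeric check using the same expressions as the code above (a sketch on toy boxes, not the loss module itself):

    import math
    import torch

    pred = torch.tensor([[0.0, 0.0, 2.0, 2.0]])     # (cx, cy, w, h)
    target = torch.tensor([[3.0, 4.0, 2.0, 2.0]])

    c_h = torch.max(pred[:, 1], target[:, 1]) - torch.min(pred[:, 1], target[:, 1])   # |dy| = 4
    c_w = torch.max(pred[:, 0], target[:, 0]) - torch.min(pred[:, 0], target[:, 0])   # |dx| = 3
    sigma = torch.sqrt(((pred[:, :2] - target[:, :2]) ** 2).sum(dim=1))               # centre distance = 5
    angle_cost = 2 * (c_h * c_w) / (sigma ** 2)

    alpha = math.atan2(4, 3)
    print(angle_cost.item(), math.sin(2 * alpha))   # both ~0.96
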
From 1eb0a4b56adf19da06511465562accb0612b7a64 Mon Sep 17 00:00:00 2001
From: zouxinyi0625 <zouxinyi.zxy@alibaba-inc.com>
Date: Mon, 15 Aug 2022 14:48:42 +0800
Subject: [PATCH 40/69] fix cr problem

---
 easycv/models/detection/detectors/yolox/postprocess.py | 2 ++
 easycv/models/detection/detectors/yolox/test.py        | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/easycv/models/detection/detectors/yolox/postprocess.py b/easycv/models/detection/detectors/yolox/postprocess.py
index 4d33c7ce..5464d515 100644
--- a/easycv/models/detection/detectors/yolox/postprocess.py
+++ b/easycv/models/detection/detectors/yolox/postprocess.py
@@ -1,3 +1,5 @@
+# !!! ignore this for CR, we are still working on the tensorrt nms problem
+
 import torch
 from torch import nn
 
diff --git a/easycv/models/detection/detectors/yolox/test.py b/easycv/models/detection/detectors/yolox/test.py
index 3c97f3c6..4f66cab8 100644
--- a/easycv/models/detection/detectors/yolox/test.py
+++ b/easycv/models/detection/detectors/yolox/test.py
@@ -1,3 +1,5 @@
+# !!! ignore this for CR, we are still working on the tensorrt nms problem
+
 # from easycv.models.detection.detectors.yolox import YOLOX
 from easycv.models.detection.detectors.yolox.postprocess import create_tensorrt_postprocess
 import torch

From 08bbba8177329f6f4a4925ec599f138054cdd9a6 Mon Sep 17 00:00:00 2001
From: zouxinyi0625 <zouxinyi.zxy@alibaba-inc.com>
Date: Mon, 15 Aug 2022 15:53:57 +0800
Subject: [PATCH 41/69] fix ut problem

---
 .../yolox/yolox_s_8xb16_300e_coco.py          |  10 --
 .../yolox_s_8xb16_300e_coco_asff_tood3.py     |  10 +-
 easycv/apis/export.py                         |   4 +-
 .../models/detection/detectors/yolox/test.py  |  40 +++---
 .../detectors/yolox/yolo_head_template.py     |   3 +-
 .../models/detection/detectors/yolox/yolox.py |   2 +-
 .../detectors/yolox_edge/yolox_edge.py        |  54 +++-----
 easycv/models/loss/iou_loss.py                |   1 +
 easycv/utils/mmlab_utils.py                   |   2 +-
 test.py                                       | 115 ------------------
 tests/models/detection/yolox/test_yolox.py    |   9 +-
 .../detection/yolox_edge/test_yolox_edge.py   |   9 +-
 12 files changed, 59 insertions(+), 200 deletions(-)
 delete mode 100755 test.py

diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
index c83c2910..266763ae 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
@@ -193,13 +193,3 @@
     ])
 
 export = dict(use_jit=False, export_blade=False, end2end=False)
-
-export = dict(use_jit=True,
-              export_blade=True,  # whether to also export a blade-optimized model
-              end2end=False,      # whether to pack the postprocess (nms) into the jit + blade model
-              batch_size=32,       # batch_size to trace with when static_opt=True
-              blade_config=dict(
-                    dict(enable_fp16=True,
-                    fp16_fallback_op_ratio=0.05)
-              ),   # ratio of layers that fall back from fp16 to fp32
-              static_opt=True)    # must be True to use static-shape optimization
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood3.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood3.py
index ecd63a54..9f36a085 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood3.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood3.py
@@ -193,12 +193,4 @@
         # dict(type='WandbLoggerHookV2'),
     ])
 
-export = dict(use_jit=True,
-              export_blade=True,  # whether to export a blade model
-              end2end=True,      # whether to wrap preprocess/postprocess (nms) into the jit + blade model
-              batch_size=32,       # batch size used when static_opt=True
-              blade_config=dict(
-                    dict(enable_fp16=True,
-                    fp16_fallback_op_ratio=0.05)
-              ),   # ratio of layers that fall back from fp16 to fp32
-              static_opt=True)    # whether to use static shape optimization, should be True
+export = dict(use_jit=False, export_blade=False, end2end=False)
diff --git a/easycv/apis/export.py b/easycv/apis/export.py
index 435e54d0..49cce56d 100644
--- a/easycv/apis/export.py
+++ b/easycv/apis/export.py
@@ -231,7 +231,7 @@ def _export_yolox(model, cfg, filename):
 
             if end2end:
                 # input [b,h,w,c]
-                input = 255 * torch.rand((batch_size,)+img_scale + (3,))
+                input = 255 * torch.rand((batch_size, ) + img_scale + (3, ))
 
             yolox_blade = blade_optimize(
                 script_model=model,
@@ -516,7 +516,7 @@ def __call__(
             image = image.permute(0, 3, 1, 2)
 
             # rgb2bgr
-            image = image[:,torch.tensor([2, 1, 0]), :, :]
+            image = image[:, torch.tensor([2, 1, 0]), :, :]
 
             # image = torch.unsqueeze(image, 0)
             ori_h, ori_w = image.shape[-2:]
diff --git a/easycv/models/detection/detectors/yolox/test.py b/easycv/models/detection/detectors/yolox/test.py
index 4f66cab8..882cb187 100644
--- a/easycv/models/detection/detectors/yolox/test.py
+++ b/easycv/models/detection/detectors/yolox/test.py
@@ -1,20 +1,21 @@
 # !!! Ignore this file for CR; we are still working on the TensorRT NMS problem.
 
-# from easycv.models.detection.detectors.yolox import YOLOX
-from easycv.models.detection.detectors.yolox.postprocess import create_tensorrt_postprocess
+import sys
+
+import numpy as np
 import torch
+from PIL import Image
 from torchvision.transforms import Compose
 
+from easycv.datasets.registry import PIPELINES
 from easycv.models import build_model
+# from easycv.models.detection.detectors.yolox import YOLOX
+from easycv.models.detection.detectors.yolox.postprocess import \
+    create_tensorrt_postprocess
+from easycv.models.detection.utils import postprocess
 from easycv.utils.checkpoint import load_checkpoint
 from easycv.utils.config_tools import mmcv_config_fromfile
 from easycv.utils.registry import build_from_cfg
-from easycv.datasets.registry import PIPELINES
-from easycv.models.detection.utils import postprocess
-
-import sys
-import numpy as np
-from PIL import Image
 
 if __name__ == '__main__':
     # a = YOLOX(decode_in_inference=False).eval()
@@ -34,7 +35,8 @@
 
     # 8400 is hard-coded; should be reimplemented as sum((img_w / stride_i) * (img_h / stride_i))
     example_scores = torch.randn([1, 8400, 85], dtype=torch.float32)
-    trt_ext = create_tensorrt_postprocess(example_scores, iou_thres=model.nms_thre, score_thres=model.test_conf)
+    trt_ext = create_tensorrt_postprocess(
+        example_scores, iou_thres=model.nms_thre, score_thres=model.test_conf)
 
     # img_path = '/apsara/xinyi.zxy/data/coco/val2017/000000129062.jpg'
     img_path = '/apsara/xinyi.zxy/data/coco/val2017/000000254016.jpg'
@@ -54,7 +56,8 @@
     c = model.forward_export(img)
 
     # print(type(c), c.shape)
-    print(model.test_conf, model.nms_thre, model.num_classes, model.decode_in_inference)
+    print(model.test_conf, model.nms_thre, model.num_classes,
+          model.decode_in_inference)
     tc = model.head.decode_outputs(c, c[0].type())
     # print(type(tc))
     # print(tc.shape)
@@ -63,7 +66,8 @@
 
     tcback = copy.deepcopy(tc)
 
-    tpa = postprocess(tc, model.num_classes, model.test_conf, model.nms_thre)[0]
+    tpa = postprocess(tc, model.num_classes, model.test_conf,
+                      model.nms_thre)[0]
     # print(tpa)
     tpa[:, 4] = tpa[:, 4] * tpa[:, 5]
     tpa[:, 5] = tpa[:, 6]
@@ -95,14 +99,12 @@
     score_b = tpb[2][:, :valid_length].cpu().view(score_a.shape)
     id_b = tpb[3][:, :valid_length].cpu().view(id_a.shape)
 
-
     def get_diff(input_a, input_b, name='score'):
-        print("name:", name)
-        print("shape:", input_a.shape)
-        print("max_diff  :", torch.max(input_a - input_b))
-        print("avg_diff  :", torch.mean(input_a - input_b))
-        print("totol_diff:", torch.sum(input_a - input_b))
-
+        print('name:', name)
+        print('shape:', input_a.shape)
+        print('max_diff  :', torch.max(input_a - input_b))
+        print('avg_diff  :', torch.mean(input_a - input_b))
+        print('totol_diff:', torch.sum(input_a - input_b))
 
     get_diff(box_a, box_b, 'box')
     get_diff(score_a, score_b, 'score')
@@ -114,4 +116,4 @@ def get_diff(input_a, input_b, name='score'):
         img = Image.open(img_path)
         pred = TorchYoloXPredictor('models/predict.pt')
         m = pred.predict([img])
-        print(m)
\ No newline at end of file
+        print(m)
diff --git a/easycv/models/detection/detectors/yolox/yolo_head_template.py b/easycv/models/detection/detectors/yolox/yolo_head_template.py
index c19d09cc..5aa09c14 100644
--- a/easycv/models/detection/detectors/yolox/yolo_head_template.py
+++ b/easycv/models/detection/detectors/yolox/yolo_head_template.py
@@ -136,7 +136,8 @@ def __init__(self,
         self.use_l1 = False
         self.l1_loss = nn.L1Loss(reduction='none')
 
-        self.iou_loss = YOLOX_IOULoss(reduction='none', loss_type=reg_loss_type)
+        self.iou_loss = YOLOX_IOULoss(
+            reduction='none', loss_type=reg_loss_type)
 
         self.obj_loss_type = obj_loss_type
         if obj_loss_type == 'BCE':
diff --git a/easycv/models/detection/detectors/yolox/yolox.py b/easycv/models/detection/detectors/yolox/yolox.py
index fc2a231a..02a0d15c 100644
--- a/easycv/models/detection/detectors/yolox/yolox.py
+++ b/easycv/models/detection/detectors/yolox/yolox.py
@@ -44,7 +44,7 @@ def __init__(self,
         self.head = build_head(head)
 
         self.apply(init_yolo)  # init_yolo(self)
-        self.num_classes = head.num_classes
+        self.num_classes = self.head.num_classes
         self.test_conf = test_conf
         self.nms_thre = nms_thre
 
diff --git a/easycv/models/detection/detectors/yolox_edge/yolox_edge.py b/easycv/models/detection/detectors/yolox_edge/yolox_edge.py
index 7fbf7a2b..cc94362c 100644
--- a/easycv/models/detection/detectors/yolox_edge/yolox_edge.py
+++ b/easycv/models/detection/detectors/yolox_edge/yolox_edge.py
@@ -1,7 +1,8 @@
 # Copyright (c) Alibaba, Inc. and its affiliates.
 import torch.nn as nn
 
-from easycv.models.builder import MODELS
+from easycv.models.builder import (MODELS, build_backbone, build_head,
+                                   build_neck)
 from easycv.models.detection.detectors.yolox.yolo_head import YOLOXHead
 from easycv.models.detection.detectors.yolox.yolo_pafpn import YOLOPAFPN
 from easycv.models.detection.detectors.yolox.yolox import YOLOX
@@ -23,44 +24,21 @@ class YOLOX_EDGE(YOLOX):
     """
 
     def __init__(self,
-                 stage: str = 'EDGE',
-                 model_type: str = 's',
-                 num_classes: int = 80,
-                 test_size: tuple = (640, 640),
-                 test_conf: float = 0.01,
-                 nms_thre: float = 0.65,
-                 pretrained: str = None,
-                 depth: float = 1.0,
-                 width: float = 1.0,
-                 max_model_params: float = 0.0,
-                 max_model_flops: float = 0.0,
-                 activation: str = 'silu',
-                 in_channels: list = [256, 512, 1024],
-                 backbone=None,
-                 head=None):
-        super(YOLOX_EDGE, self).__init__()
-
-        if backbone is None:
-            self.backbone = YOLOPAFPN(
-                depth,
-                width,
-                in_channels=in_channels,
-                depthwise=True,
-                act=activation)
-        if head is None:
-            self.head = YOLOXHead(
-                num_classes,
-                width,
-                in_channels=in_channels,
-                depthwise=True,
-                act=activation,
-                stage=stage)
+                 backbone,
+                 test_conf,
+                 nms_thre,
+                 head=None,
+                 neck=None,
+                 pretrained=True):
+        super(YOLOX, self).__init__()
+
+        self.pretrained = pretrained
+        self.backbone = build_backbone(backbone)
+        if neck is not None:
+            self.neck = build_neck(neck)
+        self.head = build_head(head)
 
         self.apply(init_yolo)  # init_yolo(self)
-        self.head.initialize_biases(1e-2)
-
-        self.stage = stage
-        self.num_classes = num_classes
+        self.num_classes = self.head.num_classes
         self.test_conf = test_conf
         self.nms_thre = nms_thre
-        self.test_size = test_size
diff --git a/easycv/models/loss/iou_loss.py b/easycv/models/loss/iou_loss.py
index eb1af973..0cf1ef18 100644
--- a/easycv/models/loss/iou_loss.py
+++ b/easycv/models/loss/iou_loss.py
@@ -75,6 +75,7 @@ def giou_loss(pred, target, eps=1e-7, xyxy=True):
 
 @LOSSES.register_module
 class YOLOX_IOULoss(nn.Module):
+
     def __init__(self, reduction='none', loss_type='iou'):
         super(YOLOX_IOULoss, self).__init__()
         self.reduction = reduction
diff --git a/easycv/utils/mmlab_utils.py b/easycv/utils/mmlab_utils.py
index db7e94e6..bff3040a 100644
--- a/easycv/utils/mmlab_utils.py
+++ b/easycv/utils/mmlab_utils.py
@@ -14,7 +14,7 @@
 
 try:
     from mmcv.runner.hooks import HOOKS
-    # HOOKS._module_dict.pop('YOLOXLrUpdaterHook', None)
+    HOOKS._module_dict.pop('YOLOXLrUpdaterHook', None)
     from mmdet.models.builder import MODELS as MMMODELS
     from mmdet.models.builder import BACKBONES as MMBACKBONES
     from mmdet.models.builder import NECKS as MMNECKS
diff --git a/test.py b/test.py
deleted file mode 100755
index d1d37a6c..00000000
--- a/test.py
+++ /dev/null
@@ -1,115 +0,0 @@
-# from easycv.models.detection.detectors.yolox import YOLOX
-import sys
-
-import numpy as np
-import torch
-from PIL import Image
-from torchvision.transforms import Compose
-
-from easycv.datasets.registry import PIPELINES
-from easycv.models import build_model
-from easycv.models.detection.detectors.yolox.postprocess import \
-    create_tensorrt_postprocess
-from easycv.models.detection.utils import postprocess
-from easycv.utils.checkpoint import load_checkpoint
-from easycv.utils.config_tools import mmcv_config_fromfile
-from easycv.utils.registry import build_from_cfg
-
-if __name__ == '__main__':
-    #a = YOLOX(decode_in_inference=False).eval()
-    cfg = sys.argv[1]
-    ckpt_path = sys.argv[2]
-
-    cfg = mmcv_config_fromfile(cfg)
-    model = build_model(cfg.model)
-    load_checkpoint(model, ckpt_path, map_location='cpu')
-    model = model.eval()
-
-    test_pipeline = cfg.test_pipeline
-    CLASSES = cfg.CLASSES
-
-    pipeline = [build_from_cfg(p, PIPELINES) for p in test_pipeline]
-    pipeline = Compose(pipeline)
-
-    # 8400 is hard-coded; should be reimplemented as sum((img_w / stride_i) * (img_h / stride_i))
-    example_scores = torch.randn([1, 8400, 85], dtype=torch.float32)
-    trt_ext = create_tensorrt_postprocess(
-        example_scores, iou_thres=model.nms_thre, score_thres=model.test_conf)
-
-    # img_path = '/apsara/xinyi.zxy/data/coco/val2017/000000129062.jpg'
-    img_path = '/apsara/xinyi.zxy/data/coco/val2017/000000254016.jpg'
-    # img = cv2.imread(img_path)
-    img = Image.open(img_path)
-    if type(img) is not np.ndarray:
-        img = np.asarray(img)
-
-    # ori_img_shape = img.shape[:2]
-    data_dict = {'img': img}
-    data_dict = pipeline(data_dict)
-    img = data_dict['img']
-    img = torch.unsqueeze(img._data, 0)
-    # print(img.shape)
-    model.decode_in_inference = False
-    # print(type(model), model.decode_in_inference)
-    c = model.forward_export(img)
-
-    # print(type(c), c.shape)
-    print(model.test_conf, model.nms_thre, model.num_classes,
-          model.decode_in_inference)
-    tc = model.head.decode_outputs(c, c[0].type())
-    # print(type(tc))
-    # print(tc.shape)
-
-    import copy
-    tcback = copy.deepcopy(tc)
-
-    tpa = postprocess(tc, model.num_classes, model.test_conf,
-                      model.nms_thre)[0]
-    # print(tpa)
-    tpa[:, 4] = tpa[:, 4] * tpa[:, 5]
-    tpa[:, 5] = tpa[:, 6]
-    tpa = tpa[:, :6]
-    # print("fuck tpa:", len(tpa), tpa[0].shape)
-    box_a = tpa[:, :4]
-    score_a = tpa[:, 4]
-    id_a = tpa[:, 5]
-    # print(tpa)
-
-    # trt_ext must be cuda
-    tcback = tcback
-    tpb = trt_ext.forward(tcback)
-    # print("fuck tpb:",len(tpb))
-
-    valid_length = min(len(tpa), tpb[2].shape[1])
-    print(valid_length)
-    valid_length = min(valid_length, 30)
-
-    box_a = box_a[:valid_length]
-    score_a = score_a[:valid_length]
-    id_a = id_a[:valid_length]
-
-    print(tpb[1].shape)
-    print(tpb[2].shape)
-    print(tpb[3].shape)
-
-    box_b = tpb[1][:, :valid_length, :].cpu().view(box_a.shape)
-    score_b = tpb[2][:, :valid_length].cpu().view(score_a.shape)
-    id_b = tpb[3][:, :valid_length].cpu().view(id_a.shape)
-
-    def get_diff(input_a, input_b, name='score'):
-        print('name:', name)
-        print('shape:', input_a.shape)
-        print('max_diff  :', torch.max(input_a - input_b))
-        print('avg_diff  :', torch.mean(input_a - input_b))
-        print('totol_diff:', torch.sum(input_a - input_b))
-
-    get_diff(box_a, box_b, 'box')
-    get_diff(score_a, score_b, 'score')
-    get_diff(id_a, id_a, 'id')
-
-    if 0:
-        from easycv.predictors import TorchYoloXPredictor
-        img = Image.open(img_path)
-        pred = TorchYoloXPredictor('models/predict.pt')
-        m = pred.predict([img])
-        print(m)
diff --git a/tests/models/detection/yolox/test_yolox.py b/tests/models/detection/yolox/test_yolox.py
index 45fd955e..1b09d638 100644
--- a/tests/models/detection/yolox/test_yolox.py
+++ b/tests/models/detection/yolox/test_yolox.py
@@ -15,8 +15,13 @@ def setUp(self):
     def test_yolox(self):
         for model_type in ['s', 'm', 'l', 'x', 'tiny', 'nano']:
             model = YOLOX(
-                num_classes=2,
-                model_type=model_type,  # s m l x tiny nano
+                backbone=dict(
+                    type='YOLOPAFPN',
+                    backbone='CSPDarknet',
+                    model_type=model_type,  # s m l x tiny nano
+                    neck='yolo'),
+                head=dict(
+                    type='YOLOXHead', model_type=model_type, num_classes=2),
                 test_conf=0.01,
                 nms_thre=0.65)
             model = model.cuda()
diff --git a/tests/models/detection/yolox_edge/test_yolox_edge.py b/tests/models/detection/yolox_edge/test_yolox_edge.py
index 37fcd8d1..26a3d2d5 100644
--- a/tests/models/detection/yolox_edge/test_yolox_edge.py
+++ b/tests/models/detection/yolox_edge/test_yolox_edge.py
@@ -15,8 +15,13 @@ def setUp(self):
     def test_yolox_edge(self):
         for model_type in ['s', 'm', 'l', 'x', 'tiny', 'nano']:
             model = YOLOX_EDGE(
-                num_classes=2,
-                model_type=model_type,  # s m l x tiny nano
+                backbone=dict(
+                    type='YOLOPAFPN',
+                    backbone='CSPDarknet',
+                    model_type=model_type,  # s m l x tiny nano
+                    neck='yolo'),
+                head=dict(
+                    type='YOLOXHead', model_type=model_type, num_classes=2),
                 test_conf=0.01,
                 nms_thre=0.65)
             model = model.cuda()
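With this patch, YOLOX and YOLOX_EDGE are constructed from backbone/head config dicts (via build_backbone/build_head/build_neck) instead of the old model_type/num_classes arguments. A minimal sketch of the equivalent config-file entry, reusing the values from the unit tests above (type names and any defaults beyond that are assumptions):

model = dict(
    type='YOLOX',
    backbone=dict(
        type='YOLOPAFPN',
        backbone='CSPDarknet',
        model_type='s',  # s m l x tiny nano
        neck='yolo'),
    head=dict(type='YOLOXHead', model_type='s', num_classes=2),
    test_conf=0.01,
    nms_thre=0.65)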

From a8ba8fde172f474e379b87764cbd451fcac1c8c9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=91=A8=E6=A5=BC?= <zhoulou.wzh@alibaba-inc.com>
Date: Mon, 15 Aug 2022 21:14:52 +0800
Subject: [PATCH 42/69] e2e trt_nms plugin export support and numeric test

---
 easycv/apis/export.py                         | 21 +++--
 .../detectors/yolox/yolo_head_template.py     | 25 +++---
 .../detection/detectors/yolox/yolo_pafpn.py   |  2 +-
 .../models/detection/detectors/yolox/yolox.py | 21 +++--
 easycv/predictors/detector.py                 | 43 +++++++---
 export_log.txt                                | 18 +++++
 numeric_test.py                               | 79 +++++++++++++++++++
 test.py                                       | 64 ++++++++-------
 8 files changed, 202 insertions(+), 71 deletions(-)
 create mode 100644 export_log.txt
 create mode 100644 numeric_test.py

diff --git a/easycv/apis/export.py b/easycv/apis/export.py
index b7f828b6..a483098d 100644
--- a/easycv/apis/export.py
+++ b/easycv/apis/export.py
@@ -54,6 +54,7 @@ def export(cfg, ckpt_path, filename):
         load_checkpoint(model, ckpt_path, map_location='cpu')
     else:
         cfg.model.backbone.pretrained = False
+    
     model = reparameterize_models(model)
 
     if isinstance(model, MOCO) or isinstance(model, DINO):
@@ -201,13 +202,19 @@ def _export_yolox(model, cfg, filename):
 
         input = 255 * torch.rand((batch_size, 3) + img_scale)
 
+        preprocess_fn = None
+        postprocess_fn = None
+        if end2end:
+            preprocess_fn = PreProcess(target_size=img_scale, keep_ratio=True)
+            postprocess_fn= DetPostProcess(max_det=100, score_thresh=0.5)
+            if cfg.model.get('use_trt_nms', False):
+                postprocess_fn = None
+        
         model_export = End2endModelExportWrapper(
             model,
             input.to(device),
-            preprocess_fn=PreProcess(target_size=img_scale, keep_ratio=True)
-            if end2end else None,
-            postprocess_fn=DetPostProcess(max_det=100, score_thresh=0.5)
-            if end2end else None,
+            preprocess_fn=preprocess_fn,
+            postprocess_fn=postprocess_fn,
             trace_model=True,
         )
 
@@ -223,7 +230,7 @@ def _export_yolox(model, cfg, filename):
         if getattr(cfg.export, 'export_blade', False):
             blade_config = cfg.export.get(
                 'blade_config',
-                dict(enable_fp16=True, fp16_fallback_op_ratio=0.05))
+                dict(enable_fp16=True, fp16_fallback_op_ratio=0.3))
 
             from easycv.toolkit.blade import blade_env_assert, blade_optimize
 
@@ -673,8 +680,8 @@ def __init__(self,
         self.preprocess_fn = preprocess_fn
         self.postprocess_fn = postprocess_fn
 
-        if postprocess_fn == None:
-            self.model.head.decode_in_inference = False
+        # if postprocess_fn == None:
+        #     self.model.head.decode_in_inference = False
 
         self.trace_model = trace_model
         if self.trace_model:
diff --git a/easycv/models/detection/detectors/yolox/yolo_head_template.py b/easycv/models/detection/detectors/yolox/yolo_head_template.py
index 4cf77d43..eca58e99 100644
--- a/easycv/models/detection/detectors/yolox/yolo_head_template.py
+++ b/easycv/models/detection/detectors/yolox/yolo_head_template.py
@@ -9,6 +9,7 @@
 import torch.nn.functional as F
 
 from easycv.models.backbones.network_blocks import BaseConv, DWConv
+from easycv.models.backbones.repvgg_yolox_backbone import RepVGGBlock
 from easycv.models.detection.utils import bboxes_iou
 from easycv.models.loss import GIoULoss, IOUloss, IoULoss
 
@@ -74,35 +75,35 @@ def __init__(self,
                 ))
             self.cls_convs.append(
                 nn.Sequential(*[
-                    Conv(
+                    RepVGGBlock(
                         in_channels=int(256 * width),
                         out_channels=int(256 * width),
-                        ksize=3,
-                        stride=1,
+                        # ksize=3,
+                        # stride=1,
                         act=act,
                     ),
-                    Conv(
+                    RepVGGBlock(
                         in_channels=int(256 * width),
                         out_channels=int(256 * width),
-                        ksize=3,
-                        stride=1,
+                        # ksize=3,
+                        # stride=1,
                         act=act,
                     ),
                 ]))
             self.reg_convs.append(
                 nn.Sequential(*[
-                    Conv(
+                    RepVGGBlock(
                         in_channels=int(256 * width),
                         out_channels=int(256 * width),
-                        ksize=3,
-                        stride=1,
+                        # ksize=3,
+                        # stride=1,
                         act=act,
                     ),
-                    Conv(
+                    RepVGGBlock(
                         in_channels=int(256 * width),
                         out_channels=int(256 * width),
-                        ksize=3,
-                        stride=1,
+                        # ksize=3,
+                        # stride=1,
                         act=act,
                     ),
                 ]))
diff --git a/easycv/models/detection/detectors/yolox/yolo_pafpn.py b/easycv/models/detection/detectors/yolox/yolo_pafpn.py
index 741a4922..25a34181 100644
--- a/easycv/models/detection/detectors/yolox/yolo_pafpn.py
+++ b/easycv/models/detection/detectors/yolox/yolo_pafpn.py
@@ -33,7 +33,7 @@ def __init__(self,
                  in_channels=[256, 512, 1024],
                  depthwise=False,
                  act='silu',
-                 asff_channel=2,
+                 asff_channel=16,
                  use_att=None,
                  expand_kernel=3,
                  backbone='CSPDarknet',
diff --git a/easycv/models/detection/detectors/yolox/yolox.py b/easycv/models/detection/detectors/yolox/yolox.py
index fc2a231a..7b9fddec 100644
--- a/easycv/models/detection/detectors/yolox/yolox.py
+++ b/easycv/models/detection/detectors/yolox/yolox.py
@@ -11,6 +11,7 @@
 from easycv.models.builder import (MODELS, build_backbone, build_head,
                                    build_neck)
 from easycv.models.detection.utils import postprocess
+from easycv.models.detection.detectors.yolox.postprocess import create_tensorrt_postprocess
 
 
 def init_yolo(M):
@@ -34,7 +35,8 @@ def __init__(self,
                  nms_thre,
                  head=None,
                  neck=None,
-                 pretrained=True):
+                 pretrained=True,
+                 use_trt_nms=False):
         super(YOLOX, self).__init__()
 
         self.pretrained = pretrained
@@ -47,6 +49,12 @@ def __init__(self,
         self.num_classes = head.num_classes
         self.test_conf = test_conf
         self.nms_thre = nms_thre
+        self.use_trt_nms = use_trt_nms
+
+        if use_trt_nms:
+            example_scores = torch.randn([1, 8400, 85], dtype=torch.float32)
+            self.trt_nms = create_tensorrt_postprocess(example_scores, iou_thres=self.nms_thre, score_thres=self.test_conf)
+            self.head.decode_in_inference = True
 
     def forward_train(self,
                       img: Tensor,
@@ -100,10 +108,8 @@ def forward_test(self, img: Tensor, img_metas=None) -> Tensor:
         with torch.no_grad():
             fpn_outs = self.backbone(img)
             outputs = self.head(fpn_outs)
-
             outputs = postprocess(outputs, self.num_classes, self.test_conf,
                                   self.nms_thre)
-
             detection_boxes = []
             detection_scores = []
             detection_classes = []
@@ -157,7 +163,12 @@ def forward_export(self, img):
             outputs = self.head(fpn_outs)
 
             if self.head.decode_in_inference:
-                outputs = postprocess(outputs, self.num_classes,
-                                      self.test_conf, self.nms_thre)
+                if self.use_trt_nms:
+                    print("fucking trt nms")
+                    outputs = self.trt_nms.forward(outputs)
+                else:
+                    print("fucking original nms")
+                    outputs = postprocess(outputs, self.num_classes,
+                                        self.test_conf, self.nms_thre)
 
         return outputs
diff --git a/easycv/predictors/detector.py b/easycv/predictors/detector.py
index 9f0b7f15..18da1126 100644
--- a/easycv/predictors/detector.py
+++ b/easycv/predictors/detector.py
@@ -43,6 +43,7 @@ def __init__(self,
                  model_path,
                  max_det=100,
                  score_thresh=0.5,
+                 use_trt_nms =False,
                  model_config=None):
         """
         init model
@@ -60,6 +61,7 @@ def __init__(self,
             'blade')
 
         self.use_blade = model_path.endswith('blade')
+        self.use_trt_nms = use_trt_nms
 
         if self.use_blade:
             import torch_blade
@@ -76,7 +78,6 @@ def __init__(self,
             with io.open(model_path, 'rb') as infile:
                 map_location = 'cpu' if self.device == 'cpu' else 'cuda'
                 self.model = torch.jit.load(infile, map_location)
-
             with io.open(model_path + '.config.json', 'r') as infile:
                 self.cfg = json.load(infile)
                 test_pipeline = self.cfg['test_pipeline']
@@ -117,7 +118,6 @@ def __init__(self,
 
             self.model.to(self.device)
             self.model.eval()
-
             test_pipeline = self.cfg.test_pipeline
             self.CLASSES = self.cfg.CLASSES
 
@@ -178,12 +178,21 @@ def predict(self, input_data_list, batch_size=-1, to_numpy=True):
                 img = np.asarray(img)
 
             ori_img_shape = img.shape[:2]
-
+            speed_test=1
             if self.end2end:
                 # the input should also be as the type of uint8 as mmcv
                 img = torch.from_numpy(img).to(self.device)
-                det_out = self.model(img)
-
+                if self.use_trt_nms:
+                    for i in range(speed_test):
+                        tmp_out = self.model(img)
+                    det_out={}
+                    det_out['detection_boxes']=tmp_out[1]
+                    det_out['detection_scores']=tmp_out[2]
+                    det_out['detection_classes']=tmp_out[3]
+                else:
+                    for i in range(speed_test):
+                        det_out = self.model(img)
+                    
                 detection_scores = det_out['detection_scores']
 
                 if detection_scores is not None:
@@ -196,10 +205,9 @@ def predict(self, input_data_list, batch_size=-1, to_numpy=True):
                     detection_classes = []
 
                 if to_numpy:
-                    detection_scores = detection_scores.detach().numpy()
-                    detection_boxes = detection_boxes.detach().numpy()
-                    detection_classes = detection_classes.detach().numpy()
-
+                    detection_scores = detection_scores.cpu().detach().numpy()
+                    detection_boxes = detection_boxes.cpu().detach().numpy()
+                    detection_classes = detection_classes.cpu().detach().numpy()
             else:
                 data_dict = {'img': img}
                 data_dict = self.pipeline(data_dict)
@@ -208,10 +216,19 @@ def predict(self, input_data_list, batch_size=-1, to_numpy=True):
                 data_dict.pop('img')
 
                 if self.traceable:
-                    with torch.no_grad():
-                        det_out = self.post_assign(
-                            self.model(img),
-                            img_metas=[data_dict['img_metas']._data])
+                    if self.use_trt_nms:
+                        with torch.no_grad():
+                            for i in range(speed_test):
+                                tmp_out = self.model(img)
+                            det_out={}
+                            det_out['detection_boxes']=tmp_out[1]
+                            det_out['detection_scores']=tmp_out[2]
+                            det_out['detection_classes']=tmp_out[3]
+                    else:
+                        with torch.no_grad():
+                            det_out = self.post_assign(
+                                self.model(img),
+                                img_metas=[data_dict['img_metas']._data])
                 else:
                     with torch.no_grad():
                         det_out = self.model(
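The use_trt_nms branches above unpack the raw TensorRT NMS outputs by position. A hedged sketch of that tuple, based on the TRT8_NMS/ONNX_TRT8 outputs defined in postprocess.py (max_det and the dtypes follow the plugin definition; the dict keys mirror the non-TRT path):

# hypothetical unpacking of the TensorRT EfficientNMS outputs
num_det, det_boxes, det_scores, det_classes = self.model(img)
# num_det:      [batch, 1]           int32, number of valid detections per image
# det_boxes:    [batch, max_det, 4]  float, box coordinates
# det_scores:   [batch, max_det]     float
# det_classes:  [batch, max_det]     int32
det_out = dict(
    detection_boxes=det_boxes,
    detection_scores=det_scores,
    detection_classes=det_classes)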
diff --git a/export_log.txt b/export_log.txt
new file mode 100644
index 00000000..e56ee35f
--- /dev/null
+++ b/export_log.txt
@@ -0,0 +1,18 @@
+  646  PYTHONPATH=./ CUDA_VISIBLE_DEVICES=4 python tools/export.py models/reptood3_assf_bs32_nopost_220815.py  models/epoch_300.pth  models/output.blade
+  648  PYTHONPATH=./ CUDA_VISIBLE_DEVICES=4 python tools/export.py models/reptood3_assf_bs32_nopost_220815.py  models/epoch_300.pth  models/as2_tood32.blade
+  655  PYTHONPATH=./ CUDA_VISIBLE_DEVICES=4 python tools/export.py models/reptood3_assf_bs32_nopost_220815.py  models/epoch_300.pth  models/output.blade
+  668  PYTHONPATH=./ CUDA_VISIBLE_DEVICES=4 python tools/export.py models/reptood3_assf_bs32_nopost_220815.py  models/epoch_300.pth  models/output_bs1.blade
+  677  PYTHONPATH=./ CUDA_VISIBLE_DEVICES=4 python tools/export.py models/reptood3_assf_bs32_nopost_220815.py  models/epoch_300.pth  models/output_bs1.blade
+  688  PYTHONPATH=./ CUDA_VISIBLE_DEVICES=4 python tools/export.py models/reptood3_assf_bs32_nopost_220815.py  models/epoch_300.pth  models/output_bs1.blade
+  698  PYTHONPATH=./ CUDA_VISIBLE_DEVICES=4 python tools/export.py models/reptood3_assf_bs32_nopost_220815.py  models/epoch_300.pth  models/output_bs1_e2e.blade
+  708  PYTHONPATH=./ CUDA_VISIBLE_DEVICES=4 python tools/export.py models/reptood3_assf_bs32_nopost_220815_e2e.py  models/epoch_300.pth  models/output_bs1_e2e.blade
+  713  PYTHONPATH=./ CUDA_VISIBLE_DEVICES=4 python tools/export.py models/reptood3_assf_bs32_nopost_220815_e2e_fp16005.py  models/epoch_300.pth  models/output_bs1_e2e_fp16005.blade
+  714  PYTHONPATH=./ CUDA_VISIBLE_DEVICES=4 python tools/export.py models/reptood3_assf_bs32_nopost_220815_e2e_005.py  models/epoch_300.pth  models/output_bs1_e2e_fp005.blade
+  716  PYTHONPATH=./ CUDA_VISIBLE_DEVICES=4 python tools/export.py models/reptood3_assf_bs32_nopost_220815_e2e_f005.py  models/epoch_300.pth  models/output_bs1_e2e_f005.blade
+  719  PYTHONPATH=./ CUDA_VISIBLE_DEVICES=4 python tools/export.py models/reptood3_assf_bs32_nopost_220815_e2e_f005.py  models/epoch_300.pth  models/output_bs1_e2e_f005.blade
+  738  PYTHONPATH=./ CUDA_VISIBLE_DEVICES=4 python tools/export.py models/reptood3_assf_bs32_nopost_220815_e2e_f005_trtnms.py  models/epoch_300.pth  models/output_bs1_e2e_f005_trtnms.blade
+  741  PYTHONPATH=./ CUDA_VISIBLE_DEVICES=4 python tools/export.py models/reptood3_assf_bs32_nopost_220815_e2e_f005_trtnms.py  models/epoch_300.pth  models/output_bs1_e2e_f005_trtnms.blade
+  767  PYTHONPATH=./ CUDA_VISIBLE_DEVICES=4 python tools/export.py models/reptood3_assf_bs32_nopost_220815_e2e_f005_trtnms.py  models/epoch_300.pth  models/output_bs1_e2e_f005_trtnms.blade
+  770  PYTHONPATH=./ CUDA_VISIBLE_DEVICES=4 python tools/export.py models/reptood3_assf_bs32_nopost_220815_e2e_f005_trtnms.py  models/epoch_300.pth  models/output_bs1_e2e_f005_trtnms.blade
+  774  history | grep export | grep CUDA_VISIBLE_DEVICES=4
+  775  history | grep export | grep CUDA_VISIBLE_DEVICES=4 > export_log.txt
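export_log.txt is a raw shell-history dump of the export runs; the general form of the command is (device index and file paths are placeholders):

PYTHONPATH=./ CUDA_VISIBLE_DEVICES=0 python tools/export.py ${EXPORT_CONFIG} ${CHECKPOINT} ${OUTPUT_MODEL}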
diff --git a/numeric_test.py b/numeric_test.py
new file mode 100644
index 00000000..b20ec1b9
--- /dev/null
+++ b/numeric_test.py
@@ -0,0 +1,79 @@
+from easycv.models.detection.detectors.yolox.postprocess import create_tensorrt_postprocess
+import torch
+from torchvision.transforms import Compose
+
+from easycv.models import build_model
+from easycv.utils.checkpoint import load_checkpoint
+from easycv.utils.config_tools import mmcv_config_fromfile
+from easycv.utils.registry import build_from_cfg
+from easycv.datasets.registry import PIPELINES
+from easycv.models.detection.utils import postprocess
+
+import sys
+import numpy as np
+from PIL import Image
+import time
+
+from contextlib import contextmanager
+@contextmanager
+def timeit_context(name):
+    startTime = time.time()
+    yield
+    elapsedTime = time.time() - startTime
+    print('[{}] finished in {} ms'.format(name, int(elapsedTime * 1000)))
+
+if __name__=='__main__':
+    if 1:
+        img_path = '/apsara/xinyi.zxy/data/coco/val2017/000000254016.jpg'
+        from easycv.predictors import TorchYoloXPredictor
+        img = Image.open(img_path)
+
+        pred = TorchYoloXPredictor('models/output.pt')
+        m = pred.predict([img])
+        print("fucking m :", m)
+
+        pred0 = TorchYoloXPredictor('models/output_bs1.blade.jit')
+        for i in range(10):
+            m0 = pred0.predict([img])
+        with timeit_context('m0 speed test'):
+            for i in range(100):
+                m0 = pred0.predict([img]) 
+        print("fucking m0:", m0)
+        
+        pred1 = TorchYoloXPredictor('models/output_bs1_e2e.blade.jit')
+        for i in range(10):
+            m1 = pred1.predict([img])
+        with timeit_context('m1 speed test'):
+            for i in range(100):
+                m1 = pred1.predict([img])       
+        print("fucking m1:", m1)
+
+        # pred2 = TorchYoloXPredictor('models/output_bs1_e2e.blade.jit')
+        # m2 = pred2.predict([img])
+        # print("fucking m2:", m2)
+
+        # pred3 = TorchYoloXPredictor('models/output_bs1_e2e_f005.blade.jit')
+        # m3 = pred3.predict([img])
+        # print("fucking m3:", m3)
+
+        # pred4 = TorchYoloXPredictor('models/output_trtnms.pt')
+        # m4 = pred4.predict([img]) 
+        # print("fucking m4:", m4)
+
+        pred5 = TorchYoloXPredictor(model_path='models/output_bs1_noe2e_f005_trtnms.blade.blade', use_trt_nms=True)
+        # m5 = pred5.predict([img]) 
+        for i in range(10):
+            m5 = pred5.predict([img])
+        with timeit_context('m5 speed test'):
+            for i in range(100):
+                m5 = pred5.predict([img])     
+        print("fucking m5:", m5)
+
+        pred6 = TorchYoloXPredictor(model_path='models/output_bs1_e2e_f005_trtnms.blade.blade', use_trt_nms=True)
+        # m5 = pred5.predict([img]) 
+        for i in range(10):
+            m6 = pred6.predict([img])
+        with timeit_context('m6 speed test'):
+            for i in range(100):
+                m6 = pred5.predict([img])     
+        print("fucking m6:", m6)
\ No newline at end of file
diff --git a/test.py b/test.py
index d1d37a6c..a8de94c3 100755
--- a/test.py
+++ b/test.py
@@ -1,21 +1,22 @@
 # from easycv.models.detection.detectors.yolox import YOLOX
-import sys
-
-import numpy as np
+from easycv.models.detection.detectors.yolox.postprocess import create_tensorrt_postprocess
 import torch
-from PIL import Image
 from torchvision.transforms import Compose
 
-from easycv.datasets.registry import PIPELINES
 from easycv.models import build_model
-from easycv.models.detection.detectors.yolox.postprocess import \
-    create_tensorrt_postprocess
-from easycv.models.detection.utils import postprocess
 from easycv.utils.checkpoint import load_checkpoint
 from easycv.utils.config_tools import mmcv_config_fromfile
 from easycv.utils.registry import build_from_cfg
+from easycv.datasets.registry import PIPELINES
+from easycv.models.detection.utils import postprocess
+
 
-if __name__ == '__main__':
+
+import sys
+import numpy as np
+from PIL import Image
+
+if __name__=='__main__':
     #a = YOLOX(decode_in_inference=False).eval()
     cfg = sys.argv[1]
     ckpt_path = sys.argv[2]
@@ -31,10 +32,9 @@
     pipeline = [build_from_cfg(p, PIPELINES) for p in test_pipeline]
     pipeline = Compose(pipeline)
 
-    # 8400 is hard-coded; should be reimplemented as sum((img_w / stride_i) * (img_h / stride_i))
+    # 8400 is hard-coded; should be reimplemented as sum((img_w / stride_i) * (img_h / stride_i))
     example_scores = torch.randn([1, 8400, 85], dtype=torch.float32)
-    trt_ext = create_tensorrt_postprocess(
-        example_scores, iou_thres=model.nms_thre, score_thres=model.test_conf)
+    trt_ext = create_tensorrt_postprocess(example_scores, iou_thres=model.nms_thre, score_thres=model.test_conf)
 
     # img_path = '/apsara/xinyi.zxy/data/coco/val2017/000000129062.jpg'
     img_path = '/apsara/xinyi.zxy/data/coco/val2017/000000254016.jpg'
@@ -52,10 +52,9 @@
     model.decode_in_inference = False
     # print(type(model), model.decode_in_inference)
     c = model.forward_export(img)
-
+    
     # print(type(c), c.shape)
-    print(model.test_conf, model.nms_thre, model.num_classes,
-          model.decode_in_inference)
+    print(model.test_conf, model.nms_thre, model.num_classes, model.decode_in_inference)
     tc = model.head.decode_outputs(c, c[0].type())
     # print(type(tc))
     # print(tc.shape)
@@ -63,26 +62,25 @@
     import copy
     tcback = copy.deepcopy(tc)
 
-    tpa = postprocess(tc, model.num_classes, model.test_conf,
-                      model.nms_thre)[0]
+    tpa = postprocess(tc, model.num_classes, model.test_conf, model.nms_thre)[0]
     # print(tpa)
     tpa[:, 4] = tpa[:, 4] * tpa[:, 5]
     tpa[:, 5] = tpa[:, 6]
     tpa = tpa[:, :6]
     # print("fuck tpa:", len(tpa), tpa[0].shape)
-    box_a = tpa[:, :4]
-    score_a = tpa[:, 4]
-    id_a = tpa[:, 5]
+    box_a = tpa[:,:4]
+    score_a = tpa[:,4]
+    id_a = tpa[:,5]
     # print(tpa)
 
-    # trt_ext must be cuda
+    # trt_ext must be cuda 
     tcback = tcback
     tpb = trt_ext.forward(tcback)
     # print("fuck tpb:",len(tpb))
-
+     
     valid_length = min(len(tpa), tpb[2].shape[1])
     print(valid_length)
-    valid_length = min(valid_length, 30)
+    valid_length = min(valid_length,30)
 
     box_a = box_a[:valid_length]
     score_a = score_a[:valid_length]
@@ -92,16 +90,16 @@
     print(tpb[2].shape)
     print(tpb[3].shape)
 
-    box_b = tpb[1][:, :valid_length, :].cpu().view(box_a.shape)
-    score_b = tpb[2][:, :valid_length].cpu().view(score_a.shape)
-    id_b = tpb[3][:, :valid_length].cpu().view(id_a.shape)
-
+    box_b = tpb[1][:,:valid_length,:].cpu().view(box_a.shape)
+    score_b = tpb[2][:,:valid_length].cpu().view(score_a.shape)
+    id_b = tpb[3][:,:valid_length].cpu().view(id_a.shape)
+    
     def get_diff(input_a, input_b, name='score'):
-        print('name:', name)
-        print('shape:', input_a.shape)
-        print('max_diff  :', torch.max(input_a - input_b))
-        print('avg_diff  :', torch.mean(input_a - input_b))
-        print('totol_diff:', torch.sum(input_a - input_b))
+        print("name:", name)
+        print("shape:", input_a.shape)
+        print("max_diff  :",torch.max(input_a-input_b))
+        print("avg_diff  :",torch.mean(input_a-input_b))
+        print("totol_diff:",torch.sum(input_a-input_b))
 
     get_diff(box_a, box_b, 'box')
     get_diff(score_a, score_b, 'score')
@@ -112,4 +110,4 @@ def get_diff(input_a, input_b, name='score'):
         img = Image.open(img_path)
         pred = TorchYoloXPredictor('models/predict.pt')
         m = pred.predict([img])
-        print(m)
+        print(m)
\ No newline at end of file

From 77973f1d5377cb2f2878cc88d17d220f472e5b5c Mon Sep 17 00:00:00 2001
From: zouxinyi0625 <zouxinyi.zxy@alibaba-inc.com>
Date: Tue, 16 Aug 2022 10:21:45 +0800
Subject: [PATCH 43/69] fix bug

---
 easycv/models/detection/detectors/detection.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/easycv/models/detection/detectors/detection.py b/easycv/models/detection/detectors/detection.py
index fe91fbf8..f525d168 100644
--- a/easycv/models/detection/detectors/detection.py
+++ b/easycv/models/detection/detectors/detection.py
@@ -8,7 +8,6 @@
 
 @MODELS.register_module
 class Detection(BaseModel):
-
     def __init__(self, backbone, head=None, neck=None, pretrained=True):
         super(Detection, self).__init__()
 

From e8b6607611965688481f08fb5d47699336b59d8c Mon Sep 17 00:00:00 2001
From: wuziheng <zhoulou.wzh@alibaba-inc.com>
Date: Tue, 16 Aug 2022 12:00:27 +0800
Subject: [PATCH 44/69] fix interface for yolox use_trt_efficientnms

---
 easycv/apis/export.py                         |  28 ++-
 .../detection/detectors/yolox/postprocess.py  |   5 +-
 .../models/detection/detectors/yolox/test.py  |  21 ---
 .../detection/detectors/yolox/yolo_head.py    |   2 +
 .../detectors/yolox/yolo_head_template.py     |  12 ++
 .../models/detection/detectors/yolox/yolox.py |  27 +--
 easycv/models/detection/utils/boxes.py        |   3 -
 easycv/predictors/detector.py                 |  18 +-
 easycv/toolkit/blade/__init__.py              |   1 +
 easycv/toolkit/blade/trt_plugin_utils.py      | 164 ++++++++++++++++++
 10 files changed, 229 insertions(+), 52 deletions(-)
 create mode 100644 easycv/toolkit/blade/trt_plugin_utils.py

diff --git a/easycv/apis/export.py b/easycv/apis/export.py
index 422250ba..52c05c4c 100644
--- a/easycv/apis/export.py
+++ b/easycv/apis/export.py
@@ -194,6 +194,9 @@ def _export_yolox(model, cfg, filename):
 
         batch_size = cfg.export.get('batch_size', 1)
         static_opt = cfg.export.get('static_opt', True)
+        use_trt_efficientnms = cfg.export.get('use_trt_efficientnms', False)
+        
+        # check the image scale and assign the input
         img_scale = cfg.get('img_scale', (640, 640))
 
         assert (
@@ -202,14 +205,37 @@ def _export_yolox(model, cfg, filename):
 
         input = 255 * torch.rand((batch_size, 3) + img_scale)
 
+        # assert use_trt_efficientnms only happens when static_opt=True
+        if static_opt is not True:
+            assert (
+                use_trt_efficientnms == False
+            ), 'Export YoloX predictor: use_trt_efficientnms=True is only supported when static_opt=True!'
+
+        # ignore DetPostProcess when use_trt_efficientnms 
         preprocess_fn = None
         postprocess_fn = None
         if end2end:
             preprocess_fn = PreProcess(target_size=img_scale, keep_ratio=True)
             postprocess_fn= DetPostProcess(max_det=100, score_thresh=0.5)
-            if cfg.model.get('use_trt_nms', False):
+            # use_trt_efficientnms = detection.boxes.postprocess + DetPostProcess
+            if use_trt_efficientnms:
+                logging.info('PAI-YOLOX: use_trt_efficientnms=True during export, we drop DetPostProcess, because trt_efficientnms = detection.boxes.postprocess + DetPostProcess!')
                 postprocess_fn = None
         
+        # set model use_trt_efficientnms
+        from easycv.toolkit.blade import create_tensorrt_efficientnms
+        if hasattr(model, 'get_nmsboxes_num'):
+            nmsbox_num = model.get_nmsboxes_num(img_scale)
+        else:
+            logging.warning('PAI-YOLOX: use_trt_efficientnms: model has no attr get_nmsboxes_num, using 8400 as default!')
+            nmsbox_num = 8400
+
+        tmp_example_scores = torch.randn([batch_size, nmsbox_num, 4 + 1 + len(cfg.CLASSES)],
+                                 dtype=torch.float32)
+        logging.info('PAI-YOLOX: use_trt_efficientnms with static shape [{}, {}, {}]'.format(batch_size, nmsbox_num, 4 + 1 + len(cfg.CLASSES)))
+        model.trt_efficientnms = create_tensorrt_efficientnms(tmp_example_scores, iou_thres=model.nms_thre, score_thres=model.test_conf)
+        model.use_trt_efficientnms = True
+
         model_export = End2endModelExportWrapper(
             model,
             input.to(device),
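All of the switches read above come from cfg.export. A hedged sketch of an export config that enables the TRT EfficientNMS path (the field names are the ones queried in this function; the concrete values are only illustrative):

export = dict(
    use_jit=True,
    export_blade=True,
    end2end=True,
    batch_size=1,
    static_opt=True,            # use_trt_efficientnms is asserted to require static_opt=True
    use_trt_efficientnms=True,  # DetPostProcess is dropped; the TRT plugin handles decode + NMS
    blade_config=dict(enable_fp16=True, fp16_fallback_op_ratio=0.3))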
diff --git a/easycv/models/detection/detectors/yolox/postprocess.py b/easycv/models/detection/detectors/yolox/postprocess.py
index 5464d515..a8407be4 100644
--- a/easycv/models/detection/detectors/yolox/postprocess.py
+++ b/easycv/models/detection/detectors/yolox/postprocess.py
@@ -3,7 +3,6 @@
 import torch
 from torch import nn
 
-
 class TRT8_NMS(torch.autograd.Function):
     '''TensorRT NMS operation'''
 
@@ -55,7 +54,6 @@ def symbolic(g,
         nums, boxes, scores, classes = out
         return nums, boxes, scores, classes
 
-
 class ONNX_TRT8(nn.Module):
     '''onnx module with TensorRT NMS operation.'''
 
@@ -87,7 +85,6 @@ def forward(self, x):
             self.score_activation, self.score_threshold)
         return num_det, det_boxes, det_scores, det_classes
 
-
 def create_tensorrt_postprocess(example_scores,
                                 iou_thres=0.45,
                                 score_thres=0.25):
@@ -102,7 +99,7 @@ def create_tensorrt_postprocess(example_scores,
 
     input_names = ['input']
     output_names = [
-        'num_det', 'det_boxes', 'det_example_scores', 'det_classes'
+        'num_det', 'detection_boxes', 'detection_scores', 'detection_classes'
     ]
     with io.BytesIO() as onnx_proto_f:
         torch.onnx.export(
diff --git a/easycv/models/detection/detectors/yolox/test.py b/easycv/models/detection/detectors/yolox/test.py
index 8e585983..d0d90b32 100644
--- a/easycv/models/detection/detectors/yolox/test.py
+++ b/easycv/models/detection/detectors/yolox/test.py
@@ -1,24 +1,9 @@
-<<<<<<< HEAD:test.py
 # from easycv.models.detection.detectors.yolox import YOLOX
 from easycv.models.detection.detectors.yolox.postprocess import create_tensorrt_postprocess
-=======
-# !!! Ignore this file for CR; we are still working on the TensorRT NMS problem.
-
-import sys
-
-import numpy as np
->>>>>>> 08bbba8177329f6f4a4925ec599f138054cdd9a6:easycv/models/detection/detectors/yolox/test.py
 import torch
 from torchvision.transforms import Compose
 
 from easycv.models import build_model
-<<<<<<< HEAD:test.py
-=======
-# from easycv.models.detection.detectors.yolox import YOLOX
-from easycv.models.detection.detectors.yolox.postprocess import \
-    create_tensorrt_postprocess
-from easycv.models.detection.utils import postprocess
->>>>>>> 08bbba8177329f6f4a4925ec599f138054cdd9a6:easycv/models/detection/detectors/yolox/test.py
 from easycv.utils.checkpoint import load_checkpoint
 from easycv.utils.config_tools import mmcv_config_fromfile
 from easycv.utils.registry import build_from_cfg
@@ -26,18 +11,12 @@
 from easycv.models.detection.utils import postprocess
 
 
-<<<<<<< HEAD:test.py
-
 import sys
 import numpy as np
 from PIL import Image
 
 if __name__=='__main__':
     #a = YOLOX(decode_in_inference=False).eval()
-=======
-if __name__ == '__main__':
-    # a = YOLOX(decode_in_inference=False).eval()
->>>>>>> 08bbba8177329f6f4a4925ec599f138054cdd9a6:easycv/models/detection/detectors/yolox/test.py
     cfg = sys.argv[1]
     ckpt_path = sys.argv[2]
 
diff --git a/easycv/models/detection/detectors/yolox/yolo_head.py b/easycv/models/detection/detectors/yolox/yolo_head.py
index 28de2cad..86d9dbd0 100644
--- a/easycv/models/detection/detectors/yolox/yolo_head.py
+++ b/easycv/models/detection/detectors/yolox/yolo_head.py
@@ -46,6 +46,8 @@ def __init__(
             reg_loss_type=reg_loss_type,
             decode_in_inference=decode_in_inference)
 
+
+
     def forward(self, xin, labels=None, imgs=None):
         outputs = []
         origin_preds = []
diff --git a/easycv/models/detection/detectors/yolox/yolo_head_template.py b/easycv/models/detection/detectors/yolox/yolo_head_template.py
index 58be88ef..d329536a 100644
--- a/easycv/models/detection/detectors/yolox/yolo_head_template.py
+++ b/easycv/models/detection/detectors/yolox/yolo_head_template.py
@@ -160,6 +160,18 @@ def initialize_biases(self, prior_prob):
             b.data.fill_(-math.log((1 - prior_prob) / prior_prob))
             conv.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
 
+    def get_nmsboxes_num(self, img_scale=(640, 640)):
+        """ Count all Yolox NMS box with img_scale and head stride config
+        """
+        assert (
+            len(img_scale) == 2
+        ), 'img_scale in the YoloX export config must be an (int, int) tuple!'
+
+        total_box_count = 0
+        for stride in self.strides:
+            total_box_count+= (img_scale[0] / stride) * (img_scale[1] / stride)
+        return total_box_count
+
     @abstractmethod
     def forward(self, xin, labels=None, imgs=None):
         pass
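get_nmsboxes_num replaces the 8400 constant that the test scripts hard-code for a (640, 640) input. A quick sanity check of the arithmetic, assuming the default YOLOX strides of 8/16/32 (the stride values are not shown in this hunk):

strides = (8, 16, 32)
img_scale = (640, 640)
print(sum((img_scale[0] / s) * (img_scale[1] / s) for s in strides))
# -> 8400.0, matching the example_scores shape [1, 8400, 85] used in test.py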
diff --git a/easycv/models/detection/detectors/yolox/yolox.py b/easycv/models/detection/detectors/yolox/yolox.py
index 4c5d5d3c..ef95f64b 100644
--- a/easycv/models/detection/detectors/yolox/yolox.py
+++ b/easycv/models/detection/detectors/yolox/yolox.py
@@ -35,8 +35,7 @@ def __init__(self,
                  nms_thre,
                  head=None,
                  neck=None,
-                 pretrained=True,
-                 use_trt_nms=False):
+                 pretrained=True):
         super(YOLOX, self).__init__()
 
         self.pretrained = pretrained
@@ -49,12 +48,16 @@ def __init__(self,
         self.num_classes = self.head.num_classes
         self.test_conf = test_conf
         self.nms_thre = nms_thre
-        self.use_trt_nms = use_trt_nms
+        self.use_trt_efficientnms = False  # TRT NMS will only be converted during export
+        self.trt_efficientnms = None
 
-        if use_trt_nms:
-            example_scores = torch.randn([1, 8400, 85], dtype=torch.float32)
-            self.trt_nms = create_tensorrt_postprocess(example_scores, iou_thres=self.nms_thre, score_thres=self.test_conf)
-            self.head.decode_in_inference = True
+    def get_nmsboxes_num(self, img_scale=(640, 640)):
+        """ Detection neck or head should provide nms box count information
+        """
+        if hasattr(self, neck, None) is not None:
+            return self.neck.get_nmsboxes_num(img_scale=(640, 640))
+        else:
+            return self.head.get_nmsboxes_num(img_scale=(640, 640))
 
     def forward_train(self,
                       img: Tensor,
@@ -161,13 +164,13 @@ def forward_export(self, img):
         with torch.no_grad():
             fpn_outs = self.backbone(img)
             outputs = self.head(fpn_outs)
-
             if self.head.decode_in_inference:
-                if self.use_trt_nms:
-                    print("fucking trt nms")
-                    outputs = self.trt_nms.forward(outputs)
+                if self.use_trt_efficientnms:
+                    if self.trt_efficientnms is not None:
+                        outputs = self.trt_efficientnms.forward(outputs)
+                    else:
+                        logging.error('PAI-YOLOX: use_trt_efficientnms is True, but the model has no trt_efficientnms attribute')
                 else:
-                    print("fucking original nms")
                     outputs = postprocess(outputs, self.num_classes,
                                         self.test_conf, self.nms_thre)
 
diff --git a/easycv/models/detection/utils/boxes.py b/easycv/models/detection/utils/boxes.py
index 9fb9771e..672694ca 100644
--- a/easycv/models/detection/utils/boxes.py
+++ b/easycv/models/detection/utils/boxes.py
@@ -38,9 +38,6 @@ def bboxes_iou(bboxes_a, bboxes_b, xyxy=True):
 
 # refer to easycv/models/detection/detectors/yolox/postprocess.py and test.py to rebuild a torch-blade-trtplugin NMS, which is checked by zhoulou in test.py
 # infer docker images is : registry.cn-shanghai.aliyuncs.com/pai-ai-test/eas-service:easycv_blade_181_export
-def trtplugin_efficientnms_postprocess():
-    return
-
 
 def postprocess(prediction, num_classes, conf_thre=0.7, nms_thre=0.45):
     box_corner = prediction.new(prediction.shape)
diff --git a/easycv/predictors/detector.py b/easycv/predictors/detector.py
index 18da1126..f47f00e5 100644
--- a/easycv/predictors/detector.py
+++ b/easycv/predictors/detector.py
@@ -43,7 +43,7 @@ def __init__(self,
                  model_path,
                  max_det=100,
                  score_thresh=0.5,
-                 use_trt_nms =False,
+                 use_trt_efficientnms=False,
                  model_config=None):
         """
         init model
@@ -61,7 +61,7 @@ def __init__(self,
             'blade')
 
         self.use_blade = model_path.endswith('blade')
-        self.use_trt_nms = use_trt_nms
+        self.use_trt_efficientnms = use_trt_efficientnms
 
         if self.use_blade:
             import torch_blade
@@ -178,20 +178,17 @@ def predict(self, input_data_list, batch_size=-1, to_numpy=True):
                 img = np.asarray(img)
 
             ori_img_shape = img.shape[:2]
-            speed_test=1
             if self.end2end:
                 # the input should also be as the type of uint8 as mmcv
                 img = torch.from_numpy(img).to(self.device)
-                if self.use_trt_nms:
-                    for i in range(speed_test):
-                        tmp_out = self.model(img)
+                if self.use_trt_efficientnms:
+                    tmp_out = self.model(img)
                     det_out={}
                     det_out['detection_boxes']=tmp_out[1]
                     det_out['detection_scores']=tmp_out[2]
                     det_out['detection_classes']=tmp_out[3]
                 else:
-                    for i in range(speed_test):
-                        det_out = self.model(img)
+                    det_out = self.model(img)
                     
                 detection_scores = det_out['detection_scores']
 
@@ -216,10 +213,9 @@ def predict(self, input_data_list, batch_size=-1, to_numpy=True):
                 data_dict.pop('img')
 
                 if self.traceable:
-                    if self.use_trt_nms:
+                    if self.use_trt_efficientnms:
                         with torch.no_grad():
-                            for i in range(speed_test):
-                                tmp_out = self.model(img)
+                            tmp_out = self.model(img)
                             det_out={}
                             det_out['detection_boxes']=tmp_out[1]
                             det_out['detection_scores']=tmp_out[2]
diff --git a/easycv/toolkit/blade/__init__.py b/easycv/toolkit/blade/__init__.py
index 6dbae0b1..ca94a3b4 100644
--- a/easycv/toolkit/blade/__init__.py
+++ b/easycv/toolkit/blade/__init__.py
@@ -1 +1,2 @@
 from .cv_blade_utils import blade_env_assert, blade_optimize
+from .trt_plugin_utils import create_tensorrt_efficientnms
\ No newline at end of file
diff --git a/easycv/toolkit/blade/trt_plugin_utils.py b/easycv/toolkit/blade/trt_plugin_utils.py
new file mode 100644
index 00000000..70df50e5
--- /dev/null
+++ b/easycv/toolkit/blade/trt_plugin_utils.py
@@ -0,0 +1,164 @@
+# This is a Python wrapper around a TensorRT plugin; the original plugin documentation is at
+# https://github.com/NVIDIA/TensorRT/tree/main/plugin/
+# We use the Python wrapper to build an ONNX-TRTPlugin engine and then wrap it as a jit script module.
+# After this, we can replace some of the original model's ops with this plugin during Blade export to
+# speed up the ones that are not well optimized by Blade itself.
+# Here we provide a TRTPlugin-EfficientNMS implementation.
+
+import torch
+from torch import nn
+
+class TRT8_NMS(torch.autograd.Function):
+    '''TensorRT NMS operation'''
+
+    @staticmethod
+    def forward(
+        ctx,
+        boxes,
+        scores,
+        background_class=-1,
+        box_coding=1,
+        iou_threshold=0.45,
+        max_output_boxes=100,
+        plugin_version='1',
+        score_activation=0,
+        score_threshold=0.25,
+    ):
+        batch_size, num_boxes, num_classes = scores.shape
+        num_det = torch.randint(
+            0, max_output_boxes, (batch_size, 1), dtype=torch.int32)
+        det_boxes = torch.randn(batch_size, max_output_boxes, 4)
+        det_scores = torch.randn(batch_size, max_output_boxes)
+        det_classes = torch.randint(
+            0, num_classes, (batch_size, max_output_boxes), dtype=torch.int32)
+        return num_det, det_boxes, det_scores, det_classes
+
+    @staticmethod
+    def symbolic(g,
+                 boxes,
+                 scores,
+                 background_class=-1,
+                 box_coding=1,
+                 iou_threshold=0.45,
+                 max_output_boxes=100,
+                 plugin_version='1',
+                 score_activation=0,
+                 score_threshold=0.25):
+        out = g.op(
+            'TRT::EfficientNMS_TRT',
+            boxes,
+            scores,
+            background_class_i=background_class,
+            box_coding_i=box_coding,
+            iou_threshold_f=iou_threshold,
+            max_output_boxes_i=max_output_boxes,
+            plugin_version_s=plugin_version,
+            score_activation_i=score_activation,
+            score_threshold_f=score_threshold,
+            outputs=4)
+        nums, boxes, scores, classes = out
+        return nums, boxes, scores, classes
+
+class ONNX_TRT8(nn.Module):
+    '''onnx module with TensorRT NMS operation.'''
+
+    def __init__(self,
+                 max_obj=100,
+                 iou_thres=0.45,
+                 score_thres=0.25,
+                 max_wh=None,
+                 device=None):
+        super().__init__()
+        assert max_wh is None
+        self.device = device if device else torch.device('cpu')
+        self.background_class = -1
+        self.box_coding = 1
+        self.iou_threshold = iou_thres
+        self.max_obj = max_obj
+        self.plugin_version = '1'
+        self.score_activation = 0
+        self.score_threshold = score_thres
+
+    def forward(self, x):
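+        # x is expected to have shape [batch, num_boxes, 4 + 1 + num_classes]:
+        # box coordinates, objectness, then per-class scores (see the __main__ example below).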
+        box = x[:, :, :4]
+        conf = x[:, :, 4:5]
+        score = x[:, :, 5:]
+        score *= conf
+        num_det, det_boxes, det_scores, det_classes = TRT8_NMS.apply(
+            box, score, self.background_class, self.box_coding,
+            self.iou_threshold, self.max_obj, self.plugin_version,
+            self.score_activation, self.score_threshold)
+        return num_det, det_boxes, det_scores, det_classes
+
+def create_tensorrt_efficientnms(example_scores,
+                                iou_thres=0.45,
+                                score_thres=0.25):
+    """
+    
+    """
+    from torch_blade import tensorrt
+    import torch_blade._torch_blade._backends as backends
+    import io
+
+    model = torch.jit.trace(
+        ONNX_TRT8(iou_thres=iou_thres, score_thres=score_thres),
+        example_scores)
+    example_outputs = model(example_scores)
+
+    input_names = ['input']
+    output_names = [
+        'num_det', 'detection_boxes', 'detection_scores', 'detection_classes'
+    ]
+    with io.BytesIO() as onnx_proto_f:
+        torch.onnx.export(
+            model,
+            example_scores,
+            onnx_proto_f,
+            input_names=input_names,
+            output_names=output_names,
+            example_outputs=example_outputs)
+        onnx_proto = onnx_proto_f.getvalue()
+
+    def _copy_meta(data, name, dtype, sizes):
+        data.name = name
+        if dtype.is_floating_point:
+            data.dtype = 'Float'
+        else:
+            data.dtype = 'Int'
+        data.sizes = sizes
+        return data
+
+    state = backends.EngineState()
+    state.inputs = [
+        _copy_meta(backends.TensorInfo(), name, tensor.dtype,
+                   list(tensor.shape))
+        for name, tensor in zip(input_names, [example_scores])
+    ]
+    state.outputs = [
+        _copy_meta(backends.TensorInfo(), name, tensor.dtype, [])
+        for name, tensor in zip(output_names, example_outputs)
+    ]
+    state = tensorrt.cvt_onnx_to_tensorrt(onnx_proto, state, [], dict())
+
+    class Model(torch.nn.Module):
+
+        def __init__(self, state):
+            super().__init__()
+            self._trt_engine_ext = backends.create_engine(state)
+
+        def forward(self, x):
+            return self._trt_engine_ext.execute([x])
+
+    trt_ext = torch.jit.script(Model(state))
+    return trt_ext
+
+
+if __name__ == '__main__':
+    bs = 32
+    num_boxes = 100
+    num_classes = 2
+    example_scores = torch.randn([bs, num_boxes, 4 + 1 + num_classes],
+                                 dtype=torch.float32)
+    trt_ext = create_tensorrt_efficientnms(example_scores)
+    out = trt_ext.forward(example_scores)
+    print(out)
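
A minimal usage sketch of the wrapper above; the variable names, thresholds and the CUDA placement are illustrative assumptions, not fixed by the patch:

import torch
from easycv.toolkit.blade import create_tensorrt_efficientnms

# example scores shaped like a decoded detection head output: [batch, boxes, 4 + 1 + classes]
example_scores = torch.randn([1, 8400, 4 + 1 + 80], dtype=torch.float32)
trt_nms = create_tensorrt_efficientnms(example_scores, iou_thres=0.65, score_thres=0.01)

# the scripted engine returns [num_det, boxes, scores, classes]; TensorRT engines expect CUDA tensors
out = trt_nms(example_scores.cuda())
det_out = {
    'detection_boxes': out[1],
    'detection_scores': out[2],
    'detection_classes': out[3],
}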

From 525f7c059691efbba5aa9a5df42f3a7a649d9495 Mon Sep 17 00:00:00 2001
From: wuziheng <zhoulou.wzh@alibaba-inc.com>
Date: Tue, 16 Aug 2022 20:40:39 +0800
Subject: [PATCH 45/69] split preprocess from end2end+blade, speedup from
 17ms->7.2ms

---
 easycv/apis/export.py                         | 105 ++++++++++++------
 .../models/backbones/repvgg_yolox_backbone.py |   2 +-
 .../models/detection/detectors/yolox/yolox.py |   4 +-
 easycv/predictors/detector.py                 |  31 ++++--
 easycv/toolkit/blade/cv_blade_utils.py        |  24 ++--
 numeric_test.py                               |  70 ++++--------
 6 files changed, 132 insertions(+), 104 deletions(-)

diff --git a/easycv/apis/export.py b/easycv/apis/export.py
index 52c05c4c..b45b0f64 100644
--- a/easycv/apis/export.py
+++ b/easycv/apis/export.py
@@ -2,6 +2,7 @@
 import copy
 import json
 import logging
+import cv2
 from collections import OrderedDict
 from distutils.version import LooseVersion
 from typing import Callable, Dict, List, Optional, Tuple
@@ -18,7 +19,7 @@
 from easycv.utils.checkpoint import load_checkpoint
 
 __all__ = [
-    'export', 'PreProcess', 'DetPostProcess', 'End2endModelExportWrapper'
+    'export', 'PreProcess', 'DetPostProcess', 'End2endModelExportWrapper', 'reparameterize_models'
 ]
 
 
@@ -34,13 +35,11 @@ def reparameterize_models(model):
         if isinstance(layer, RepVGGBlock):
             reparameterize_count += 1
             layer.switch_to_deploy()
-    logging.info(
+    logging.warning(
         'export : PAI-export reparameterize_count(RepVGGBlock, ) switch to deploy with {} blocks'
         .format(reparameterize_count))
-    print('reparam:', reparameterize_count)
     return model
 
-
 def export(cfg, ckpt_path, filename):
     """ export model for inference
 
@@ -55,8 +54,6 @@ def export(cfg, ckpt_path, filename):
     else:
         cfg.model.backbone.pretrained = False
     
-    model = reparameterize_models(model)
-
     if isinstance(model, MOCO) or isinstance(model, DINO):
         _export_moco(model, cfg, filename)
     elif isinstance(model, MoBY):
@@ -73,7 +70,6 @@ def export(cfg, ckpt_path, filename):
     else:
         _export_common(model, cfg, filename)
 
-
 def _export_common(model, cfg, filename):
     """ export model, add cfg dict to checkpoint['meta']['config'] without process
 
@@ -182,11 +178,11 @@ def _export_yolox(model, cfg, filename):
 
     if hasattr(cfg, 'export') and (getattr(cfg.export, 'use_jit', False) or
                                    getattr(cfg.export, 'export_blade', False)):
-        device = 'cuda' if torch.cuda.is_available() else 'cpu'
 
+        # reparameterize_models is only needed before export when exporting with jit or blade
+        model = reparameterize_models(model)
+        device = 'cuda' if torch.cuda.is_available() else 'cpu'
         model = copy.deepcopy(model)
-        model.eval()
-        model.to(device)
 
         end2end = cfg.export.get('end2end', False)
         if LooseVersion(torch.__version__) < LooseVersion('1.7.0') and end2end:
@@ -194,6 +190,7 @@ def _export_yolox(model, cfg, filename):
 
         batch_size = cfg.export.get('batch_size', 1)
         static_opt = cfg.export.get('static_opt', True)
+        use_blade = getattr(cfg.export, 'export_blade', False)
         use_trt_efficientnms = cfg.export.get('use_trt_efficientnms', False)
         
         # assert image scale and assgin input
@@ -204,6 +201,7 @@ def _export_yolox(model, cfg, filename):
         ), 'Export YoloX predictor config contains img_scale must be (int, int) tuple!'
 
         input = 255 * torch.rand((batch_size, 3) + img_scale)
+        print(input.dtype)
 
         # assert use_trt_efficientnms only happens when static_opt=True
         if static_opt is not True:
@@ -219,22 +217,31 @@ def _export_yolox(model, cfg, filename):
             postprocess_fn= DetPostProcess(max_det=100, score_thresh=0.5)
             # use_trt_efficientnms = detection.boxes.postprocess + DetPostProcess
             if use_trt_efficientnms:
-                logging.info('PAI-YOLOX: use_trt_efficientnms=True during export, we drop DetPostProcess, because trt_efficientnms = detection.boxes.postprocess + DetPostProcess!')
+                logging.warning('PAI-YOLOX: use_trt_efficientnms=True during export, we drop DetPostProcess, because trt_efficientnms = detection.boxes.postprocess + DetPostProcess!')
                 postprocess_fn = None
-        
+            
+            if use_blade:
+                logging.warning('PAI-YOLOX: End2endModelExportWrapper with preprocess_fn can\'t be optimized by blade!')
+                preprocess_fn = None
+
+
         # set model use_trt_efficientnms
-        from easycv.toolkit.blade import create_tensorrt_efficientnms
-        if hasattr(model, 'get_nmsboxes_num'):
-            nmsbox_num = model.get_nmsboxes_num(img_scale)
-        else:
-            logging.warning('PAI-YOLOX: use_trt_efficientnms encounter model has no attr named get_nmsboxes_num, use 8400 as default!')
-            nmsbox_num = 8400
+        if use_trt_efficientnms:
+            from easycv.toolkit.blade import create_tensorrt_efficientnms
+            if hasattr(model, 'get_nmsboxes_num'):
+                nmsbox_num = int(model.get_nmsboxes_num(img_scale))
+            else:
+                logging.warning('PAI-YOLOX: use_trt_efficientnms is enabled but the model has no attr named get_nmsboxes_num, using 8400 as default!')
+                nmsbox_num = 8400
 
-        tmp_example_scores = torch.randn([batch_size, nmsbox_num, 4 + 1 + len(cfg.CLASSES)],
-                                 dtype=torch.float32)
-        logging.info('PAI-YOLOX: use_trt_efficientnms with staic shape [{}, {}, {}]'.format(batch_size, nmsbox_num, 4 + 1 + len(cfg.CLASSES)))
-        model.trt_efficientnms = create_tensorrt_efficientnms(example_scores, iou_thres=model.nms_thre, score_thres=model.test_conf)
-        model.use_trt_efficientnms = True
+            tmp_example_scores = torch.randn([batch_size, nmsbox_num, 4 + 1 + len(cfg.CLASSES)],
+                                    dtype=torch.float32)
+            logging.warning('PAI-YOLOX: use_trt_efficientnms with static shape [{}, {}, {}]'.format(batch_size, nmsbox_num, 4 + 1 + len(cfg.CLASSES)))
+            model.trt_efficientnms = create_tensorrt_efficientnms(tmp_example_scores, iou_thres=model.nms_thre, score_thres=model.test_conf)
+            model.use_trt_efficientnms = True
+
+        model.eval()
+        model.to(device)
 
         model_export = End2endModelExportWrapper(
             model,
@@ -256,22 +263,29 @@ def _export_yolox(model, cfg, filename):
         if getattr(cfg.export, 'export_blade', False):
             blade_config = cfg.export.get(
                 'blade_config',
-                dict(enable_fp16=True, fp16_fallback_op_ratio=0.3))
+                dict(enable_fp16=True, 
+                fp16_fallback_op_ratio=0.3))
 
             from easycv.toolkit.blade import blade_env_assert, blade_optimize
-
             assert blade_env_assert()
 
-            if end2end:
-                # input [b,h,w,c]
-                input = 255 * torch.rand((batch_size, ) + img_scale + (3, ))
-
+            # if end2end:
+            #     input = 255 * torch.rand((batch_size,) + img_scale + (3,))
+            
             yolox_blade = blade_optimize(
-                script_model=model,
+                speed_test_model=model,
                 model=yolox_trace,
-                inputs=(input.to(device), ),
+                inputs=(input.to(device),),
                 blade_config=blade_config,
                 static_opt=static_opt)
+            
+            tpre_input = 255 * torch.rand((batch_size,) + img_scale + (3,))
+            tpre = PreprocessExportWrapper(example_inputs=tpre_input.to(device),
+                preprocess_fn=PreProcess(target_size=img_scale, keep_ratio=True))
+            tpre.eval().to(device)
+            preprocess = torch.jit.script(tpre)
+            with io.open(filename + '.preprocess', 'wb') as prefile:
+                torch.jit.save(preprocess, prefile)
 
             with io.open(filename + '.blade', 'wb') as ofile:
                 torch.jit.save(yolox_blade, ofile)
@@ -545,7 +559,6 @@ def __call__(
                 image (torch.Tensor): image format should be [b, H, W, C]
             """
             input_h, input_w = self.target_size
-            print('img.shape', image.shape)
             image = image.permute(0, 3, 1, 2)
 
             # rgb2bgr
@@ -585,6 +598,9 @@ def __call__(
                 'ori_img_shape': (float(ori_h), float(ori_w)),
                 'img_shape': (float(h), float(w))
             }
+
+            # print('outimg.shape ', out_image.shape)
+
             return out_image, output_info
 
     @torch.jit.script
@@ -704,10 +720,7 @@ def __init__(self,
         self.example_inputs = example_inputs
         self.preprocess_fn = preprocess_fn
         self.postprocess_fn = postprocess_fn
-
-        # if postprocess_fn == None:
-        #     self.model.head.decode_in_inference = False
-
+        print("fucking!!!! : ", self.preprocess_fn , self.postprocess_fn)
         self.trace_model = trace_model
         if self.trace_model:
             self.trace_module()
@@ -722,7 +735,7 @@ def forward(self, image):
 
         with torch.no_grad():
             if self.preprocess_fn is not None:
-                print('before', image.shape)
+                # print('before', image.shape)
                 output = self.preprocess_fn(image)
                 # if multiple values are returned, the first one must be the image; the others are
                 # optional and will all be passed into postprocess_fn
@@ -738,3 +751,21 @@ def forward(self, image):
                                                    *preprocess_outputs)
 
         return model_output
+
+
+class PreprocessExportWrapper(torch.nn.Module):
+    def __init__(self,
+                 example_inputs,
+                 preprocess_fn: Optional[Callable] = None) -> None:
+        super().__init__()
+        self.preprocess_fn = preprocess_fn
+
+    def forward(self, image):
+        with torch.no_grad():
+            output = self.preprocess_fn(image)
+            if isinstance(output, tuple):
+                image = output[0]
+                preprocess_outputs = output[1:]
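+                # extra outputs (e.g. resize/pad meta info) are ignored here; this wrapper only
+                # exports the preprocessed image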
+            else:
+                image = output
+        return image
\ No newline at end of file
diff --git a/easycv/models/backbones/repvgg_yolox_backbone.py b/easycv/models/backbones/repvgg_yolox_backbone.py
index 843858cf..51079f0b 100644
--- a/easycv/models/backbones/repvgg_yolox_backbone.py
+++ b/easycv/models/backbones/repvgg_yolox_backbone.py
@@ -82,7 +82,7 @@ def __init__(self,
                 stride=stride,
                 padding=padding_11,
                 groups=groups)
-            print('RepVGG Block, identity = ', self.rbr_identity)
+            # print('RepVGG Block, identity = ', self.rbr_identity)
 
     def forward(self, inputs):
         if hasattr(self, 'rbr_reparam'):
diff --git a/easycv/models/detection/detectors/yolox/yolox.py b/easycv/models/detection/detectors/yolox/yolox.py
index ef95f64b..a5e65d63 100644
--- a/easycv/models/detection/detectors/yolox/yolox.py
+++ b/easycv/models/detection/detectors/yolox/yolox.py
@@ -11,7 +11,6 @@
 from easycv.models.builder import (MODELS, build_backbone, build_head,
                                    build_neck)
 from easycv.models.detection.utils import postprocess
-from easycv.models.detection.detectors.yolox.postprocess import create_tensorrt_postprocess
 
 
 def init_yolo(M):
@@ -54,7 +53,7 @@ def __init__(self,
     def get_nmsboxes_num(self, img_scale=(640, 640)):
         """ Detection neck or head should provide nms box count information
         """
-        if hasattr(self, neck, None) is not None:
+        if getattr(self, 'neck', None) is not None:
             return self.neck.get_nmsboxes_num(img_scale=(640, 640))
         else:
             return self.head.get_nmsboxes_num(img_scale=(640, 640))
@@ -164,6 +163,7 @@ def forward_export(self, img):
         with torch.no_grad():
             fpn_outs = self.backbone(img)
             outputs = self.head(fpn_outs)
+            
             if self.head.decode_in_inference:
                 if self.use_trt_efficientnms:
                     if self.trt_efficientnms is not None:
diff --git a/easycv/predictors/detector.py b/easycv/predictors/detector.py
index f47f00e5..884896a4 100644
--- a/easycv/predictors/detector.py
+++ b/easycv/predictors/detector.py
@@ -22,6 +22,7 @@
 from easycv.utils.constant import CACHE_DIR
 from easycv.utils.mmlab_utils import dynamic_adapt_for_mmlab
 from easycv.utils.registry import build_from_cfg
+from easycv.apis.export import reparameterize_models
 from .builder import PREDICTORS
 from .classifier import TorchClassifier
 
@@ -59,7 +60,6 @@ def __init__(self,
         self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
         self.use_jit = model_path.endswith('jit') or model_path.endswith(
             'blade')
-
         self.use_blade = model_path.endswith('blade')
         self.use_trt_efficientnms = use_trt_efficientnms
 
@@ -75,6 +75,11 @@ def __init__(self,
             'score_thresh'] if 'score_thresh' in model_config else score_thresh
 
         if self.use_jit:
+            preprocess_path = ".".join(model_path.split('.')[:-1] + ['preprocess'])
+            if os.path.exists(preprocess_path):
+                with io.open(preprocess_path, 'rb') as infile:
+                    map_location = 'cpu' if self.device == 'cpu' else 'cuda'
+                    self.preprocess = torch.jit.load(infile, map_location)
             with io.open(model_path, 'rb') as infile:
                 map_location = 'cpu' if self.device == 'cpu' else 'cuda'
                 self.model = torch.jit.load(infile, map_location)
@@ -109,12 +114,15 @@ def __init__(self,
 
             # build model
             self.model = build_model(self.cfg.model)
+
             self.traceable = getattr(self.model, 'trace_able', False)
 
             self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
             map_location = 'cpu' if self.device == 'cpu' else 'cuda'
             self.ckpt = load_checkpoint(
                 self.model, self.model_path, map_location=map_location)
+            
+            self.model = reparameterize_models(self.model)
 
             self.model.to(self.device)
             self.model.eval()
@@ -180,7 +188,12 @@ def predict(self, input_data_list, batch_size=-1, to_numpy=True):
             ori_img_shape = img.shape[:2]
             if self.end2end:
                 # the input should also be as the type of uint8 as mmcv
+
                 img = torch.from_numpy(img).to(self.device)
+                img = img.unsqueeze(0)
+                if hasattr(self, 'preprocess'):
+                    img = self.preprocess(img)
+
                 if self.use_trt_efficientnms:
                     tmp_out = self.model(img)
                     det_out={}
@@ -189,7 +202,7 @@ def predict(self, input_data_list, batch_size=-1, to_numpy=True):
                     det_out['detection_classes']=tmp_out[3]
                 else:
                     det_out = self.model(img)
-                    
+
                 detection_scores = det_out['detection_scores']
 
                 if detection_scores is not None:
@@ -206,12 +219,11 @@ def predict(self, input_data_list, batch_size=-1, to_numpy=True):
                     detection_boxes = detection_boxes.cpu().detach().numpy()
                     detection_classes = detection_classes.cpu().detach().numpy()
             else:
-                data_dict = {'img': img}
-                data_dict = self.pipeline(data_dict)
-                img = data_dict['img']
-                img = torch.unsqueeze(img._data, 0).to(self.device)
-                data_dict.pop('img')
-
+                # data_dict = {'img': img}
+                # data_dict = self.pipeline(data_dict)
+                # img = data_dict['img']
+                # img = torch.unsqueeze(img._data, 0).to(self.device)
+                # data_dict.pop('img')
                 if self.traceable:
                     if self.use_trt_efficientnms:
                         with torch.no_grad():
@@ -232,6 +244,7 @@ def predict(self, input_data_list, batch_size=-1, to_numpy=True):
                             mode='test',
                             img_metas=[data_dict['img_metas']._data])
 
+                # print(det_out)
                 # det_out = det_out[:self.max_det]
                 # scale box to original image scale, this logic has some operation
                 # that can not be traced, see
@@ -249,7 +262,7 @@ def predict(self, input_data_list, batch_size=-1, to_numpy=True):
                 else:
                     detection_boxes = None
                     detection_classes = None
-
+            
             num_boxes = detection_classes.shape[
                 0] if detection_classes is not None else 0
 
diff --git a/easycv/toolkit/blade/cv_blade_utils.py b/easycv/toolkit/blade/cv_blade_utils.py
index 80c33944..c11be26f 100644
--- a/easycv/toolkit/blade/cv_blade_utils.py
+++ b/easycv/toolkit/blade/cv_blade_utils.py
@@ -237,7 +237,7 @@ def check_results(results0, results1):
         logging.error(err)
 
 
-def blade_optimize(script_model,
+def blade_optimize(speed_test_model,
                    model,
                    inputs,
                    blade_config=dict(
@@ -270,16 +270,24 @@ def blade_optimize(script_model,
                 model_inputs=tuple(inputs),
             )
 
+
     if compute_cost:
         results = []
-
         inputs_t = inputs
-        if (inputs_t[0].shape[2] == 3):
-            inputs_t = inputs_t[0].permute(2, 0, 1)
-            inputs_t = (torch.unsqueeze(inputs_t, 0), )
+
+        # end2end models and scripted models need different channel permutations; we only encounter this when using end2end export
+        if (inputs_t[0].shape[-1] == 3):
+            shape_length = len(inputs_t[0].shape)
+            if shape_length == 4:
+                inputs_t = inputs_t[0].permute(0, 3, 1, 2)
+                inputs_t = [inputs_t]
+
+            if shape_length == 3:
+                inputs_t = inputs_t[0].permute(2, 0, 1)
+                inputs_t = (torch.unsqueeze(inputs_t, 0), )
 
         results.append(
-            benchmark(script_model, inputs_t, backend, batch, 'easycv'))
+            benchmark(speed_test_model, inputs_t, backend, batch, 'easycv'))
         results.append(
             benchmark(model, inputs, backend, batch, 'easycv script'))
         results.append(benchmark(opt_model, inputs, backend, batch, 'blade'))
@@ -288,8 +296,8 @@ def blade_optimize(script_model,
         summary = pd.DataFrame(results)
         logging.warning(summary.to_markdown())
 
-    print(opt_model.forward.code)
-    print(opt_model.forward.graph)
+    # print(opt_model.forward.code)
+    # print(opt_model.forward.graph)
     torch.cuda.empty_cache()
     # warm-up
     for k in range(warm_up_time):
diff --git a/numeric_test.py b/numeric_test.py
index b20ec1b9..5c136a23 100644
--- a/numeric_test.py
+++ b/numeric_test.py
@@ -22,58 +22,34 @@ def timeit_context(name):
     elapsedTime = time.time() - startTime
     print('[{}] finished in {} ms'.format(name, int(elapsedTime * 1000)))
 
+
+def model_speed_test(name, img, use_trt_efficientnms=False):
+    pred = TorchYoloXPredictor(name, use_trt_efficientnms=use_trt_efficientnms)
+    for i in range(10):
+        m0 = pred.predict([img])
+    with timeit_context('{} speed test'.format(name)):
+        for i in range(100):
+            m0 = pred.predict([img]) 
+    print(m0[0]['detection_classes'])
+    print(m0[0]['detection_scores'])
+
+
 if __name__=='__main__':
     if 1:
         img_path = '/apsara/xinyi.zxy/data/coco/val2017/000000254016.jpg'
         from easycv.predictors import TorchYoloXPredictor
         img = Image.open(img_path)
 
-        pred = TorchYoloXPredictor('models/output.pt')
-        m = pred.predict([img])
-        print("fucking m :", m)
-
-        pred0 = TorchYoloXPredictor('models/output_bs1.blade.jit')
-        for i in range(10):
-            m0 = pred0.predict([img])
-        with timeit_context('m0 speed test'):
-            for i in range(100):
-                m0 = pred0.predict([img]) 
-        print("fucking m0:", m0)
+        # model_speed_test('models/output_bs1_e2e_f005.blade.jit', img)
+        model_speed_test('models/output_bs1_e2e_f005_trtnms.blade.blade', img, True)
+        # model_speed_test('models/output_bs1_e2e_noblade.pt', img)
+        # model_speed_test('models/output_bs1_e2e_noblade_trtnms.pt', img)
+        # model_speed_test('models/output_bs1_noe2e_noblade.pt', img)
+        # model_speed_test('models/output_bs1_noe2e_noblade_trtnms.pt', img)
         
-        pred1 = TorchYoloXPredictor('models/output_bs1_e2e.blade.jit')
-        for i in range(10):
-            m1 = pred1.predict([img])
-        with timeit_context('m1 speed test'):
-            for i in range(100):
-                m1 = pred1.predict([img])       
-        print("fucking m1:", m1)
-
-        # pred2 = TorchYoloXPredictor('models/output_bs1_e2e.blade.jit')
-        # m2 = pred2.predict([img])
-        # print("fucking m2:", m2)
-
-        # pred3 = TorchYoloXPredictor('models/output_bs1_e2e_f005.blade.jit')
-        # m3 = pred3.predict([img])
-        # print("fucking m3:", m3)
-
-        # pred4 = TorchYoloXPredictor('models/output_trtnms.pt')
-        # m4 = pred4.predict([img]) 
-        # print("fucking m4:", m4)
-
-        pred5 = TorchYoloXPredictor(model_path='models/output_bs1_noe2e_f005_trtnms.blade.blade', use_trt_nms=True)
-        # m5 = pred5.predict([img]) 
-        for i in range(10):
-            m5 = pred5.predict([img])
-        with timeit_context('m5 speed test'):
-            for i in range(100):
-                m5 = pred5.predict([img])     
-        print("fucking m5:", m5)
+        # model_speed_test('models/output_bs1_e2e_f005_trtnms.blade.jit', img, True)
+        # model_speed_test('models/output_bs1_noe2e_f030.blade.jit', img, False)
+        # model_speed_test('models/output_bs1_noe2e_f030.blade.jit', img, False)
 
-        pred6 = TorchYoloXPredictor(model_path='models/output_bs1_e2e_f005_trtnms.blade.blade', use_trt_nms=True)
-        # m5 = pred5.predict([img]) 
-        for i in range(10):
-            m6 = pred6.predict([img])
-        with timeit_context('m6 speed test'):
-            for i in range(100):
-                m6 = pred5.predict([img])     
-        print("fucking m6:", m6)
\ No newline at end of file
+        # model_speed_test('models/output_bs1_e2e_f005_trtnms.blade.jit', img, False)
+        # model_speed_test('models/output_bs1_e2e_f005.blade.jit', img, False)
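
To illustrate the preprocess split introduced by this patch, here is a minimal inference-side sketch; the file names are illustrative assumptions, and the flow mirrors the TorchYoloXPredictor changes above (load the blade-optimized graph plus the separately scripted preprocess module, run preprocess outside the optimized graph, then the model):

import io
import numpy as np
import torch
from PIL import Image

device = 'cuda' if torch.cuda.is_available() else 'cpu'

# the export step writes <filename>.blade and <filename>.preprocess side by side
with io.open('models/output.blade', 'rb') as f:
    model = torch.jit.load(f, map_location=device)
with io.open('models/output.preprocess', 'rb') as f:
    preprocess = torch.jit.load(f, map_location=device)

img = np.asarray(Image.open('demo.jpg'))              # HWC uint8 image, as the predictor feeds it
img = torch.from_numpy(img).to(device).unsqueeze(0)   # [1, H, W, C]
img = preprocess(img)                                  # resize / pad / normalize outside the blade graph
det_out = model(img)                                   # detection dict (or raw plugin outputs with trt nms)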

From cfde0cddb84cf88b63d7de2cbf49dc8d70a0931f Mon Sep 17 00:00:00 2001
From: zouxinyi0625 <zouxinyi.zxy@alibaba-inc.com>
Date: Tue, 16 Aug 2022 23:29:32 +0800
Subject: [PATCH 46/69] fix cr bug

---
 .../yolox/yolox_s_8xb16_300e_coco.py          |   2 +-
 .../yolox_s_8xb16_300e_coco_asff_reptood2.py  | 202 ++++++++++++++++++
 .../yolox_s_8xb16_300e_coco_asff_tood3.py     |   5 +-
 .../yolox_s_8xb16_300e_coco_asff_tood6.py     | 198 +++++++++++++++++
 .../yolox/yolox_s_8xb16_300e_coco_rep.py      |   2 +-
 .../yolox/yolox_s_8xb16_300e_coco_reptood3.py | 199 +++++++++++++++++
 .../yolox_s_8xb16_300e_coco_tood3_rep.py      | 196 +++++++++++++++++
 convert_new.py                                | 106 +++++++++
 .../models/backbones/repvgg_yolox_backbone.py |  46 +---
 .../detection/detectors/yolox/tood_head.py    |   4 +-
 .../detection/detectors/yolox/yolo_head.py    |   4 +-
 .../detectors/yolox/yolo_head_template.py     |  17 +-
 easycv/models/detection/utils/boxes.py        |   7 +-
 easycv/models/loss/iou_loss.py                |  12 +-
 easycv/utils/mmlab_utils.py                   |   2 +-
 tools/eval.py                                 |  23 ++
 16 files changed, 962 insertions(+), 63 deletions(-)
 create mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_reptood2.py
 create mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood6.py
 create mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_reptood3.py
 create mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_tood3_rep.py
 create mode 100644 convert_new.py

diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
index 266763ae..29b20ecf 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
@@ -192,4 +192,4 @@
         # dict(type='WandbLoggerHookV2'),
     ])
 
-export = dict(use_jit=False, export_blade=False, end2end=False)
+export = dict(use_jit=True, export_blade=True, end2end=False, batch_size=32)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_reptood2.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_reptood2.py
new file mode 100644
index 00000000..016e3ee1
--- /dev/null
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_reptood2.py
@@ -0,0 +1,202 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    test_conf=0.01,
+    nms_thre=0.65,
+    backbone=dict(
+        type='YOLOPAFPN',
+        backbone='RepVGGYOLOX',
+        model_type='s',  # s m l x tiny nano
+        use_att='ASFF',
+        asff_channel=16,
+        neck='yolo'),
+    head=dict(
+        type='TOODHead',
+        model_type='s',
+        obj_loss_type='BCE',
+        reg_loss_type='giou',
+        num_classes=80,
+        conv_type='repconv',
+        la_down_rate=8,
+        stacked_convs=2
+    ))
+
+# s m l x
+img_scale = (672, 672)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = '/apsarapangu/disk2/xinyi.zxy/data/coco/'
+# data_root = '/mnt/data/nas/data/detection/coco/'
+# data_root = '/root/workspace/data/coco/'
+data_root = '/apsara/xinyi.zxy/data/coco/'
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # s m x l; tiny nano will delete
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=True,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        test_mode=True,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential model average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=True, export_blade=True, end2end=False, batch_size=32)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood3.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood3.py
index 9f36a085..e49f3afc 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood3.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood3.py
@@ -45,9 +45,10 @@
 ]
 
 # dataset settings
-data_root = '/apsarapangu/disk2/xinyi.zxy/data/coco/'
+# data_root = '/apsarapangu/disk2/xinyi.zxy/data/coco/'
 # data_root = '/mnt/data/nas/data/detection/coco/'
 # data_root = '/root/workspace/data/coco/'
+data_root = '/apsara/xinyi.zxy/data/coco/'
 
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
@@ -193,4 +194,4 @@
         # dict(type='WandbLoggerHookV2'),
     ])
 
-export = dict(use_jit=False, export_blade=False, end2end=False)
+export = dict(use_jit=True, export_blade=True, end2end=False, batch_size=32)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood6.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood6.py
new file mode 100644
index 00000000..c59f811c
--- /dev/null
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood6.py
@@ -0,0 +1,198 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    test_conf=0.01,
+    nms_thre=0.65,
+    backbone=dict(
+        type='YOLOPAFPN',
+        backbone='RepVGGYOLOX',
+        model_type='s',  # s m l x tiny nano
+        use_att='ASFF',
+        neck='yolo'),
+    head=dict(
+        type='TOODHead',
+        model_type='s',
+        obj_loss_type='BCE',
+        reg_loss_type='giou',
+        stacked_convs=6,
+        num_classes=80))
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = '/apsarapangu/disk2/xinyi.zxy/data/coco/'
+# data_root = '/mnt/data/nas/data/detection/coco/'
+# data_root = '/root/workspace/data/coco/'
+data_root = '/apsara/xinyi.zxy/data/coco/'
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # s m x l; tiny nano will delete
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=True,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        test_mode=True,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential model average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=True, export_blade=True, end2end=False, batch_size=32)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_rep.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_rep.py
index e9f4646f..ca26086e 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_rep.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_rep.py
@@ -46,7 +46,7 @@
 
 # dataset settings
 # data_root = '/apsarapangu/disk2/xinyi.zxy/data/coco/'
-data_root = '/mnt/data/nas/data/detection/coco/'
+data_root = '/apsara/xinyi.zxy/data/coco/'
 
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_reptood3.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_reptood3.py
new file mode 100644
index 00000000..af904214
--- /dev/null
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_reptood3.py
@@ -0,0 +1,199 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    test_conf=0.01,
+    nms_thre=0.65,
+    backbone=dict(
+        type='YOLOPAFPN',
+        backbone='RepVGGYOLOX',
+        model_type='s',  # s m l x tiny nano
+        # use_att='ASFF',
+        neck='yolo'),
+    head=dict(
+        type='TOODHead',
+        model_type='s',
+        obj_loss_type='BCE',
+        reg_loss_type='giou',
+        num_classes=80,
+        conv_type='repconv'
+    ))
+
+# s m l x
+img_scale = (672, 672)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = '/apsarapangu/disk2/xinyi.zxy/data/coco/'
+# data_root = '/mnt/data/nas/data/detection/coco/'
+# data_root = '/root/workspace/data/coco/'
+data_root = '/apsara/xinyi.zxy/data/coco/'
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # s m x l; tiny nano will delete
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=True,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        test_mode=True,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential model average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=True, export_blade=True, end2end=False, batch_size=32)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_tood3_rep.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_tood3_rep.py
new file mode 100644
index 00000000..8e14d8d4
--- /dev/null
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_tood3_rep.py
@@ -0,0 +1,196 @@
+_base_ = '../../base.py'
+
+# model settings s m l x
+model = dict(
+    type='YOLOX',
+    test_conf=0.01,
+    nms_thre=0.65,
+    backbone=dict(
+        type='YOLOPAFPN',
+        backbone='RepVGGYOLOX',
+        model_type='s',  # s m l x tiny nano
+        use_att=None,
+        neck='yolo'),
+    head=dict(
+        type='TOODHead',
+        model_type='s',
+        obj_loss_type='BCE',
+        reg_loss_type='giou',
+        conv_type='repconv',
+        num_classes=80))
+
+# s m l x
+img_scale = (640, 640)
+random_size = (14, 26)
+scale_ratio = (0.1, 2)
+
+# tiny nano without mixup
+# img_scale = (416, 416)
+# random_size = (10, 20)
+# scale_ratio = (0.5, 1.5)
+
+CLASSES = [
+    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
+    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
+    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
+    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
+    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
+    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
+    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
+    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
+    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
+    'hair drier', 'toothbrush'
+]
+
+# dataset settings
+# data_root = '/apsarapangu/disk2/xinyi.zxy/data/coco/'
+data_root = '/apsarapangu/disk6/xinyi.zxy/coco/'
+
+img_norm_cfg = dict(
+    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+
+train_pipeline = [
+    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
+    dict(
+        type='MMRandomAffine',
+        scaling_ratio_range=scale_ratio,
+        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
+    dict(
+        type='MMMixUp',  # s m x l; tiny nano will delete
+        img_scale=img_scale,
+        ratio_range=(0.8, 1.6),
+        pad_val=114.0),
+    dict(
+        type='MMPhotoMetricDistortion',
+        brightness_delta=32,
+        contrast_range=(0.5, 1.5),
+        saturation_range=(0.5, 1.5),
+        hue_delta=18),
+    dict(type='MMRandomFlip', flip_ratio=0.5),
+    dict(type='MMResize', keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
+]
+test_pipeline = [
+    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
+    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
+    dict(type='MMNormalize', **img_norm_cfg),
+    dict(type='DefaultFormatBundle'),
+    dict(type='Collect', keys=['img'])
+]
+
+train_dataset = dict(
+    type='DetImagesMixDataset',
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_train2017.json',
+        img_prefix=data_root + 'train2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=True,
+        iscrowd=False),
+    pipeline=train_pipeline,
+    dynamic_scale=img_scale)
+
+val_dataset = dict(
+    type='DetImagesMixDataset',
+    imgs_per_gpu=2,
+    data_source=dict(
+        type='DetSourceCoco',
+        ann_file=data_root + 'annotations/instances_val2017.json',
+        img_prefix=data_root + 'val2017/',
+        pipeline=[
+            dict(type='LoadImageFromFile', to_float32=True),
+            dict(type='LoadAnnotations', with_bbox=True)
+        ],
+        classes=CLASSES,
+        filter_empty_gt=False,
+        test_mode=True,
+        iscrowd=True),
+    pipeline=test_pipeline,
+    dynamic_scale=None,
+    label_padding=False)
+
+data = dict(
+    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
+
+# additional hooks
+interval = 10
+custom_hooks = [
+    dict(
+        type='YOLOXModeSwitchHook',
+        no_aug_epochs=15,
+        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
+        priority=48),
+    dict(
+        type='SyncRandomSizeHook',
+        ratio_range=random_size,
+        img_scale=img_scale,
+        interval=interval,
+        priority=48),
+    dict(
+        type='SyncNormHook',
+        num_last_epochs=15,
+        interval=interval,
+        priority=48)
+]
+
+# evaluation
+eval_config = dict(
+    interval=10,
+    gpu_collect=False,
+    visualization_config=dict(
+        vis_num=10,
+        score_thr=0.5,
+    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
+)
+eval_pipelines = [
+    dict(
+        mode='test',
+        data=data['val'],
+        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
+    )
+]
+
+checkpoint_config = dict(interval=interval)
+
+# optimizer
+optimizer = dict(
+    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
+optimizer_config = {}
+
+# learning policy
+lr_config = dict(
+    policy='YOLOX',
+    warmup='exp',
+    by_epoch=False,
+    warmup_by_epoch=True,
+    warmup_ratio=1,
+    warmup_iters=5,  # 5 epoch
+    num_last_epochs=15,
+    min_lr_ratio=0.05)
+
+# exponential model average
+ema = dict(decay=0.9998)
+
+# runtime settings
+total_epochs = 300
+
+# yapf:disable
+log_config = dict(
+    interval=100,
+    hooks=[
+        dict(type='TextLoggerHook'),
+        dict(type='TensorboardLoggerHookV2'),
+        # dict(type='WandbLoggerHookV2'),
+    ])
+
+export = dict(use_jit=False, export_blade=False, end2end=False)
diff --git a/convert_new.py b/convert_new.py
new file mode 100644
index 00000000..d7cd31b8
--- /dev/null
+++ b/convert_new.py
@@ -0,0 +1,106 @@
+# convert an old checkpoint to the new backbone key naming
+from easycv.utils.checkpoint import load_checkpoint
+from easycv.utils.config_tools import (CONFIG_TEMPLATE_ZOO,
+                                       mmcv_config_fromfile, rebuild_config)
+import torch
+from easycv.models import build_model
+
+
+if __name__=='__main__':
+    # cfg_path = '/apsara/xinyi.zxy/code/pr154/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood3.py'
+    cfg_path = '/apsara/xinyi.zxy/code/pr154/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_reptood2.py'
+    cfg = mmcv_config_fromfile(cfg_path)
+    model = build_model(cfg.model)
+    print(model)
+
+    # ckpt_path = '/apsara/xinyi.zxy/pretrain/asff_tood3/epoch_300_new.pth'
+    ckpt_path = '/apsara/xinyi.zxy/pretrain/ab_study/yolox_asff_reptood2.pth'
+    model_ckpt = torch.load(ckpt_path)
+    pretrain_model_state = model_ckpt['state_dict']
+
+    # model.load_state_dict(pretrain_model_state)
+    #
+    # exit()
+
+    model_state_dict = model.state_dict()  # keys of the new model
+
+    # of1 = open('new.txt','w')
+    # for key in model_state_dict.keys():
+    #     of1.writelines(key+'\n')
+    #
+    # of2 = open('pre.txt', 'w')
+    # for key in pretrain_model_state.keys():
+    #     of2.writelines(key + '\n')
+
+    key_ori = [
+        'backbone.stem',
+        'ERBlock_2.0',
+        'ERBlock_2.1.conv1',
+        'ERBlock_2.1.block.0',
+        'ERBlock_3.0',
+        'ERBlock_3.1.conv1',
+        'ERBlock_3.1.block.0',
+        'ERBlock_3.1.block.1',
+        'ERBlock_3.1.block.2',
+        'ERBlock_4.0',
+        'ERBlock_4.1.conv1',
+        'ERBlock_4.1.block.0',
+        'ERBlock_4.1.block.1',
+        'ERBlock_4.1.block.2',
+        'ERBlock_4.1.block.3',
+        'ERBlock_4.1.block.4',
+        'ERBlock_5.0',
+        'ERBlock_5.1.conv1',
+        'ERBlock_5.1.block.0',
+        'ERBlock_5.2'
+    ]
+
+    key_new = [
+        'backbone.stage0',
+        'stage1.0',
+        'stage1.1',
+        'stage1.2',
+        'stage2.0',
+        'stage2.1',
+        'stage2.2',
+        'stage2.3',
+        'stage2.4',
+        'stage3.0',
+        'stage3.1',
+        'stage3.2',
+        'stage3.3',
+        'stage3.4',
+        'stage3.5',
+        'stage3.6',
+        'stage4.0',
+        'stage4.1',
+        'stage4.2',
+        'stage4.3'
+    ]
+
+    print(len(key_ori)==len(key_new))
+
+    for i, key in enumerate(pretrain_model_state):
+        find = False
+        for t_i, t_k in enumerate(key_ori):
+            if t_k in key:
+                find = True
+                break
+        if find:
+            model_state_dict[key.replace(t_k,key_new[t_i])] = pretrain_model_state[key]
+        else:
+            model_state_dict[key] = pretrain_model_state[key]
+
+    model.load_state_dict(model_state_dict)
+
+    model_ckpt['state_dict'] = model_state_dict
+    ckpt_path_new = '/apsara/xinyi.zxy/pretrain/ab_study/yolox_asff_reptood2_new.pth'
+    torch.save(model_ckpt, ckpt_path_new)
+
+    # load
+    model_ckpt_new = torch.load(ckpt_path_new)
+    pretrain_model_state_new = model_ckpt_new['state_dict']
+
+    model.load_state_dict(pretrain_model_state_new)
+    #
+    # exit()
\ No newline at end of file
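
The loop above renames the old 'ERBlock_*' backbone keys to the new 'stage*' names by substring replacement. A minimal standalone sketch of the same idea (the helper name is hypothetical and not part of the patch):

def remap_state_dict_keys(state_dict, key_ori, key_new):
    # For every parameter, replace the first matching old key fragment with its
    # new counterpart; keys without a match are copied unchanged.
    remapped = {}
    for key, value in state_dict.items():
        new_key = key
        for old, new in zip(key_ori, key_new):
            if old in key:
                new_key = key.replace(old, new)
                break
        remapped[new_key] = value
    return remapped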
diff --git a/easycv/models/backbones/repvgg_yolox_backbone.py b/easycv/models/backbones/repvgg_yolox_backbone.py
index 843858cf..2b9ffd35 100644
--- a/easycv/models/backbones/repvgg_yolox_backbone.py
+++ b/easycv/models/backbones/repvgg_yolox_backbone.py
@@ -31,7 +31,7 @@ class RepVGGBlock(nn.Module):
     def __init__(self,
                  in_channels,
                  out_channels,
-                 kernel_size=3,
+                 ksize=3,
                  stride=1,
                  padding=1,
                  dilation=1,
@@ -44,10 +44,10 @@ def __init__(self,
         self.groups = groups
         self.in_channels = in_channels
 
-        assert kernel_size == 3
+        assert ksize == 3
         assert padding == 1
 
-        padding_11 = padding - kernel_size // 2
+        padding_11 = padding - ksize // 2
 
         self.nonlinearity = nn.ReLU()
         self.se = nn.Identity()
@@ -56,7 +56,7 @@ def __init__(self,
             self.rbr_reparam = nn.Conv2d(
                 in_channels=in_channels,
                 out_channels=out_channels,
-                kernel_size=kernel_size,
+                kernel_size=ksize,
                 stride=stride,
                 padding=padding,
                 dilation=dilation,
@@ -71,7 +71,7 @@ def __init__(self,
             self.rbr_dense = conv_bn(
                 in_channels=in_channels,
                 out_channels=out_channels,
-                kernel_size=kernel_size,
+                kernel_size=ksize,
                 stride=stride,
                 padding=padding,
                 groups=groups)
@@ -319,7 +319,7 @@ def __init__(
         self.stage0 = RepVGGBlock(
             in_channels=in_channels,
             out_channels=channels_list[0],
-            kernel_size=3,
+            ksize=3,
             stride=2)
         self.stage1 = self._make_stage(channels_list[0], channels_list[1],
                                        num_repeats[1])
@@ -339,7 +339,7 @@ def _make_stage(self,
         blocks = []
         blocks.append(
             RepVGGBlock(
-                in_channels, out_channels, kernel_size=3, stride=stride))
+                in_channels, out_channels, ksize=3, stride=stride))
         for i in range(repeat):
             blocks.append(RepVGGBlock(out_channels, out_channels))
         if add_ppf:
@@ -357,34 +357,4 @@ def forward(self, x):
         outputs.append(x)
         x = self.stage4(x)
         outputs.append(x)
-        return tuple(outputs)
-
-
-if __name__ == '__main__':
-
-    from torchsummaryX import summary
-
-    depth_mul = 0.33
-    width_mul = 0.5
-    num_repeat_backbone = [1, 6, 12, 18, 6]
-    channels_list_backbone = [64, 128, 256, 512, 1024]
-    num_repeat_neck = [12, 12, 12, 12]
-    channels_list_neck = [256, 128, 128, 256, 256, 512]
-    channels = 3
-    num_repeat = [(max(round(i * depth_mul), 1) if i > 1 else i)
-                  for i in (num_repeat_backbone + num_repeat_neck)]
-
-    channels_list = [
-        make_divisible(i * width_mul, 8)
-        for i in (channels_list_backbone + channels_list_neck)
-    ]
-    # model = RepVGGYOLOX(in_channels=channels, channels_list=channels_list, num_repeats=num_repeat)
-    model = RepVGGYOLOX(in_channels=channels, depth=depth_mul, width=width_mul)
-    for layer in model.modules():
-        if isinstance(layer, RepVGGBlock):
-            layer.switch_to_deploy()
-
-    model = model.cuda()
-
-    a = torch.randn(1, 3, 640, 640).cuda()
-    summary(model, a)
+        return tuple(outputs)
\ No newline at end of file
diff --git a/easycv/models/detection/detectors/yolox/tood_head.py b/easycv/models/detection/detectors/yolox/tood_head.py
index ee0a0da9..6d2b28e8 100644
--- a/easycv/models/detection/detectors/yolox/tood_head.py
+++ b/easycv/models/detection/detectors/yolox/tood_head.py
@@ -89,7 +89,7 @@ def __init__(
             strides=[8, 16, 32],
             in_channels=[256, 512, 1024],
             act='silu',
-            depthwise=False,
+            conv_type='conv',
             stage='CLOUD',
             obj_loss_type='BCE',
             reg_loss_type='giou',
@@ -117,7 +117,7 @@ def __init__(
             strides=strides,
             in_channels=in_channels,
             act=act,
-            depthwise=depthwise,
+            conv_type=conv_type,
             stage=stage,
             obj_loss_type=obj_loss_type,
             reg_loss_type=reg_loss_type,
diff --git a/easycv/models/detection/detectors/yolox/yolo_head.py b/easycv/models/detection/detectors/yolox/yolo_head.py
index 28de2cad..d01bc191 100644
--- a/easycv/models/detection/detectors/yolox/yolo_head.py
+++ b/easycv/models/detection/detectors/yolox/yolo_head.py
@@ -16,7 +16,7 @@ def __init__(
         strides=[8, 16, 32],
         in_channels=[256, 512, 1024],
         act='silu',
-        depthwise=False,
+        conv_type='conv',
         stage='CLOUD',
         obj_loss_type='BCE',
         reg_loss_type='giou',
@@ -40,7 +40,7 @@ def __init__(
             strides=strides,
             in_channels=in_channels,
             act=act,
-            depthwise=depthwise,
+            conv_type=conv_type,
             stage=stage,
             obj_loss_type=obj_loss_type,
             reg_loss_type=reg_loss_type,
diff --git a/easycv/models/detection/detectors/yolox/yolo_head_template.py b/easycv/models/detection/detectors/yolox/yolo_head_template.py
index 5aa09c14..016746f4 100644
--- a/easycv/models/detection/detectors/yolox/yolo_head_template.py
+++ b/easycv/models/detection/detectors/yolox/yolo_head_template.py
@@ -11,6 +11,7 @@
 from easycv.models.backbones.network_blocks import BaseConv, DWConv
 from easycv.models.detection.utils import bboxes_iou
 from easycv.models.loss import YOLOX_IOULoss
+from easycv.models.backbones.repvgg_yolox_backbone import RepVGGBlock
 
 
 class YOLOXHead_Template(nn.Module):
@@ -29,7 +30,7 @@ def __init__(self,
                  strides=[8, 16, 32],
                  in_channels=[256, 512, 1024],
                  act='silu',
-                 depthwise=False,
+                 conv_type='conv',
                  stage='CLOUD',
                  obj_loss_type='BCE',
                  reg_loss_type='giou',
@@ -61,7 +62,19 @@ def __init__(self,
         self.obj_preds = nn.ModuleList()
         self.stems = nn.ModuleList()
 
-        Conv = DWConv if depthwise else BaseConv
+
+        default_conv_type_list = ['conv', 'dwconv', 'repconv']
+
+        if conv_type not in default_conv_type_list:
+            logging.warning(
+                'YOLOX-PAI tood head conv_type must be in [conv, dwconv, repconv], otherwise repconv is used as default')
+            conv_type = 'repconv'
+        if conv_type == 'conv':
+            Conv = BaseConv
+        if conv_type == 'dwconv':
+            Conv = DWConv
+        if conv_type == 'repconv':
+            Conv = RepVGGBlock
 
         for i in range(len(in_channels)):
             self.stems.append(
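
The branching above can equivalently be expressed as a lookup table. A hedged sketch, assuming the same three block classes that the file already imports:

import logging

from easycv.models.backbones.network_blocks import BaseConv, DWConv
from easycv.models.backbones.repvgg_yolox_backbone import RepVGGBlock

CONV_TYPE_MAP = {'conv': BaseConv, 'dwconv': DWConv, 'repconv': RepVGGBlock}

def resolve_conv(conv_type, default='repconv'):
    # Fall back to the default block type when an unknown conv_type is given.
    if conv_type not in CONV_TYPE_MAP:
        logging.warning('conv_type must be in %s, falling back to %s',
                        list(CONV_TYPE_MAP), default)
        conv_type = default
    return CONV_TYPE_MAP[conv_type]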
diff --git a/easycv/models/detection/utils/boxes.py b/easycv/models/detection/utils/boxes.py
index 9fb9771e..f4fb0d41 100644
--- a/easycv/models/detection/utils/boxes.py
+++ b/easycv/models/detection/utils/boxes.py
@@ -160,8 +160,7 @@ def bbox_overlaps(bboxes1,
                   bboxes2,
                   mode='iou',
                   is_aligned=False,
-                  eps=1e-6,
-                  xyxy=True):
+                  eps=1e-6):
     """Calculate overlap between two set of bboxes.
 
     FP16 Contributed by https://github.com/open-mmlab/mmdetection/pull/4889
@@ -283,10 +282,6 @@ def bbox_overlaps(bboxes1,
     assert (bboxes1.size(-1) == 4 or bboxes1.size(0) == 0)
     assert (bboxes2.size(-1) == 4 or bboxes2.size(0) == 0)
 
-    if not xyxy:
-        bboxes1 = box_cxcywh_to_xyxy(bboxes1)
-        bboxes2 = box_cxcywh_to_xyxy(bboxes2)
-
     # Batch dim must be the same
     # Batch dim: (B1, B2, ... Bn)
     assert bboxes1.shape[:-2] == bboxes2.shape[:-2]
diff --git a/easycv/models/loss/iou_loss.py b/easycv/models/loss/iou_loss.py
index 0cf1ef18..42a6ffce 100644
--- a/easycv/models/loss/iou_loss.py
+++ b/easycv/models/loss/iou_loss.py
@@ -14,7 +14,7 @@
 
 @mmcv.jit(derivate=True, coderize=True)
 @weighted_loss
-def iou_loss(pred, target, linear=False, mode='log', eps=1e-6, xyxy=True):
+def iou_loss(pred, target, linear=False, mode='log', eps=1e-6):
     """IoU loss.
 
     Computing the IoU loss between a set of predicted bboxes and target bboxes.
@@ -40,7 +40,7 @@ def iou_loss(pred, target, linear=False, mode='log', eps=1e-6, xyxy=True):
                       'iou_loss is deprecated, please use "mode=`linear`" '
                       'instead.')
     ious = bbox_overlaps(
-        pred, target, is_aligned=True, xyxy=xyxy).clamp(min=eps)
+        pred, target, is_aligned=True).clamp(min=eps)
     if mode == 'linear':
         loss = 1 - ious
     elif mode == 'square':
@@ -54,7 +54,7 @@ def iou_loss(pred, target, linear=False, mode='log', eps=1e-6, xyxy=True):
 
 @mmcv.jit(derivate=True, coderize=True)
 @weighted_loss
-def giou_loss(pred, target, eps=1e-7, xyxy=True):
+def giou_loss(pred, target, eps=1e-7):
     r"""`Generalized Intersection over Union: A Metric and A Loss for Bounding
     Box Regression <https://arxiv.org/abs/1902.09630>`_.
 
@@ -68,7 +68,7 @@ def giou_loss(pred, target, eps=1e-7, xyxy=True):
         Tensor: Loss tensor.
     """
     gious = bbox_overlaps(
-        pred, target, mode='giou', is_aligned=True, eps=eps, xyxy=xyxy)
+        pred, target, mode='giou', is_aligned=True, eps=eps)
     loss = 1 - gious
     return loss
 
@@ -255,7 +255,6 @@ def forward(self,
                 weight=None,
                 avg_factor=None,
                 reduction_override=None,
-                xyxy=True,
                 **kwargs):
         """Forward function.
 
@@ -292,7 +291,6 @@ def forward(self,
             eps=self.eps,
             reduction=reduction,
             avg_factor=avg_factor,
-            xyxy=xyxy,
             **kwargs)
         return loss
 
@@ -312,7 +310,6 @@ def forward(self,
                 weight=None,
                 avg_factor=None,
                 reduction_override=None,
-                xyxy=True,
                 **kwargs):
         if weight is not None and not torch.any(weight > 0):
             if pred.dim() == weight.dim() + 1:
@@ -334,6 +331,5 @@ def forward(self,
             eps=self.eps,
             reduction=reduction,
             avg_factor=avg_factor,
-            xyxy=xyxy,
             **kwargs)
         return loss
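
Since the xyxy flag is dropped here, bbox_overlaps and the losses built on it now assume corner-format (x1, y1, x2, y2) boxes. A small hedged sketch with illustrative values:

import torch

from easycv.models.loss.iou_loss import giou_loss

# Both tensors are already in xyxy format; cxcywh boxes would need to be
# converted by the caller before computing the loss.
pred = torch.tensor([[10., 10., 50., 50.]])
target = torch.tensor([[12., 8., 48., 52.]])
loss = giou_loss(pred, target)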
diff --git a/easycv/utils/mmlab_utils.py b/easycv/utils/mmlab_utils.py
index bff3040a..db7e94e6 100644
--- a/easycv/utils/mmlab_utils.py
+++ b/easycv/utils/mmlab_utils.py
@@ -14,7 +14,7 @@
 
 try:
     from mmcv.runner.hooks import HOOKS
-    HOOKS._module_dict.pop('YOLOXLrUpdaterHook', None)
+    # HOOKS._module_dict.pop('YOLOXLrUpdaterHook', None)
     from mmdet.models.builder import MODELS as MMMODELS
     from mmdet.models.builder import BACKBONES as MMBACKBONES
     from mmdet.models.builder import NECKS as MMNECKS
diff --git a/tools/eval.py b/tools/eval.py
index ca2167b3..9e1cf19b 100644
--- a/tools/eval.py
+++ b/tools/eval.py
@@ -32,6 +32,27 @@
 from easycv.utils.mmlab_utils import dynamic_adapt_for_mmlab
 
 from easycv.utils.setup_env import setup_multi_processes
+import logging
+from easycv.models.backbones.repvgg_yolox_backbone import RepVGGBlock
+
+
+def reparameterize_models(model):
+    """ reparameterize model for inference, especially forf
+            1. rep conv block : merge 3x3 weight 1x1 weights
+        call module switch_to_deploy recursively
+    Args:
+        model: nn.Module
+    """
+    reparameterize_count = 0
+    for layer in model.modules():
+        if isinstance(layer, RepVGGBlock):
+            reparameterize_count += 1
+            layer.switch_to_deploy()
+    logging.info(
+        'export: PAI-export reparameterized {} RepVGGBlock(s) and switched them to deploy mode'
+        .format(reparameterize_count))
+    print('reparam:', reparameterize_count)
+    return model
 
 
 def parse_args():
@@ -185,6 +206,8 @@ def main():
     print(f'use device {device}')
     checkpoint = load_checkpoint(model, args.checkpoint, map_location=device)
 
+    model = reparameterize_models(model)
+
     model.to(device)
     # if args.fuse_conv_bn:
     #     model = fuse_module(model)
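
A hedged sanity-check sketch for the reparameterization step added above: after switch_to_deploy, a RepVGGBlock in eval mode should produce (numerically) the same output as its multi-branch form. Constructor arguments mirror their usage elsewhere in this patch series; shapes are illustrative.

import torch

from easycv.models.backbones.repvgg_yolox_backbone import RepVGGBlock

block = RepVGGBlock(in_channels=16, out_channels=16, ksize=3, stride=1).eval()
x = torch.randn(1, 16, 32, 32)
with torch.no_grad():
    y_multi_branch = block(x)
    block.switch_to_deploy()          # fuse 3x3 / 1x1 / identity branches
    y_deploy = block(x)
print('max abs diff:', (y_multi_branch - y_deploy).abs().max().item())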

From 600e2c2c23dbdafd6348b284fe8b7fbdd3ebc891 Mon Sep 17 00:00:00 2001
From: zouxinyi0625 <zouxinyi.zxy@alibaba-inc.com>
Date: Wed, 17 Aug 2022 14:48:36 +0800
Subject: [PATCH 47/69] fix ut

---
 .../yolox/yolox_s_8xb16_300e_coco.py          |  2 +-
 .../yolox_s_8xb16_300e_coco_asff_reptood2.py  |  3 +-
 .../yolox/yolox_s_8xb16_300e_coco_reptood3.py |  3 +-
 convert_new.py                                | 66 +++++----------
 easycv/apis/export.py                         | 81 +++++++++++--------
 .../models/backbones/repvgg_yolox_backbone.py |  5 +-
 .../models/detection/detectors/detection.py   |  1 +
 .../detection/detectors/yolox/postprocess.py  |  3 +
 .../models/detection/detectors/yolox/test.py  | 63 ++++++++-------
 .../detection/detectors/yolox/yolo_head.py    |  2 -
 .../detectors/yolox/yolo_head_template.py     | 32 ++++----
 .../models/detection/detectors/yolox/yolox.py | 10 ++-
 easycv/models/detection/utils/boxes.py        |  7 +-
 easycv/models/loss/iou_loss.py                |  6 +-
 easycv/predictors/detector.py                 | 38 ++++-----
 easycv/toolkit/blade/__init__.py              |  2 +-
 easycv/toolkit/blade/cv_blade_utils.py        |  3 -
 easycv/toolkit/blade/trt_plugin_utils.py      | 26 +++---
 easycv/utils/mmlab_utils.py                   |  2 +-
 numeric_test.py                               | 55 -------------
 tests/apis/test_export_blade.py               | 27 ++++---
 21 files changed, 180 insertions(+), 257 deletions(-)
 delete mode 100644 numeric_test.py

diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
index 29b20ecf..9a307571 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
@@ -192,4 +192,4 @@
         # dict(type='WandbLoggerHookV2'),
     ])
 
-export = dict(use_jit=True, export_blade=True, end2end=False, batch_size=32)
+export = dict(use_jit=True, export_blade=False, end2end=True, batch_size=1)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_reptood2.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_reptood2.py
index 016e3ee1..8b4eefba 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_reptood2.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_reptood2.py
@@ -20,8 +20,7 @@
         num_classes=80,
         conv_type='repconv',
         la_down_rate=8,
-        stacked_convs=2
-    ))
+        stacked_convs=2))
 
 # s m l x
 img_scale = (672, 672)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_reptood3.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_reptood3.py
index af904214..eea00cdf 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_reptood3.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_reptood3.py
@@ -17,8 +17,7 @@
         obj_loss_type='BCE',
         reg_loss_type='giou',
         num_classes=80,
-        conv_type='repconv'
-    ))
+        conv_type='repconv'))
 
 # s m l x
 img_scale = (672, 672)
diff --git a/convert_new.py b/convert_new.py
index d7cd31b8..b3b0ced1 100644
--- a/convert_new.py
+++ b/convert_new.py
@@ -1,12 +1,12 @@
 # convert an old checkpoint's backbone keys to the new naming scheme
+import torch
+
+from easycv.models import build_model
 from easycv.utils.checkpoint import load_checkpoint
 from easycv.utils.config_tools import (CONFIG_TEMPLATE_ZOO,
                                        mmcv_config_fromfile, rebuild_config)
-import torch
-from easycv.models import build_model
-
 
-if __name__=='__main__':
+if __name__ == '__main__':
     # cfg_path = '/apsara/xinyi.zxy/code/pr154/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood3.py'
     cfg_path = '/apsara/xinyi.zxy/code/pr154/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_reptood2.py'
     cfg = mmcv_config_fromfile(cfg_path)
@@ -33,52 +33,23 @@
     #     of2.writelines(key + '\n')
 
     key_ori = [
-        'backbone.stem',
-        'ERBlock_2.0',
-        'ERBlock_2.1.conv1',
-        'ERBlock_2.1.block.0',
-        'ERBlock_3.0',
-        'ERBlock_3.1.conv1',
-        'ERBlock_3.1.block.0',
-        'ERBlock_3.1.block.1',
-        'ERBlock_3.1.block.2',
-        'ERBlock_4.0',
-        'ERBlock_4.1.conv1',
-        'ERBlock_4.1.block.0',
-        'ERBlock_4.1.block.1',
-        'ERBlock_4.1.block.2',
-        'ERBlock_4.1.block.3',
-        'ERBlock_4.1.block.4',
-        'ERBlock_5.0',
-        'ERBlock_5.1.conv1',
-        'ERBlock_5.1.block.0',
-        'ERBlock_5.2'
+        'backbone.stem', 'ERBlock_2.0', 'ERBlock_2.1.conv1',
+        'ERBlock_2.1.block.0', 'ERBlock_3.0', 'ERBlock_3.1.conv1',
+        'ERBlock_3.1.block.0', 'ERBlock_3.1.block.1', 'ERBlock_3.1.block.2',
+        'ERBlock_4.0', 'ERBlock_4.1.conv1', 'ERBlock_4.1.block.0',
+        'ERBlock_4.1.block.1', 'ERBlock_4.1.block.2', 'ERBlock_4.1.block.3',
+        'ERBlock_4.1.block.4', 'ERBlock_5.0', 'ERBlock_5.1.conv1',
+        'ERBlock_5.1.block.0', 'ERBlock_5.2'
     ]
 
     key_new = [
-        'backbone.stage0',
-        'stage1.0',
-        'stage1.1',
-        'stage1.2',
-        'stage2.0',
-        'stage2.1',
-        'stage2.2',
-        'stage2.3',
-        'stage2.4',
-        'stage3.0',
-        'stage3.1',
-        'stage3.2',
-        'stage3.3',
-        'stage3.4',
-        'stage3.5',
-        'stage3.6',
-        'stage4.0',
-        'stage4.1',
-        'stage4.2',
-        'stage4.3'
+        'backbone.stage0', 'stage1.0', 'stage1.1', 'stage1.2', 'stage2.0',
+        'stage2.1', 'stage2.2', 'stage2.3', 'stage2.4', 'stage3.0', 'stage3.1',
+        'stage3.2', 'stage3.3', 'stage3.4', 'stage3.5', 'stage3.6', 'stage4.0',
+        'stage4.1', 'stage4.2', 'stage4.3'
     ]
 
-    print(len(key_ori)==len(key_new))
+    print(len(key_ori) == len(key_new))
 
     for i, key in enumerate(pretrain_model_state):
         find = False
@@ -87,7 +58,8 @@
                 find = True
                 break
         if find:
-            model_state_dict[key.replace(t_k,key_new[t_i])] = pretrain_model_state[key]
+            model_state_dict[key.replace(
+                t_k, key_new[t_i])] = pretrain_model_state[key]
         else:
             model_state_dict[key] = pretrain_model_state[key]
 
@@ -103,4 +75,4 @@
 
     model.load_state_dict(pretrain_model_state_new)
     #
-    # exit()
\ No newline at end of file
+    # exit()
diff --git a/easycv/apis/export.py b/easycv/apis/export.py
index b45b0f64..f462c08e 100644
--- a/easycv/apis/export.py
+++ b/easycv/apis/export.py
@@ -2,11 +2,11 @@
 import copy
 import json
 import logging
-import cv2
 from collections import OrderedDict
 from distutils.version import LooseVersion
 from typing import Callable, Dict, List, Optional, Tuple
 
+import cv2
 import torch
 import torchvision.transforms.functional as t_f
 from mmcv.utils import Config
@@ -19,7 +19,8 @@
 from easycv.utils.checkpoint import load_checkpoint
 
 __all__ = [
-    'export', 'PreProcess', 'DetPostProcess', 'End2endModelExportWrapper', 'reparameterize_models'
+    'export', 'PreProcess', 'DetPostProcess', 'End2endModelExportWrapper',
+    'reparameterize_models'
 ]
 
 
@@ -40,6 +41,7 @@ def reparameterize_models(model):
         .format(reparameterize_count))
     return model
 
+
 def export(cfg, ckpt_path, filename):
     """ export model for inference
 
@@ -53,7 +55,7 @@ def export(cfg, ckpt_path, filename):
         load_checkpoint(model, ckpt_path, map_location='cpu')
     else:
         cfg.model.backbone.pretrained = False
-    
+
     if isinstance(model, MOCO) or isinstance(model, DINO):
         _export_moco(model, cfg, filename)
     elif isinstance(model, MoBY):
@@ -70,6 +72,7 @@ def export(cfg, ckpt_path, filename):
     else:
         _export_common(model, cfg, filename)
 
+
 def _export_common(model, cfg, filename):
     """ export model, add cfg dict to checkpoint['meta']['config'] without process
 
@@ -179,7 +182,7 @@ def _export_yolox(model, cfg, filename):
     if hasattr(cfg, 'export') and (getattr(cfg.export, 'use_jit', False) or
                                    getattr(cfg.export, 'export_blade', False)):
 
-        # only when we use jit or blade, we need to reparameterize_models before export        
+        # reparameterize the model before export only when jit or blade export is used
         model = reparameterize_models(model)
         device = 'cuda' if torch.cuda.is_available() else 'cpu'
         model = copy.deepcopy(model)
@@ -192,7 +195,7 @@ def _export_yolox(model, cfg, filename):
         static_opt = cfg.export.get('static_opt', True)
         use_blade = getattr(cfg.export, 'export_blade', False)
         use_trt_efficientnms = cfg.export.get('use_trt_efficientnms', False)
-        
+
         # assert image scale and assgin input
         img_scale = cfg.get('img_scale', (640, 640))
 
@@ -209,35 +212,46 @@ def _export_yolox(model, cfg, filename):
                 use_trt_efficientnms == False
             ), 'Export YoloX predictor use_trt_efficientnms=True only when use static_opt=True!'
 
-        # ignore DetPostProcess when use_trt_efficientnms 
+        # ignore DetPostProcess when use_trt_efficientnms
         preprocess_fn = None
         postprocess_fn = None
         if end2end:
             preprocess_fn = PreProcess(target_size=img_scale, keep_ratio=True)
-            postprocess_fn= DetPostProcess(max_det=100, score_thresh=0.5)
-            # use_trt_efficientnms = detection.boxes.postprocess + DetPostProcess
+            postprocess_fn = DetPostProcess(max_det=100, score_thresh=0.5)
+
             if use_trt_efficientnms:
-                logging.warning('PAI-YOLOX: use_trt_efficientnms=True during export, we drop DetPostProcess, because trt_efficientnms = detection.boxes.postprocess + DetPostProcess!')
+                logging.warning(
+                    'PAI-YOLOX: use_trt_efficientnms=True during export, we drop DetPostProcess, because trt_efficientnms = detection.boxes.postprocess + DetPostProcess!'
+                )
                 postprocess_fn = None
-            
+
             if use_blade:
-                logging.warning('PAI-YOLOX: End2endModelExportWrapper with preprocess_fn can\'t optimize by blade !')
+                logging.warning(
+                    'PAI-YOLOX: End2endModelExportWrapper with preprocess_fn can\'t optimize by blade !'
+                )
                 preprocess_fn = None
 
-
         # set model use_trt_efficientnms
         if use_trt_efficientnms:
             from easycv.toolkit.blade import create_tensorrt_efficientnms
             if hasattr(model, 'get_nmsboxes_num'):
                 nmsbox_num = int(model.get_nmsboxes_num(img_scale))
             else:
-                logging.warning('PAI-YOLOX: use_trt_efficientnms encounter model has no attr named get_nmsboxes_num, use 8400 as default!')
+                logging.warning(
+                    'PAI-YOLOX: use_trt_efficientnms is enabled but the model has no attr get_nmsboxes_num, using 8400 as default!'
+                )
                 nmsbox_num = 8400
 
-            tmp_example_scores = torch.randn([batch_size, nmsbox_num, 4 + 1 + len(cfg.CLASSES)],
-                                    dtype=torch.float32)
-            logging.warning('PAI-YOLOX: use_trt_efficientnms with staic shape [{}, {}, {}]'.format(batch_size, nmsbox_num, 4 + 1 + len(cfg.CLASSES)))
-            model.trt_efficientnms = create_tensorrt_efficientnms(tmp_example_scores, iou_thres=model.nms_thre, score_thres=model.test_conf)
+            tmp_example_scores = torch.randn(
+                [batch_size, nmsbox_num, 4 + 1 + len(cfg.CLASSES)],
+                dtype=torch.float32)
+            logging.warning(
+                'PAI-YOLOX: use_trt_efficientnms with static shape [{}, {}, {}]'
+                .format(batch_size, nmsbox_num, 4 + 1 + len(cfg.CLASSES)))
+            model.trt_efficientnms = create_tensorrt_efficientnms(
+                tmp_example_scores,
+                iou_thres=model.nms_thre,
+                score_thres=model.test_conf)
             model.use_trt_efficientnms = True
 
         model.eval()
@@ -263,25 +277,25 @@ def _export_yolox(model, cfg, filename):
         if getattr(cfg.export, 'export_blade', False):
             blade_config = cfg.export.get(
                 'blade_config',
-                dict(enable_fp16=True, 
-                fp16_fallback_op_ratio=0.3))
+                dict(enable_fp16=True, fp16_fallback_op_ratio=0.3))
 
             from easycv.toolkit.blade import blade_env_assert, blade_optimize
             assert blade_env_assert()
 
-            # if end2end:
-            #     input = 255 * torch.rand((batch_size,) + img_scale + (3,))
-            
+            # optimize model with blade
             yolox_blade = blade_optimize(
                 speed_test_model=model,
                 model=yolox_trace,
-                inputs=(input.to(device),),
+                inputs=(input.to(device), ),
                 blade_config=blade_config,
                 static_opt=static_opt)
-            
-            tpre_input = 255 * torch.rand((batch_size,) + img_scale + (3,))
-            tpre = PreprocessExportWrapper(example_inputs=tpre_input.to(device),
-                preprocess_fn=PreProcess(target_size=img_scale, keep_ratio=True))
+
+            # save preprocess jit model to accelerate the preprocess procedure
+            tpre_input = 255 * torch.rand((batch_size, ) + img_scale + (3, ))
+            tpre = PreprocessExportWrapper(
+                example_inputs=tpre_input.to(device),
+                preprocess_fn=PreProcess(
+                    target_size=img_scale, keep_ratio=True))
             tpre.eval().to(device)
             preprocess = torch.jit.script(tpre)
             with io.open(filename + '.preprocess', 'wb') as prefile:
@@ -564,7 +578,6 @@ def __call__(
             # rgb2bgr
             image = image[:, torch.tensor([2, 1, 0]), :, :]
 
-            # image = torch.unsqueeze(image, 0)
             ori_h, ori_w = image.shape[-2:]
 
             mean = [123.675, 116.28, 103.53]
@@ -599,8 +612,6 @@ def __call__(
                 'img_shape': (float(h), float(w))
             }
 
-            # print('outimg.shape ', out_image.shape)
-
             return out_image, output_info
 
     @torch.jit.script
@@ -720,7 +731,7 @@ def __init__(self,
         self.example_inputs = example_inputs
         self.preprocess_fn = preprocess_fn
         self.postprocess_fn = postprocess_fn
-        print("fucking!!!! : ", self.preprocess_fn , self.postprocess_fn)
+
         self.trace_model = trace_model
         if self.trace_model:
             self.trace_module()
@@ -754,6 +765,11 @@ def forward(self, image):
 
 
 class PreprocessExportWrapper(torch.nn.Module):
+    """
+        splits out the preprocess step so it can be wrapped as a standalone preprocess jit model;
+        the preprocess procedure cannot be optimized in an end2end blade model due to dynamic shape problems
+    """
+
     def __init__(self,
                  example_inputs,
                  preprocess_fn: Optional[Callable] = None) -> None:
@@ -765,7 +781,6 @@ def forward(self, image):
             output = self.preprocess_fn(image)
             if isinstance(output, tuple):
                 image = output[0]
-                preprocess_outputs = output[1:]
             else:
                 image = output
-        return image
\ No newline at end of file
+        return image
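
A hedged sketch of how the two exported artifacts could be chained at inference time (file names are illustrative; the real loading logic lives in easycv/predictors/detector.py below):

import torch

device = 'cuda' if torch.cuda.is_available() else 'cpu'
# '.preprocess' holds the scripted PreProcess step, '.jit' the traced detector.
preprocess = torch.jit.load('export_yolox.preprocess', map_location=device)
detector = torch.jit.load('export_yolox.jit', map_location=device)

raw = (255 * torch.rand(1, 640, 640, 3)).to(device)  # HWC image batch
img = preprocess(raw)                                 # resize + normalize in jit
outputs = detector(img)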
diff --git a/easycv/models/backbones/repvgg_yolox_backbone.py b/easycv/models/backbones/repvgg_yolox_backbone.py
index 45ef9419..cf029563 100644
--- a/easycv/models/backbones/repvgg_yolox_backbone.py
+++ b/easycv/models/backbones/repvgg_yolox_backbone.py
@@ -338,8 +338,7 @@ def _make_stage(self,
                     add_ppf=False):
         blocks = []
         blocks.append(
-            RepVGGBlock(
-                in_channels, out_channels, ksize=3, stride=stride))
+            RepVGGBlock(in_channels, out_channels, ksize=3, stride=stride))
         for i in range(repeat):
             blocks.append(RepVGGBlock(out_channels, out_channels))
         if add_ppf:
@@ -357,4 +356,4 @@ def forward(self, x):
         outputs.append(x)
         x = self.stage4(x)
         outputs.append(x)
-        return tuple(outputs)
\ No newline at end of file
+        return tuple(outputs)
diff --git a/easycv/models/detection/detectors/detection.py b/easycv/models/detection/detectors/detection.py
index f525d168..fe91fbf8 100644
--- a/easycv/models/detection/detectors/detection.py
+++ b/easycv/models/detection/detectors/detection.py
@@ -8,6 +8,7 @@
 
 @MODELS.register_module
 class Detection(BaseModel):
+
     def __init__(self, backbone, head=None, neck=None, pretrained=True):
         super(Detection, self).__init__()
 
diff --git a/easycv/models/detection/detectors/yolox/postprocess.py b/easycv/models/detection/detectors/yolox/postprocess.py
index a8407be4..e7175086 100644
--- a/easycv/models/detection/detectors/yolox/postprocess.py
+++ b/easycv/models/detection/detectors/yolox/postprocess.py
@@ -3,6 +3,7 @@
 import torch
 from torch import nn
 
+
 class TRT8_NMS(torch.autograd.Function):
     '''TensorRT NMS operation'''
 
@@ -54,6 +55,7 @@ def symbolic(g,
         nums, boxes, scores, classes = out
         return nums, boxes, scores, classes
 
+
 class ONNX_TRT8(nn.Module):
     '''onnx module with TensorRT NMS operation.'''
 
@@ -85,6 +87,7 @@ def forward(self, x):
             self.score_activation, self.score_threshold)
         return num_det, det_boxes, det_scores, det_classes
 
+
 def create_tensorrt_postprocess(example_scores,
                                 iou_thres=0.45,
                                 score_thres=0.25):
diff --git a/easycv/models/detection/detectors/yolox/test.py b/easycv/models/detection/detectors/yolox/test.py
index d0d90b32..e2c8d7e4 100644
--- a/easycv/models/detection/detectors/yolox/test.py
+++ b/easycv/models/detection/detectors/yolox/test.py
@@ -1,21 +1,21 @@
 # from easycv.models.detection.detectors.yolox import YOLOX
-from easycv.models.detection.detectors.yolox.postprocess import create_tensorrt_postprocess
+import sys
+
+import numpy as np
 import torch
+from PIL import Image
 from torchvision.transforms import Compose
 
+from easycv.datasets.registry import PIPELINES
 from easycv.models import build_model
+from easycv.models.detection.detectors.yolox.postprocess import \
+    create_tensorrt_postprocess
+from easycv.models.detection.utils import postprocess
 from easycv.utils.checkpoint import load_checkpoint
 from easycv.utils.config_tools import mmcv_config_fromfile
 from easycv.utils.registry import build_from_cfg
-from easycv.datasets.registry import PIPELINES
-from easycv.models.detection.utils import postprocess
 
-
-import sys
-import numpy as np
-from PIL import Image
-
-if __name__=='__main__':
+if __name__ == '__main__':
     #a = YOLOX(decode_in_inference=False).eval()
     cfg = sys.argv[1]
     ckpt_path = sys.argv[2]
@@ -31,9 +31,10 @@
     pipeline = [build_from_cfg(p, PIPELINES) for p in test_pipeline]
     pipeline = Compose(pipeline)
 
-    # 8400 ishard code, need to reimplement to  sum(img_w / stride_i + img_h /stride_i) 
+    # 8400 is hard-coded; it should be computed as sum((img_w / stride_i) * (img_h / stride_i)) over all strides
     example_scores = torch.randn([1, 8400, 85], dtype=torch.float32)
-    trt_ext = create_tensorrt_postprocess(example_scores, iou_thres=model.nms_thre, score_thres=model.test_conf)
+    trt_ext = create_tensorrt_postprocess(
+        example_scores, iou_thres=model.nms_thre, score_thres=model.test_conf)
 
     # img_path = '/apsara/xinyi.zxy/data/coco/val2017/000000129062.jpg'
     img_path = '/apsara/xinyi.zxy/data/coco/val2017/000000254016.jpg'
@@ -51,9 +52,10 @@
     model.decode_in_inference = False
     # print(type(model), model.decode_in_inference)
     c = model.forward_export(img)
-    
+
     # print(type(c), c.shape)
-    print(model.test_conf, model.nms_thre, model.num_classes, model.decode_in_inference)
+    print(model.test_conf, model.nms_thre, model.num_classes,
+          model.decode_in_inference)
     tc = model.head.decode_outputs(c, c[0].type())
     # print(type(tc))
     # print(tc.shape)
@@ -62,25 +64,26 @@
 
     tcback = copy.deepcopy(tc)
 
-    tpa = postprocess(tc, model.num_classes, model.test_conf, model.nms_thre)[0]
+    tpa = postprocess(tc, model.num_classes, model.test_conf,
+                      model.nms_thre)[0]
     # print(tpa)
     tpa[:, 4] = tpa[:, 4] * tpa[:, 5]
     tpa[:, 5] = tpa[:, 6]
     tpa = tpa[:, :6]
     # print("fuck tpa:", len(tpa), tpa[0].shape)
-    box_a = tpa[:,:4]
-    score_a = tpa[:,4]
-    id_a = tpa[:,5]
+    box_a = tpa[:, :4]
+    score_a = tpa[:, 4]
+    id_a = tpa[:, 5]
     # print(tpa)
 
-    # trt_ext must be cuda 
+    # trt_ext must be cuda
     tcback = tcback
     tpb = trt_ext.forward(tcback)
     # print("fuck tpb:",len(tpb))
-     
+
     valid_length = min(len(tpa), tpb[2].shape[1])
     print(valid_length)
-    valid_length = min(valid_length,30)
+    valid_length = min(valid_length, 30)
 
     box_a = box_a[:valid_length]
     score_a = score_a[:valid_length]
@@ -90,16 +93,16 @@
     print(tpb[2].shape)
     print(tpb[3].shape)
 
-    box_b = tpb[1][:,:valid_length,:].cpu().view(box_a.shape)
-    score_b = tpb[2][:,:valid_length].cpu().view(score_a.shape)
-    id_b = tpb[3][:,:valid_length].cpu().view(id_a.shape)
-    
+    box_b = tpb[1][:, :valid_length, :].cpu().view(box_a.shape)
+    score_b = tpb[2][:, :valid_length].cpu().view(score_a.shape)
+    id_b = tpb[3][:, :valid_length].cpu().view(id_a.shape)
+
     def get_diff(input_a, input_b, name='score'):
-        print("name:", name)
-        print("shape:", input_a.shape)
-        print("max_diff  :",torch.max(input_a-input_b))
-        print("avg_diff  :",torch.mean(input_a-input_b))
-        print("totol_diff:",torch.sum(input_a-input_b))
+        print('name:', name)
+        print('shape:', input_a.shape)
+        print('max_diff  :', torch.max(input_a - input_b))
+        print('avg_diff  :', torch.mean(input_a - input_b))
+        print('total_diff:', torch.sum(input_a - input_b))
 
     get_diff(box_a, box_b, 'box')
     get_diff(score_a, score_b, 'score')
@@ -111,4 +114,4 @@ def get_diff(input_a, input_b, name='score'):
         img = Image.open(img_path)
         pred = TorchYoloXPredictor('models/predict.pt')
         m = pred.predict([img])
-        print(m)
\ No newline at end of file
+        print(m)
diff --git a/easycv/models/detection/detectors/yolox/yolo_head.py b/easycv/models/detection/detectors/yolox/yolo_head.py
index b7a52d4d..d01bc191 100644
--- a/easycv/models/detection/detectors/yolox/yolo_head.py
+++ b/easycv/models/detection/detectors/yolox/yolo_head.py
@@ -46,8 +46,6 @@ def __init__(
             reg_loss_type=reg_loss_type,
             decode_in_inference=decode_in_inference)
 
-
-
     def forward(self, xin, labels=None, imgs=None):
         outputs = []
         origin_preds = []
diff --git a/easycv/models/detection/detectors/yolox/yolo_head_template.py b/easycv/models/detection/detectors/yolox/yolo_head_template.py
index 1476b822..d28c3df1 100644
--- a/easycv/models/detection/detectors/yolox/yolo_head_template.py
+++ b/easycv/models/detection/detectors/yolox/yolo_head_template.py
@@ -12,7 +12,6 @@
 from easycv.models.backbones.repvgg_yolox_backbone import RepVGGBlock
 from easycv.models.detection.utils import bboxes_iou
 from easycv.models.loss import YOLOX_IOULoss
-from easycv.models.backbones.repvgg_yolox_backbone import RepVGGBlock
 
 
 class YOLOXHead_Template(nn.Module):
@@ -63,12 +62,12 @@ def __init__(self,
         self.obj_preds = nn.ModuleList()
         self.stems = nn.ModuleList()
 
-
         default_conv_type_list = ['conv', 'dwconv', 'repconv']
 
         if conv_type not in default_conv_type_list:
             logging.warning(
-                'YOLOX-PAI tood head conv_type must be in [conv, dwconv, repconv], otherwise repconv is used as default')
+                'YOLOX-PAI tood head conv_type must be in [conv, dwconv, repconv], otherwise repconv is used as default'
+            )
             conv_type = 'repconv'
         if conv_type == 'conv':
             Conv = BaseConv
@@ -88,35 +87,35 @@ def __init__(self,
                 ))
             self.cls_convs.append(
                 nn.Sequential(*[
-                    RepVGGBlock(
+                    Conv(
                         in_channels=int(256 * width),
                         out_channels=int(256 * width),
-                        # ksize=3,
-                        # stride=1,
+                        ksize=3,
+                        stride=1,
                         act=act,
                     ),
-                    RepVGGBlock(
+                    Conv(
                         in_channels=int(256 * width),
                         out_channels=int(256 * width),
-                        # ksize=3,
-                        # stride=1,
+                        ksize=3,
+                        stride=1,
                         act=act,
                     ),
                 ]))
             self.reg_convs.append(
                 nn.Sequential(*[
-                    RepVGGBlock(
+                    Conv(
                         in_channels=int(256 * width),
                         out_channels=int(256 * width),
-                        # ksize=3,
-                        # stride=1,
+                        ksize=3,
+                        stride=1,
                         act=act,
                     ),
-                    RepVGGBlock(
+                    Conv(
                         in_channels=int(256 * width),
                         out_channels=int(256 * width),
-                        # ksize=3,
-                        # stride=1,
+                        ksize=3,
+                        stride=1,
                         act=act,
                     ),
                 ]))
@@ -182,7 +181,8 @@ def get_nmsboxes_num(self, img_scale=(640, 640)):
 
         total_box_count = 0
         for stride in self.strides:
-            total_box_count+= (img_scale[0] / stride) * (img_scale[1] / stride)
+            total_box_count += (img_scale[0] / stride) * (
+                img_scale[1] / stride)
         return total_box_count
 
     @abstractmethod
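
A worked instance of the count returned above, assuming the default strides [8, 16, 32] and img_scale=(640, 640); it reproduces the 8400 value that test.py still hard-codes:

strides = [8, 16, 32]
img_scale = (640, 640)
total = sum((img_scale[0] / s) * (img_scale[1] / s) for s in strides)
print(total)  # 6400.0 + 1600.0 + 400.0 = 8400.0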
diff --git a/easycv/models/detection/detectors/yolox/yolox.py b/easycv/models/detection/detectors/yolox/yolox.py
index a5e65d63..20ca60d8 100644
--- a/easycv/models/detection/detectors/yolox/yolox.py
+++ b/easycv/models/detection/detectors/yolox/yolox.py
@@ -47,7 +47,7 @@ def __init__(self,
         self.num_classes = self.head.num_classes
         self.test_conf = test_conf
         self.nms_thre = nms_thre
-        self.use_trt_efficientnms = False  # TRT NMS only will be convert during export 
+        self.use_trt_efficientnms = False  # TRT NMS will only be converted during export
         self.trt_efficientnms = None
 
     def get_nmsboxes_num(self, img_scale=(640, 640)):
@@ -163,15 +163,17 @@ def forward_export(self, img):
         with torch.no_grad():
             fpn_outs = self.backbone(img)
             outputs = self.head(fpn_outs)
-            
+
             if self.head.decode_in_inference:
                 if self.use_trt_efficientnms:
                     if self.trt_efficientnms is not None:
                         outputs = self.trt_efficientnms.forward(outputs)
                     else:
-                        logging.error('PAI-YOLOX : using trt_efficientnms set to be True, but model has not attr(trt_efficientnms)')
+                        logging.error(
+                            'PAI-YOLOX : using trt_efficientnms set to be True, but model has not attr(trt_efficientnms)'
+                        )
                 else:
                     outputs = postprocess(outputs, self.num_classes,
-                                        self.test_conf, self.nms_thre)
+                                          self.test_conf, self.nms_thre)
 
         return outputs
diff --git a/easycv/models/detection/utils/boxes.py b/easycv/models/detection/utils/boxes.py
index 7f66a51a..4e22e994 100644
--- a/easycv/models/detection/utils/boxes.py
+++ b/easycv/models/detection/utils/boxes.py
@@ -39,6 +39,7 @@ def bboxes_iou(bboxes_a, bboxes_b, xyxy=True):
 # refer to easycv/models/detection/detectors/yolox/postprocess.py and test.py to rebuild a torch-blade-trtplugin NMS, which is checked by zhoulou in test.py
 # infer docker images is : registry.cn-shanghai.aliyuncs.com/pai-ai-test/eas-service:easycv_blade_181_export
 
+
 def postprocess(prediction, num_classes, conf_thre=0.7, nms_thre=0.45):
     box_corner = prediction.new(prediction.shape)
     box_corner[:, :, 0] = prediction[:, :, 0] - prediction[:, :, 2] / 2
@@ -153,11 +154,7 @@ def generalized_box_iou(boxes1, boxes2):
     return iou - (area - union) / area
 
 
-def bbox_overlaps(bboxes1,
-                  bboxes2,
-                  mode='iou',
-                  is_aligned=False,
-                  eps=1e-6):
+def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6):
     """Calculate overlap between two set of bboxes.
 
     FP16 Contributed by https://github.com/open-mmlab/mmdetection/pull/4889
diff --git a/easycv/models/loss/iou_loss.py b/easycv/models/loss/iou_loss.py
index 42a6ffce..8a4af4bb 100644
--- a/easycv/models/loss/iou_loss.py
+++ b/easycv/models/loss/iou_loss.py
@@ -39,8 +39,7 @@ def iou_loss(pred, target, linear=False, mode='log', eps=1e-6):
         warnings.warn('DeprecationWarning: Setting "linear=True" in '
                       'iou_loss is deprecated, please use "mode=`linear`" '
                       'instead.')
-    ious = bbox_overlaps(
-        pred, target, is_aligned=True).clamp(min=eps)
+    ious = bbox_overlaps(pred, target, is_aligned=True).clamp(min=eps)
     if mode == 'linear':
         loss = 1 - ious
     elif mode == 'square':
@@ -67,8 +66,7 @@ def giou_loss(pred, target, eps=1e-7):
     Return:
         Tensor: Loss tensor.
     """
-    gious = bbox_overlaps(
-        pred, target, mode='giou', is_aligned=True, eps=eps)
+    gious = bbox_overlaps(pred, target, mode='giou', is_aligned=True, eps=eps)
     loss = 1 - gious
     return loss
 
diff --git a/easycv/predictors/detector.py b/easycv/predictors/detector.py
index 884896a4..b12f2308 100644
--- a/easycv/predictors/detector.py
+++ b/easycv/predictors/detector.py
@@ -11,6 +11,7 @@
 from torch.hub import load_state_dict_from_url
 from torchvision.transforms import Compose
 
+from easycv.apis.export import reparameterize_models
 from easycv.core.visualization import imshow_bboxes
 from easycv.datasets.registry import PIPELINES
 from easycv.datasets.utils import replace_ImageToTensor
@@ -22,7 +23,6 @@
 from easycv.utils.constant import CACHE_DIR
 from easycv.utils.mmlab_utils import dynamic_adapt_for_mmlab
 from easycv.utils.registry import build_from_cfg
-from easycv.apis.export import reparameterize_models
 from .builder import PREDICTORS
 from .classifier import TorchClassifier
 
@@ -75,7 +75,8 @@ def __init__(self,
             'score_thresh'] if 'score_thresh' in model_config else score_thresh
 
         if self.use_jit:
-            preprocess_path = ".".join(model_path.split('.')[:-1] + ['preprocess'])
+            preprocess_path = '.'.join(
+                model_path.split('.')[:-1] + ['preprocess'])
             if os.path.exists(preprocess_path):
                 with io.open(preprocess_path, 'rb') as infile:
                     map_location = 'cpu' if self.device == 'cpu' else 'cuda'
@@ -121,7 +122,7 @@ def __init__(self,
             map_location = 'cpu' if self.device == 'cpu' else 'cuda'
             self.ckpt = load_checkpoint(
                 self.model, self.model_path, map_location=map_location)
-            
+
             self.model = reparameterize_models(self.model)
 
             self.model.to(self.device)
@@ -196,10 +197,10 @@ def predict(self, input_data_list, batch_size=-1, to_numpy=True):
 
                 if self.use_trt_efficientnms:
                     tmp_out = self.model(img)
-                    det_out={}
-                    det_out['detection_boxes']=tmp_out[1]
-                    det_out['detection_scores']=tmp_out[2]
-                    det_out['detection_classes']=tmp_out[3]
+                    det_out = {}
+                    det_out['detection_boxes'] = tmp_out[1]
+                    det_out['detection_scores'] = tmp_out[2]
+                    det_out['detection_classes'] = tmp_out[3]
                 else:
                     det_out = self.model(img)
 
@@ -217,21 +218,22 @@ def predict(self, input_data_list, batch_size=-1, to_numpy=True):
                 if to_numpy:
                     detection_scores = detection_scores.cpu().detach().numpy()
                     detection_boxes = detection_boxes.cpu().detach().numpy()
-                    detection_classes = detection_classes.cpu().detach().numpy()
+                    detection_classes = detection_classes.cpu().detach().numpy(
+                    )
             else:
-                # data_dict = {'img': img}
-                # data_dict = self.pipeline(data_dict)
-                # img = data_dict['img']
-                # img = torch.unsqueeze(img._data, 0).to(self.device)
-                # data_dict.pop('img')
+                data_dict = {'img': img}
+                data_dict = self.pipeline(data_dict)
+                img = data_dict['img']
+                img = torch.unsqueeze(img._data, 0).to(self.device)
+                data_dict.pop('img')
                 if self.traceable:
                     if self.use_trt_efficientnms:
                         with torch.no_grad():
                             tmp_out = self.model(img)
-                            det_out={}
-                            det_out['detection_boxes']=tmp_out[1]
-                            det_out['detection_scores']=tmp_out[2]
-                            det_out['detection_classes']=tmp_out[3]
+                            det_out = {}
+                            det_out['detection_boxes'] = tmp_out[1]
+                            det_out['detection_scores'] = tmp_out[2]
+                            det_out['detection_classes'] = tmp_out[3]
                     else:
                         with torch.no_grad():
                             det_out = self.post_assign(
@@ -262,7 +264,7 @@ def predict(self, input_data_list, batch_size=-1, to_numpy=True):
                 else:
                     detection_boxes = None
                     detection_classes = None
-            
+
             num_boxes = detection_classes.shape[
                 0] if detection_classes is not None else 0
 
diff --git a/easycv/toolkit/blade/__init__.py b/easycv/toolkit/blade/__init__.py
index ca94a3b4..48d65fd6 100644
--- a/easycv/toolkit/blade/__init__.py
+++ b/easycv/toolkit/blade/__init__.py
@@ -1,2 +1,2 @@
 from .cv_blade_utils import blade_env_assert, blade_optimize
-from .trt_plugin_utils import create_tensorrt_efficientnms
\ No newline at end of file
+from .trt_plugin_utils import create_tensorrt_efficientnms
diff --git a/easycv/toolkit/blade/cv_blade_utils.py b/easycv/toolkit/blade/cv_blade_utils.py
index c11be26f..e8df13a0 100644
--- a/easycv/toolkit/blade/cv_blade_utils.py
+++ b/easycv/toolkit/blade/cv_blade_utils.py
@@ -270,7 +270,6 @@ def blade_optimize(speed_test_model,
                 model_inputs=tuple(inputs),
             )
 
-
     if compute_cost:
         results = []
         inputs_t = inputs
@@ -296,8 +295,6 @@ def blade_optimize(speed_test_model,
         summary = pd.DataFrame(results)
         logging.warning(summary.to_markdown())
 
-    # print(opt_model.forward.code)
-    # print(opt_model.forward.graph)
     torch.cuda.empty_cache()
     # warm-up
     for k in range(warm_up_time):
diff --git a/easycv/toolkit/blade/trt_plugin_utils.py b/easycv/toolkit/blade/trt_plugin_utils.py
index 70df50e5..5aa996ee 100644
--- a/easycv/toolkit/blade/trt_plugin_utils.py
+++ b/easycv/toolkit/blade/trt_plugin_utils.py
@@ -1,13 +1,14 @@
 # This is a TensorRT Plugin Python Wrapper Link implementation, original plugin documents refers to
 # https://github.com/NVIDIA/TensorRT/tree/main/plugin/
-# We use python wrapper to build ONNX-TRTPlugin Engine and then wrapper as a jit script module, after this, 
-# we could replace some original model's OP with this plugin during Blade Export to speed up those are not 
+# We use a python wrapper to build an ONNX-TRTPlugin engine and then wrap it as a jit script module; after this,
+# we can replace some of the original model's OPs with this plugin during Blade export to speed up OPs that are not
 # well optimized by original Blade
-# Here we provide a TRTPlugin-EfficientNMS implementation 
+# Here we provide a TRTPlugin-EfficientNMS implementation
 
 import torch
 from torch import nn
 
+
 class TRT8_NMS(torch.autograd.Function):
     '''TensorRT NMS operation'''
 
@@ -59,6 +60,7 @@ def symbolic(g,
         nums, boxes, scores, classes = out
         return nums, boxes, scores, classes
 
+
 class ONNX_TRT8(nn.Module):
     '''onnx module with TensorRT NMS operation.'''
 
@@ -90,11 +92,12 @@ def forward(self, x):
             self.score_activation, self.score_threshold)
         return num_det, det_boxes, det_scores, det_classes
 
+
 def create_tensorrt_efficientnms(example_scores,
-                                iou_thres=0.45,
-                                score_thres=0.25):
+                                 iou_thres=0.45,
+                                 score_thres=0.25):
     """
-    
+    Build a TensorRT EfficientNMS engine from example scores and wrap it as a jit script module.
     """
     from torch_blade import tensorrt
     import torch_blade._torch_blade._backends as backends
@@ -151,14 +154,3 @@ def forward(self, x):
 
     trt_ext = torch.jit.script(Model(state))
     return trt_ext
-
-
-if __name__ == '__main__':
-    bs = 32
-    num_boxes = 100
-    num_classes = 2
-    example_scores = torch.randn([bs, num_boxes, 4 + 1 + num_classes],
-                                 dtype=torch.float32)
-    trt_ext = create_tensorrt_postprocess(example_scores)
-    out = trt_ext.forward(example_scores)
-    print(out)
diff --git a/easycv/utils/mmlab_utils.py b/easycv/utils/mmlab_utils.py
index db7e94e6..bff3040a 100644
--- a/easycv/utils/mmlab_utils.py
+++ b/easycv/utils/mmlab_utils.py
@@ -14,7 +14,7 @@
 
 try:
     from mmcv.runner.hooks import HOOKS
-    # HOOKS._module_dict.pop('YOLOXLrUpdaterHook', None)
+    HOOKS._module_dict.pop('YOLOXLrUpdaterHook', None)
     from mmdet.models.builder import MODELS as MMMODELS
     from mmdet.models.builder import BACKBONES as MMBACKBONES
     from mmdet.models.builder import NECKS as MMNECKS
diff --git a/numeric_test.py b/numeric_test.py
deleted file mode 100644
index 5c136a23..00000000
--- a/numeric_test.py
+++ /dev/null
@@ -1,55 +0,0 @@
-from easycv.models.detection.detectors.yolox.postprocess import create_tensorrt_postprocess
-import torch
-from torchvision.transforms import Compose
-
-from easycv.models import build_model
-from easycv.utils.checkpoint import load_checkpoint
-from easycv.utils.config_tools import mmcv_config_fromfile
-from easycv.utils.registry import build_from_cfg
-from easycv.datasets.registry import PIPELINES
-from easycv.models.detection.utils import postprocess
-
-import sys
-import numpy as np
-from PIL import Image
-import time
-
-from contextlib import contextmanager
-@contextmanager
-def timeit_context(name):
-    startTime = time.time()
-    yield
-    elapsedTime = time.time() - startTime
-    print('[{}] finished in {} ms'.format(name, int(elapsedTime * 1000)))
-
-
-def model_speed_test(name, img, use_trt_efficientnms=False):
-    pred = TorchYoloXPredictor(name, use_trt_efficientnms=use_trt_efficientnms)
-    for i in range(10):
-        m0 = pred.predict([img])
-    with timeit_context('{} speed test'.format(name)):
-        for i in range(100):
-            m0 = pred.predict([img]) 
-    print(m0[0]['detection_classes'])
-    print(m0[0]['detection_scores'])
-
-
-if __name__=='__main__':
-    if 1:
-        img_path = '/apsara/xinyi.zxy/data/coco/val2017/000000254016.jpg'
-        from easycv.predictors import TorchYoloXPredictor
-        img = Image.open(img_path)
-
-        # model_speed_test('models/output_bs1_e2e_f005.blade.jit', img)
-        model_speed_test('models/output_bs1_e2e_f005_trtnms.blade.blade', img, True)
-        # model_speed_test('models/output_bs1_e2e_noblade.pt', img)
-        # model_speed_test('models/output_bs1_e2e_noblade_trtnms.pt', img)
-        # model_speed_test('models/output_bs1_noe2e_noblade.pt', img)
-        # model_speed_test('models/output_bs1_noe2e_noblade_trtnms.pt', img)
-        
-        # model_speed_test('models/output_bs1_e2e_f005_trtnms.blade.jit', img, True)
-        # model_speed_test('models/output_bs1_noe2e_f030.blade.jit', img, False)
-        # model_speed_test('models/output_bs1_noe2e_f030.blade.jit', img, False)
-
-        # model_speed_test('models/output_bs1_e2e_f005_trtnms.blade.jit', img, False)
-        # model_speed_test('models/output_bs1_e2e_f005.blade.jit', img, False)
diff --git a/tests/apis/test_export_blade.py b/tests/apis/test_export_blade.py
index 09ed0e45..7346c2e4 100644
--- a/tests/apis/test_export_blade.py
+++ b/tests/apis/test_export_blade.py
@@ -55,19 +55,20 @@ def test_export_yolox_blade_nojit(self):
         self.assertTrue(os.path.exists(target_path + '.blade'))
         self.assertTrue(os.path.exists(target_path + '.blade.config.json'))
 
-    def test_export_yolox_blade_end2end(self):
-        config_file = 'configs/detection/yolox/yolox_s_8xb16_300e_coco.py'
-        cfg = mmcv_config_fromfile(config_file)
-        cfg.export = dict(use_jit=True, export_blade=True, end2end=True)
-        ori_ckpt = PRETRAINED_MODEL_YOLOXS_EXPORT
-
-        target_path = f'{self.tmp_dir}/export_yolox_s_epoch300_end2end'
-
-        export(cfg, ori_ckpt, target_path)
-        self.assertTrue(os.path.exists(target_path + '.jit'))
-        self.assertTrue(os.path.exists(target_path + '.jit.config.json'))
-        self.assertTrue(os.path.exists(target_path + '.blade'))
-        self.assertTrue(os.path.exists(target_path + '.blade.config.json'))
+    # need a trt env
+    # def test_export_yolox_blade_end2end(self):
+    #     config_file = 'configs/detection/yolox/yolox_s_8xb16_300e_coco.py'
+    #     cfg = mmcv_config_fromfile(config_file)
+    #     cfg.export = dict(use_jit=True, export_blade=True, end2end=True)
+    #     ori_ckpt = PRETRAINED_MODEL_YOLOXS_EXPORT
+    #
+    #     target_path = f'{self.tmp_dir}/export_yolox_s_epoch300_end2end'
+    #
+    #     export(cfg, ori_ckpt, target_path)
+    #     self.assertTrue(os.path.exists(target_path + '.jit'))
+    #     self.assertTrue(os.path.exists(target_path + '.jit.config.json'))
+    #     self.assertTrue(os.path.exists(target_path + '.blade'))
+    #     self.assertTrue(os.path.exists(target_path + '.blade.config.json'))
 
 
 if __name__ == '__main__':

From 68454fd5b1a5f765830bdc2044062650acceb13d Mon Sep 17 00:00:00 2001
From: zouxinyi0625 <zouxinyi.zxy@alibaba-inc.com>
Date: Wed, 17 Aug 2022 17:31:12 +0800
Subject: [PATCH 48/69] fix config bug

---
 .../detection/yolox/yolox_l_8xb8_300e_coco.py |  6 +--
 .../yolox/yolox_m_8xb16_300e_coco.py          |  6 +--
 .../yolox/yolox_nano_8xb16_300e_coco.py       |  6 +--
 .../yolox/yolox_s_8xb16_300e_coco.py          | 11 ++---
 .../yolox_s_8xb16_300e_coco_asff_reptood2.py  | 12 ++---
 .../yolox_s_8xb16_300e_coco_asff_tood3.py     | 10 ++--
 .../yolox_s_8xb16_300e_coco_asff_tood6.py     |  9 ++--
 .../yolox/yolox_s_8xb16_300e_coco_rep.py      |  8 +---
 .../yolox/yolox_s_8xb16_300e_coco_reptood3.py |  9 ++--
 .../yolox/yolox_s_8xb16_300e_coco_tood3.py    |  9 ++--
 .../yolox_s_8xb16_300e_coco_tood3_rep.py      |  9 ++--
 .../yolox/yolox_tiny_8xb16_300e_coco.py       |  8 ++--
 .../detection/detectors/yolox/yolo_pafpn.py   | 32 ++++++-------
 .../models/detection/detectors/yolox/yolox.py | 46 +++++++++++++++----
 14 files changed, 85 insertions(+), 96 deletions(-)

diff --git a/configs/detection/yolox/yolox_l_8xb8_300e_coco.py b/configs/detection/yolox/yolox_l_8xb8_300e_coco.py
index 0339f7ad..459a20ad 100644
--- a/configs/detection/yolox/yolox_l_8xb8_300e_coco.py
+++ b/configs/detection/yolox/yolox_l_8xb8_300e_coco.py
@@ -1,11 +1,7 @@
 _base_ = './yolox_s_8xb16_300e_coco.py'
 
 # model settings
-model = dict(
-    backbone=dict(
-        model_type='l',  # s m l x tiny nano
-    ),
-    head=dict(model_type='l', ))
+model = dict(model_type='l')
 
 data = dict(imgs_per_gpu=8, workers_per_gpu=4)
 
diff --git a/configs/detection/yolox/yolox_m_8xb16_300e_coco.py b/configs/detection/yolox/yolox_m_8xb16_300e_coco.py
index 1f0d2d90..3386d4d4 100644
--- a/configs/detection/yolox/yolox_m_8xb16_300e_coco.py
+++ b/configs/detection/yolox/yolox_m_8xb16_300e_coco.py
@@ -1,8 +1,4 @@
 _base_ = './yolox_s_8xb16_300e_coco.py'
 
 # model settings
-model = dict(
-    backbone=dict(
-        model_type='m',  # s m l x tiny nano
-    ),
-    head=dict(model_type='m', ))
+model = dict(model_type='m')
diff --git a/configs/detection/yolox/yolox_nano_8xb16_300e_coco.py b/configs/detection/yolox/yolox_nano_8xb16_300e_coco.py
index 92dc9d65..f942ec47 100644
--- a/configs/detection/yolox/yolox_nano_8xb16_300e_coco.py
+++ b/configs/detection/yolox/yolox_nano_8xb16_300e_coco.py
@@ -1,8 +1,4 @@
 _base_ = './yolox_tiny_8xb16_300e_coco.py'
 
 # model settings
-model = dict(
-    backbone=dict(
-        model_type='nano',  # s m l x tiny nano
-    ),
-    head=dict(model_type='nano', ))
+model = dict(model_type='nano')
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
index 9a307571..4aa52b81 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
@@ -5,18 +5,15 @@
     type='YOLOX',
     test_conf=0.01,
     nms_thre=0.65,
-    backbone=dict(
-        type='YOLOPAFPN',
-        backbone='CSPDarknet',
-        model_type='s',  # s m l x tiny nano
-        use_att=None,
-        neck='yolo'),
+    backbone='CSPDarknet',
+    model_type='s',  # s m l x tiny nano
     head=dict(
         type='YOLOXHead',
         model_type='s',
         obj_loss_type='BCE',
         reg_loss_type='giou',
-        num_classes=80))
+        num_classes=80)
+)
 
 # s m l x
 img_scale = (640, 640)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_reptood2.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_reptood2.py
index 8b4eefba..8d947f10 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_reptood2.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_reptood2.py
@@ -5,13 +5,11 @@
     type='YOLOX',
     test_conf=0.01,
     nms_thre=0.65,
-    backbone=dict(
-        type='YOLOPAFPN',
-        backbone='RepVGGYOLOX',
-        model_type='s',  # s m l x tiny nano
-        use_att='ASFF',
-        asff_channel=16,
-        neck='yolo'),
+    backbone='RepVGGYOLOX',
+    model_type='s',  # s m l x tiny nano
+    use_att='ASFF',
+    asff_channel=16,
+
     head=dict(
         type='TOODHead',
         model_type='s',
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood3.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood3.py
index e49f3afc..4d23a519 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood3.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood3.py
@@ -5,12 +5,10 @@
     type='YOLOX',
     test_conf=0.01,
     nms_thre=0.65,
-    backbone=dict(
-        type='YOLOPAFPN',
-        backbone='RepVGGYOLOX',
-        model_type='s',  # s m l x tiny nano
-        use_att='ASFF',
-        neck='yolo'),
+    backbone='RepVGGYOLOX',
+    model_type='s',  # s m l x tiny nano
+    use_att='ASFF',
+
     head=dict(
         type='TOODHead',
         model_type='s',
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood6.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood6.py
index c59f811c..a0283bff 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood6.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood6.py
@@ -5,12 +5,9 @@
     type='YOLOX',
     test_conf=0.01,
     nms_thre=0.65,
-    backbone=dict(
-        type='YOLOPAFPN',
-        backbone='RepVGGYOLOX',
-        model_type='s',  # s m l x tiny nano
-        use_att='ASFF',
-        neck='yolo'),
+    backbone='RepVGGYOLOX',
+    model_type='s',  # s m l x tiny nano
+    use_att='ASFF',
     head=dict(
         type='TOODHead',
         model_type='s',
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_rep.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_rep.py
index ca26086e..032581a0 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_rep.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_rep.py
@@ -5,12 +5,8 @@
     type='YOLOX',
     test_conf=0.01,
     nms_thre=0.65,
-    backbone=dict(
-        type='YOLOPAFPN',
-        backbone='RepVGGYOLOX',
-        model_type='s',  # s m l x tiny nano
-        use_att=None,
-        neck='yolo'),
+    backbone='RepVGGYOLOX',
+    model_type='s',  # s m l x tiny nano
     head=dict(
         type='YOLOXHead',
         model_type='s',
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_reptood3.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_reptood3.py
index eea00cdf..054dbada 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_reptood3.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_reptood3.py
@@ -5,12 +5,9 @@
     type='YOLOX',
     test_conf=0.01,
     nms_thre=0.65,
-    backbone=dict(
-        type='YOLOPAFPN',
-        backbone='RepVGGYOLOX',
-        model_type='s',  # s m l x tiny nano
-        # use_att='ASFF',
-        neck='yolo'),
+    backbone='RepVGGYOLOX',
+    model_type='s',  # s m l x tiny nano
+
     head=dict(
         type='TOODHead',
         model_type='s',
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_tood3.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_tood3.py
index e534092f..0c28aaa9 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_tood3.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_tood3.py
@@ -5,12 +5,9 @@
     type='YOLOX',
     test_conf=0.01,
     nms_thre=0.65,
-    backbone=dict(
-        type='YOLOPAFPN',
-        backbone='RepVGGYOLOX',
-        model_type='s',  # s m l x tiny nano
-        use_att=None,
-        neck='yolo'),
+    backbone='RepVGGYOLOX',
+    model_type='s',  # s m l x tiny nano
+
     head=dict(
         type='TOODHead',
         model_type='s',
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_tood3_rep.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_tood3_rep.py
index 8e14d8d4..eedccb30 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_tood3_rep.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco_tood3_rep.py
@@ -5,12 +5,9 @@
     type='YOLOX',
     test_conf=0.01,
     nms_thre=0.65,
-    backbone=dict(
-        type='YOLOPAFPN',
-        backbone='RepVGGYOLOX',
-        model_type='s',  # s m l x tiny nano
-        use_att=None,
-        neck='yolo'),
+    backbone='RepVGGYOLOX',
+    model_type='s',  # s m l x tiny nano
+
     head=dict(
         type='TOODHead',
         model_type='s',
diff --git a/configs/detection/yolox/yolox_tiny_8xb16_300e_coco.py b/configs/detection/yolox/yolox_tiny_8xb16_300e_coco.py
index 76ab0b45..f348ce8d 100644
--- a/configs/detection/yolox/yolox_tiny_8xb16_300e_coco.py
+++ b/configs/detection/yolox/yolox_tiny_8xb16_300e_coco.py
@@ -1,11 +1,9 @@
 _base_ = './yolox_s_8xb16_300e_coco.py'
 
 # model settings
-model = dict(
-    backbone=dict(
-        model_type='tiny',  # s m l x tiny nano
-    ),
-    head=dict(model_type='tiny', ))
+# model settings
+model = dict(model_type='tiny')
+
 
 CLASSES = [
     'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
diff --git a/easycv/models/detection/detectors/yolox/yolo_pafpn.py b/easycv/models/detection/detectors/yolox/yolo_pafpn.py
index 25a34181..f41c2749 100644
--- a/easycv/models/detection/detectors/yolox/yolo_pafpn.py
+++ b/easycv/models/detection/detectors/yolox/yolo_pafpn.py
@@ -12,8 +12,6 @@
 from easycv.models.registry import BACKBONES
 from .asff import ASFF
 
-
-@BACKBONES.register_module
 class YOLOPAFPN(nn.Module):
     """
     YOLOv3 model. Darknet 53 is the default backbone of this model.
@@ -27,23 +25,23 @@ class YOLOPAFPN(nn.Module):
         'x': [1.33, 1.25]
     }
 
-    def __init__(self,
-                 model_type='s',
-                 in_features=('dark3', 'dark4', 'dark5'),
-                 in_channels=[256, 512, 1024],
-                 depthwise=False,
-                 act='silu',
-                 asff_channel=16,
-                 use_att=None,
-                 expand_kernel=3,
-                 backbone='CSPDarknet',
-                 neck='yolo',
-                 neck_mode='all'):
+    def __init__(
+            self,
+            depth=1.0,
+            width=1.0,
+            backbone='CSPDarknet',
+            neck = 'yolo',
+            neck_mode = 'all',
+            in_features=('dark3', 'dark4', 'dark5'),
+            in_channels=[256, 512, 1024],
+            depthwise=False,
+            act='silu',
+            use_att=None,
+            asff_channel=2,
+            expand_kernel=3
+    ):
         super().__init__()
 
-        depth = self.param_map[model_type][0]
-        width = self.param_map[model_type][1]
-
         # build backbone
         if backbone == 'CSPDarknet':
             self.backbone = CSPDarknet(
diff --git a/easycv/models/detection/detectors/yolox/yolox.py b/easycv/models/detection/detectors/yolox/yolox.py
index 20ca60d8..a89db7bd 100644
--- a/easycv/models/detection/detectors/yolox/yolox.py
+++ b/easycv/models/detection/detectors/yolox/yolox.py
@@ -8,10 +8,11 @@
 from torch import Tensor
 
 from easycv.models.base import BaseModel
-from easycv.models.builder import (MODELS, build_backbone, build_head,
-                                   build_neck)
+from easycv.models.builder import (MODELS, build_head)
 from easycv.models.detection.utils import postprocess
 
+from .yolo_pafpn import YOLOPAFPN
+
 
 def init_yolo(M):
     for m in M.modules():
@@ -27,20 +28,47 @@ class YOLOX(BaseModel):
     The network returns loss values from three YOLO layers during training
     and detection results during test.
     """
+    param_map = {
+        'nano': [0.33, 0.25],
+        'tiny': [0.33, 0.375],
+        's': [0.33, 0.5],
+        'm': [0.67, 0.75],
+        'l': [1.0, 1.0],
+        'x': [1.33, 1.25]
+    }
 
     def __init__(self,
-                 backbone,
-                 test_conf,
-                 nms_thre,
+                 model_type = 's',
+                 test_conf=0.01,
+                 nms_thre=0.65,
+                 backbone = 'CSPDarknet',
+                 use_att=None,
+                 asff_channel=2,
+                 neck='yolo',
+                 neck_mode='all',
                  head=None,
-                 neck=None,
                  pretrained=True):
         super(YOLOX, self).__init__()
+        print('in')
+        assert model_type in self.param_map, f'invalid model_type for yolox {model_type}, valid ones are {list(self.param_map.keys())}'
 
         self.pretrained = pretrained
-        self.backbone = build_backbone(backbone)
-        if neck is not None:
-            self.neck = build_neck(neck)
+
+        in_channels = [256, 512, 1024]
+        depth = self.param_map[model_type][0]
+        width = self.param_map[model_type][1]
+
+        self.backbone = YOLOPAFPN(
+            depth,
+            width,
+            backbone=backbone,
+            neck=neck,
+            neck_mode=neck_mode,
+            in_channels=in_channels,
+            asff_channel=asff_channel,
+            use_att=use_att
+        )
+
         self.head = build_head(head)
 
         self.apply(init_yolo)  # init_yolo(self)
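
For reference, a short sketch of how the flattened model dict is consumed after this
patch; build_model and mmcv_config_fromfile are existing EasyCV helpers (used elsewhere
in this series), and the comments only restate what the yolox.py diff above does:

    from easycv.models import build_model
    from easycv.utils.config_tools import mmcv_config_fromfile

    cfg = mmcv_config_fromfile('configs/detection/yolox/yolox_s_8xb16_300e_coco.py')
    # YOLOX.__init__ now resolves param_map[model_type] into (depth, width) and
    # instantiates YOLOPAFPN directly instead of going through build_backbone().
    model = build_model(cfg.model)
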

From a6236d3860ad4dbfa6ee15ac4811259666832059 Mon Sep 17 00:00:00 2001
From: zouxinyi0625 <zouxinyi.zxy@alibaba-inc.com>
Date: Thu, 18 Aug 2022 18:40:42 +0800
Subject: [PATCH 49/69] refactor export and restore yolox_edge
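
Besides restoring yolox_edge, the export path is reworked around a single export dict:
the old use_jit / export_blade flags become one export_type switch ('ori', 'jit' or
'blade'), with blade_config and use_trt_efficientnms as extra knobs. A sketch of the
new-style export config (values taken from the updated yolox_s config below, whose
commented-out lines enumerate the other tested combinations):

    export = dict(
        export_type='blade',  # one of 'ori', 'jit', 'blade'
        end2end=False,        # end-to-end variant (post-processing folded into the export)
        batch_size=1,
        blade_config=dict(enable_fp16=True, fp16_fallback_op_ratio=0.01),
        use_trt_efficientnms=False)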

---
 .../detection/yolox/yolox_l_8xb8_300e_coco.py |   2 +-
 .../yolox/yolox_m_8xb16_300e_coco.py          |   2 +-
 .../yolox/yolox_nano_8xb16_300e_coco.py       |   2 +-
 ...olox_pai_8xb16_300e_coco_asff_reptood3.py} |  13 +-
 .../yolox/yolox_s_8xb16_300e_coco.py          |  18 +-
 .../yolox_s_8xb16_300e_coco_asff_tood3.py     | 195 -----------
 .../yolox_s_8xb16_300e_coco_asff_tood6.py     | 195 -----------
 .../yolox/yolox_s_8xb16_300e_coco_rep.py      | 196 -----------
 .../yolox/yolox_s_8xb16_300e_coco_reptood3.py | 195 -----------
 .../yolox/yolox_s_8xb16_300e_coco_tood3.py    | 192 -----------
 .../yolox_s_8xb16_300e_coco_tood3_rep.py      | 193 -----------
 .../yolox/yolox_tiny_8xb16_300e_coco.py       |   4 +-
 .../detection/yolox/yolox_x_8xb8_300e_coco.py |   2 +-
 convert_new.py                                |  78 -----
 easycv/apis/export.py                         | 321 +++++++++---------
 easycv/datasets/detection/raw.py              |   3 -
 .../detection/detectors/yolox/yolo_pafpn.py   |  39 ++-
 .../models/detection/detectors/yolox/yolox.py |  16 +-
 .../detectors/yolox_edge/yolox_edge.py        |  54 ++-
 easycv/models/detection/utils/tensorrt_nms.py |   0
 easycv/predictors/detector.py                 |  20 +-
 numeric_test.py                               |  55 +++
 22 files changed, 323 insertions(+), 1472 deletions(-)
 rename configs/detection/yolox/{yolox_s_8xb16_300e_coco_asff_reptood2.py => yolox_pai_8xb16_300e_coco_asff_reptood3.py} (95%)
 delete mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood3.py
 delete mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood6.py
 delete mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_rep.py
 delete mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_reptood3.py
 delete mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_tood3.py
 delete mode 100644 configs/detection/yolox/yolox_s_8xb16_300e_coco_tood3_rep.py
 delete mode 100644 convert_new.py
 delete mode 100644 easycv/models/detection/utils/tensorrt_nms.py
 create mode 100644 numeric_test.py

diff --git a/configs/detection/yolox/yolox_l_8xb8_300e_coco.py b/configs/detection/yolox/yolox_l_8xb8_300e_coco.py
index 459a20ad..d83816d4 100644
--- a/configs/detection/yolox/yolox_l_8xb8_300e_coco.py
+++ b/configs/detection/yolox/yolox_l_8xb8_300e_coco.py
@@ -1,7 +1,7 @@
 _base_ = './yolox_s_8xb16_300e_coco.py'
 
 # model settings
-model = dict(model_type='l')
+model = dict(model_type='l', head=dict(model_type='l', ))
 
 data = dict(imgs_per_gpu=8, workers_per_gpu=4)
 
diff --git a/configs/detection/yolox/yolox_m_8xb16_300e_coco.py b/configs/detection/yolox/yolox_m_8xb16_300e_coco.py
index 3386d4d4..b90faf5a 100644
--- a/configs/detection/yolox/yolox_m_8xb16_300e_coco.py
+++ b/configs/detection/yolox/yolox_m_8xb16_300e_coco.py
@@ -1,4 +1,4 @@
 _base_ = './yolox_s_8xb16_300e_coco.py'
 
 # model settings
-model = dict(model_type='m')
+model = dict(model_type='m', head=dict(model_type='m', ))
diff --git a/configs/detection/yolox/yolox_nano_8xb16_300e_coco.py b/configs/detection/yolox/yolox_nano_8xb16_300e_coco.py
index f942ec47..4e730744 100644
--- a/configs/detection/yolox/yolox_nano_8xb16_300e_coco.py
+++ b/configs/detection/yolox/yolox_nano_8xb16_300e_coco.py
@@ -1,4 +1,4 @@
 _base_ = './yolox_tiny_8xb16_300e_coco.py'
 
 # model settings
-model = dict(model_type='nano')
+model = dict(model_type='nano', head=dict(model_type='nano', ))
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_reptood2.py b/configs/detection/yolox/yolox_pai_8xb16_300e_coco_asff_reptood3.py
similarity index 95%
rename from configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_reptood2.py
rename to configs/detection/yolox/yolox_pai_8xb16_300e_coco_asff_reptood3.py
index 8d947f10..e10862c0 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_reptood2.py
+++ b/configs/detection/yolox/yolox_pai_8xb16_300e_coco_asff_reptood3.py
@@ -9,7 +9,6 @@
     model_type='s',  # s m l x tiny nano
     use_att='ASFF',
     asff_channel=16,
-
     head=dict(
         type='TOODHead',
         model_type='s',
@@ -18,10 +17,11 @@
         num_classes=80,
         conv_type='repconv',
         la_down_rate=8,
-        stacked_convs=2))
+        decode_in_inference=True  # set to False when running speed tests
+    ))
 
 # s m l x
-img_scale = (672, 672)
+img_scale = (640, 640)
 random_size = (14, 26)
 scale_ratio = (0.1, 2)
 
@@ -47,10 +47,7 @@
 ]
 
 # dataset settings
-# data_root = '/apsarapangu/disk2/xinyi.zxy/data/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-# data_root = '/root/workspace/data/coco/'
-data_root = '/apsara/xinyi.zxy/data/coco/'
+data_root = 'data/coco/'
 
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
@@ -196,4 +193,4 @@
         # dict(type='WandbLoggerHookV2'),
     ])
 
-export = dict(use_jit=True, export_blade=True, end2end=False, batch_size=32)
+export = dict(use_jit=True, export_blade=True, end2end=False, batch_size=32, blade_config=dict(enable_fp16=True, fp16_fallback_op_ratio=0.01), use_trt_efficientnms=True)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
index 4aa52b81..8439c353 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
@@ -12,8 +12,8 @@
         model_type='s',
         obj_loss_type='BCE',
         reg_loss_type='giou',
-        num_classes=80)
-)
+        num_classes=80,
+        decode_in_inference=True))
 
 # s m l x
 img_scale = (640, 640)
@@ -42,8 +42,7 @@
 ]
 
 # dataset settings
-# data_root = '/apsarapangu/disk2/xinyi.zxy/data/coco/'
-data_root = '/apsara/xinyi.zxy/data/coco/'
+data_root = 'data/coco/'
 
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
@@ -189,4 +188,13 @@
         # dict(type='WandbLoggerHookV2'),
     ])
 
-export = dict(use_jit=True, export_blade=False, end2end=True, batch_size=1)
+
+# export = dict(export_type = 'ori', end2end = False,  batch_size=1, blade_config=dict(enable_fp16=True, fp16_fallback_op_ratio=0.01), use_trt_efficientnms=False)
+# export = dict(export_type = 'jit', end2end = False,  batch_size=1, blade_config=dict(enable_fp16=True, fp16_fallback_op_ratio=0.01), use_trt_efficientnms=False)
+export = dict(export_type = 'blade', end2end = False,  batch_size=1, blade_config=dict(enable_fp16=True, fp16_fallback_op_ratio=0.01), use_trt_efficientnms=False)
+# export = dict(export_type = 'jit', end2end = True,  batch_size=1, blade_config=dict(enable_fp16=True, fp16_fallback_op_ratio=0.01), use_trt_efficientnms=False)
+# export = dict(export_type = 'jit', end2end = True,  batch_size=1, blade_config=dict(enable_fp16=True, fp16_fallback_op_ratio=0.01), use_trt_efficientnms=True)
+# export = dict(export_type = 'blade', end2end = True,  batch_size=1, blade_config=dict(enable_fp16=True, fp16_fallback_op_ratio=0.01), use_trt_efficientnms=True)
+# export = dict(export_type = 'blade', end2end = True,  batch_size=1, blade_config=dict(enable_fp16=True, fp16_fallback_op_ratio=0.01), use_trt_efficientnms=False) # assert error
+# export = dict(export_type = 'jit', end2end = False,  batch_size=1, blade_config=dict(enable_fp16=True, fp16_fallback_op_ratio=0.01), use_trt_efficientnms=True)
+# export = dict(export_type = 'blade', end2end = False,  batch_size=1, blade_config=dict(enable_fp16=True, fp16_fallback_op_ratio=0.01), use_trt_efficientnms=True)
\ No newline at end of file
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood3.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood3.py
deleted file mode 100644
index 4d23a519..00000000
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood3.py
+++ /dev/null
@@ -1,195 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    test_conf=0.01,
-    nms_thre=0.65,
-    backbone='RepVGGYOLOX',
-    model_type='s',  # s m l x tiny nano
-    use_att='ASFF',
-
-    head=dict(
-        type='TOODHead',
-        model_type='s',
-        obj_loss_type='BCE',
-        reg_loss_type='giou',
-        num_classes=80))
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-# data_root = '/apsarapangu/disk2/xinyi.zxy/data/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-# data_root = '/root/workspace/data/coco/'
-data_root = '/apsara/xinyi.zxy/data/coco/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m x l; tiny nano will detele
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=True,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        test_mode=True,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponetial model average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=True, export_blade=True, end2end=False, batch_size=32)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood6.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood6.py
deleted file mode 100644
index a0283bff..00000000
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood6.py
+++ /dev/null
@@ -1,195 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    test_conf=0.01,
-    nms_thre=0.65,
-    backbone='RepVGGYOLOX',
-    model_type='s',  # s m l x tiny nano
-    use_att='ASFF',
-    head=dict(
-        type='TOODHead',
-        model_type='s',
-        obj_loss_type='BCE',
-        reg_loss_type='giou',
-        stacked_convs=6,
-        num_classes=80))
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-# data_root = '/apsarapangu/disk2/xinyi.zxy/data/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-# data_root = '/root/workspace/data/coco/'
-data_root = '/apsara/xinyi.zxy/data/coco/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m x l; tiny nano will detele
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=True,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        test_mode=True,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponetial model average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=True, export_blade=True, end2end=False, batch_size=32)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_rep.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_rep.py
deleted file mode 100644
index 032581a0..00000000
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_rep.py
+++ /dev/null
@@ -1,196 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    test_conf=0.01,
-    nms_thre=0.65,
-    backbone='RepVGGYOLOX',
-    model_type='s',  # s m l x tiny nano
-    head=dict(
-        type='YOLOXHead',
-        model_type='s',
-        obj_loss_type='BCE',
-        reg_loss_type='giou',
-        num_classes=80))
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-# data_root = '/apsarapangu/disk2/xinyi.zxy/data/coco/'
-data_root = '/apsara/xinyi.zxy/data/coco/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m x l; tiny nano will detele
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=True,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        test_mode=True,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponetial model average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=True,
-              export_blade=True,  # also export a blade-optimized model
-              end2end=False,      # whether to fold post-processing (NMS) into the export; plain jit + blade here
-              batch_size=1,       # batch size to trace with when static_opt=True
-              fp16_failback_ratio=0.05,   # ratio of layers allowed to fall back from fp16 to fp32
-              static_opt=True)    # use static-shape optimization; keep True
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_reptood3.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_reptood3.py
deleted file mode 100644
index 054dbada..00000000
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_reptood3.py
+++ /dev/null
@@ -1,195 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    test_conf=0.01,
-    nms_thre=0.65,
-    backbone='RepVGGYOLOX',
-    model_type='s',  # s m l x tiny nano
-
-    head=dict(
-        type='TOODHead',
-        model_type='s',
-        obj_loss_type='BCE',
-        reg_loss_type='giou',
-        num_classes=80,
-        conv_type='repconv'))
-
-# s m l x
-img_scale = (672, 672)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-# data_root = '/apsarapangu/disk2/xinyi.zxy/data/coco/'
-# data_root = '/mnt/data/nas/data/detection/coco/'
-# data_root = '/root/workspace/data/coco/'
-data_root = '/apsara/xinyi.zxy/data/coco/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m x l; tiny nano will detele
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=True,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        test_mode=True,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponetial model average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=True, export_blade=True, end2end=False, batch_size=32)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_tood3.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_tood3.py
deleted file mode 100644
index 0c28aaa9..00000000
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_tood3.py
+++ /dev/null
@@ -1,192 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    test_conf=0.01,
-    nms_thre=0.65,
-    backbone='RepVGGYOLOX',
-    model_type='s',  # s m l x tiny nano
-
-    head=dict(
-        type='TOODHead',
-        model_type='s',
-        obj_loss_type='BCE',
-        reg_loss_type='giou',
-        num_classes=80))
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-# data_root = '/apsarapangu/disk2/xinyi.zxy/data/coco/'
-data_root = '/mnt/data/nas/data/detection/coco/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m x l; tiny nano will detele
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=True,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        test_mode=True,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponetial model average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=False, export_blade=False, end2end=False)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco_tood3_rep.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco_tood3_rep.py
deleted file mode 100644
index eedccb30..00000000
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco_tood3_rep.py
+++ /dev/null
@@ -1,193 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    test_conf=0.01,
-    nms_thre=0.65,
-    backbone='RepVGGYOLOX',
-    model_type='s',  # s m l x tiny nano
-
-    head=dict(
-        type='TOODHead',
-        model_type='s',
-        obj_loss_type='BCE',
-        reg_loss_type='giou',
-        conv_type='repconv',
-        num_classes=80))
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-# data_root = '/apsarapangu/disk2/xinyi.zxy/data/coco/'
-data_root = '/apsarapangu/disk6/xinyi.zxy/coco/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m x l; tiny nano will detele
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=True,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        test_mode=True,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponetial model average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=False, export_blade=False, end2end=False)
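
For reference, configs like the one above are consumed through EasyCV's mmcv-style loader. A minimal sketch (config path illustrative) of loading such a config and building the model it describes:

# Minimal sketch: load a YOLOX config and build the model it defines.
# The helpers below also appear elsewhere in this patch series.
from easycv.models import build_model
from easycv.utils.config_tools import mmcv_config_fromfile

cfg = mmcv_config_fromfile('configs/detection/yolox/yolox_s_8xb16_300e_coco.py')
model = build_model(cfg.model)            # builds YOLOX from the `model` dict
model.eval()
print(cfg.img_scale, cfg.total_epochs)    # other top-level keys are plain attributes
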
diff --git a/configs/detection/yolox/yolox_tiny_8xb16_300e_coco.py b/configs/detection/yolox/yolox_tiny_8xb16_300e_coco.py
index f348ce8d..b44a2933 100644
--- a/configs/detection/yolox/yolox_tiny_8xb16_300e_coco.py
+++ b/configs/detection/yolox/yolox_tiny_8xb16_300e_coco.py
@@ -1,9 +1,7 @@
 _base_ = './yolox_s_8xb16_300e_coco.py'
 
 # model settings
-# model settings
-model = dict(model_type='tiny')
-
+model = dict(model_type='tiny', head=dict(model_type='tiny', ))
 
 CLASSES = [
     'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
diff --git a/configs/detection/yolox/yolox_x_8xb8_300e_coco.py b/configs/detection/yolox/yolox_x_8xb8_300e_coco.py
index 9fe88752..98ab31f4 100644
--- a/configs/detection/yolox/yolox_x_8xb8_300e_coco.py
+++ b/configs/detection/yolox/yolox_x_8xb8_300e_coco.py
@@ -1,7 +1,7 @@
 _base_ = './yolox_s_8xb16_300e_coco.py'
 
 # model settings
-model = dict(model_type='x')
+model = dict(model_type='x', head=dict(model_type='x', ))
 
 data = dict(imgs_per_gpu=8, workers_per_gpu=4)
 
diff --git a/convert_new.py b/convert_new.py
deleted file mode 100644
index b3b0ced1..00000000
--- a/convert_new.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# conver to new
-import torch
-
-from easycv.models import build_model
-from easycv.utils.checkpoint import load_checkpoint
-from easycv.utils.config_tools import (CONFIG_TEMPLATE_ZOO,
-                                       mmcv_config_fromfile, rebuild_config)
-
-if __name__ == '__main__':
-    # cfg_path = '/apsara/xinyi.zxy/code/pr154/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_tood3.py'
-    cfg_path = '/apsara/xinyi.zxy/code/pr154/configs/detection/yolox/yolox_s_8xb16_300e_coco_asff_reptood2.py'
-    cfg = mmcv_config_fromfile(cfg_path)
-    model = build_model(cfg.model)
-    print(model)
-
-    # ckpt_path = '/apsara/xinyi.zxy/pretrain/asff_tood3/epoch_300_new.pth'
-    ckpt_path = '/apsara/xinyi.zxy/pretrain/ab_study/yolox_asff_reptood2.pth'
-    model_ckpt = torch.load(ckpt_path)
-    pretrain_model_state = model_ckpt['state_dict']
-
-    # model.load_state_dict(pretrain_model_state)
-    #
-    # exit()
-
-    model_state_dict = model.state_dict()  # ??model?key
-
-    # of1 = open('new.txt','w')
-    # for key in model_state_dict.keys():
-    #     of1.writelines(key+'\n')
-    #
-    # of2 = open('pre.txt', 'w')
-    # for key in pretrain_model_state.keys():
-    #     of2.writelines(key + '\n')
-
-    key_ori = [
-        'backbone.stem', 'ERBlock_2.0', 'ERBlock_2.1.conv1',
-        'ERBlock_2.1.block.0', 'ERBlock_3.0', 'ERBlock_3.1.conv1',
-        'ERBlock_3.1.block.0', 'ERBlock_3.1.block.1', 'ERBlock_3.1.block.2',
-        'ERBlock_4.0', 'ERBlock_4.1.conv1', 'ERBlock_4.1.block.0',
-        'ERBlock_4.1.block.1', 'ERBlock_4.1.block.2', 'ERBlock_4.1.block.3',
-        'ERBlock_4.1.block.4', 'ERBlock_5.0', 'ERBlock_5.1.conv1',
-        'ERBlock_5.1.block.0', 'ERBlock_5.2'
-    ]
-
-    key_new = [
-        'backbone.stage0', 'stage1.0', 'stage1.1', 'stage1.2', 'stage2.0',
-        'stage2.1', 'stage2.2', 'stage2.3', 'stage2.4', 'stage3.0', 'stage3.1',
-        'stage3.2', 'stage3.3', 'stage3.4', 'stage3.5', 'stage3.6', 'stage4.0',
-        'stage4.1', 'stage4.2', 'stage4.3'
-    ]
-
-    print(len(key_ori) == len(key_new))
-
-    for i, key in enumerate(pretrain_model_state):
-        find = False
-        for t_i, t_k in enumerate(key_ori):
-            if t_k in key:
-                find = True
-                break
-        if find:
-            model_state_dict[key.replace(
-                t_k, key_new[t_i])] = pretrain_model_state[key]
-        else:
-            model_state_dict[key] = pretrain_model_state[key]
-
-    model.load_state_dict(model_state_dict)
-
-    model_ckpt['state_dict'] = model_state_dict
-    ckpt_path_new = '/apsara/xinyi.zxy/pretrain/ab_study/yolox_asff_reptood2_new.pth'
-    torch.save(model_ckpt, ckpt_path_new)
-
-    # load
-    model_ckpt_new = torch.load(ckpt_path_new)
-    pretrain_model_state_new = model_ckpt_new['state_dict']
-
-    model.load_state_dict(pretrain_model_state_new)
-    #
-    # exit()
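
The deleted convert_new.py above remaps backbone parameter names from the old ERBlock_* layout to the new stage* layout. A compact sketch of the same remapping idea (helper name hypothetical):

import torch

def remap_state_dict(old_state, key_ori, key_new):
    """Rename substrings in checkpoint keys, e.g. 'ERBlock_2.0' -> 'stage1.0'."""
    assert len(key_ori) == len(key_new)
    new_state = {}
    for key, value in old_state.items():
        out_key = key
        for src, dst in zip(key_ori, key_new):
            if src in key:
                out_key = key.replace(src, dst)
                break
        new_state[out_key] = value
    return new_state

# usage sketch (paths hypothetical):
# ckpt = torch.load('old.pth', map_location='cpu')
# ckpt['state_dict'] = remap_state_dict(ckpt['state_dict'], key_ori, key_new)
# torch.save(ckpt, 'new.pth')
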
diff --git a/easycv/apis/export.py b/easycv/apis/export.py
index f462c08e..299eff60 100644
--- a/easycv/apis/export.py
+++ b/easycv/apis/export.py
@@ -179,170 +179,184 @@ def _export_yolox(model, cfg, filename):
         filename (str): filename to save exported models
     """
 
-    if hasattr(cfg, 'export') and (getattr(cfg.export, 'use_jit', False) or
-                                   getattr(cfg.export, 'export_blade', False)):
-
-        # only when we use jit or blade, we need to reparameterize_models before export
-        model = reparameterize_models(model)
-        device = 'cuda' if torch.cuda.is_available() else 'cpu'
-        model = copy.deepcopy(model)
-
-        end2end = cfg.export.get('end2end', False)
-        if LooseVersion(torch.__version__) < LooseVersion('1.7.0') and end2end:
-            raise ValueError('`end2end` only support torch1.7.0 and later!')
-
-        batch_size = cfg.export.get('batch_size', 1)
-        static_opt = cfg.export.get('static_opt', True)
-        use_blade = getattr(cfg.export, 'export_blade', False)
-        use_trt_efficientnms = cfg.export.get('use_trt_efficientnms', False)
-
-        # assert image scale and assgin input
-        img_scale = cfg.get('img_scale', (640, 640))
-
-        assert (
-            len(img_scale) == 2
-        ), 'Export YoloX predictor config contains img_scale must be (int, int) tuple!'
-
-        input = 255 * torch.rand((batch_size, 3) + img_scale)
-        print(input.dtype)
+    if hasattr(cfg, 'export'):
+        export_type = getattr(cfg.export, 'export_type', 'ori')
+        default_export_type_list = ['ori', 'jit', 'blade']
+        if export_type not in default_export_type_list:
+            logging.warning(
+                'YOLOX-PAI only supports export_type in [ori, jit, blade]; falling back to ori as default'
+            )
+            export_type = 'ori'
+
+        if export_type!='ori':
+            # only when we use jit or blade, we need to reparameterize_models before export
+            model = reparameterize_models(model)
+            device = 'cuda' if torch.cuda.is_available() else 'cpu'
+            model = copy.deepcopy(model)
+
+            end2end = cfg.export.get('end2end', False)
+            if LooseVersion(torch.__version__) < LooseVersion('1.7.0') and end2end:
+                raise ValueError('`end2end` only support torch1.7.0 and later!')
+
+            batch_size = cfg.export.get('batch_size', 1)
+            static_opt = cfg.export.get('static_opt', True)
+            use_trt_efficientnms = cfg.export.get('use_trt_efficientnms', False)
+            # assert image scale and assign input
+            img_scale = cfg.get('img_scale', (640, 640))
 
-        # assert use_trt_efficientnms only happens when static_opt=True
-        if static_opt is not True:
             assert (
-                use_trt_efficientnms == False
-            ), 'Export YoloX predictor use_trt_efficientnms=True only when use static_opt=True!'
+                len(img_scale) == 2
+            ), 'Export YoloX predictor: img_scale in the export config must be an (int, int) tuple!'
+
+            input = 255 * torch.rand((batch_size, 3) + img_scale)
+
+            # assert use_trt_efficientnms only happens when static_opt=True
+            if static_opt is not True:
+                assert (
+                    use_trt_efficientnms == False
+                ), 'Export YoloX predictor use_trt_efficientnms=True only when use static_opt=True!'
+
+            # set preprocess_fn, postprocess_fn by config
+            preprocess_fn = None
+            postprocess_fn = None
+
+            # preprocess cannot be optimized by blade; to accelerate inference, a preprocess jit model should be saved!
+            save_preprocess_jit = False
+            print('end2end', end2end)
+            print('usetrt', use_trt_efficientnms)
+            if end2end:
+                preprocess_fn = PreProcess(target_size=img_scale, keep_ratio=True)
+                postprocess_fn = DetPostProcess(max_det=100, score_thresh=0.5)
+
+                if use_trt_efficientnms:
+                    logging.warning(
+                        'PAI-YOLOX: use_trt_efficientnms=True during export, we drop DetPostProcess, because trt_efficientnms = detection.boxes.postprocess + DetPostProcess!'
+                    )
+                    postprocess_fn = None
+                else:
+                    assert (export_type != 'blade'
+                            ), 'Export End2end YOLOX Blade model must use_trt_efficientnms'
 
-        # ignore DetPostProcess when use_trt_efficientnms
-        preprocess_fn = None
-        postprocess_fn = None
-        if end2end:
-            preprocess_fn = PreProcess(target_size=img_scale, keep_ratio=True)
-            postprocess_fn = DetPostProcess(max_det=100, score_thresh=0.5)
+                if export_type == 'blade':
+                    logging.warning(
+                        'PAI-YOLOX: End2endModelExportWrapper with preprocess_fn can\'t optimize by blade !'
+                    )
+                    preprocess_fn = None
+                    save_preprocess_jit = True
 
+            # set model use_trt_efficientnms
             if use_trt_efficientnms:
+                from easycv.toolkit.blade import create_tensorrt_efficientnms
+                if hasattr(model, 'get_nmsboxes_num'):
+                    nmsbox_num = int(model.get_nmsboxes_num(img_scale))
+                else:
+                    logging.warning(
+                        'PAI-YOLOX: use_trt_efficientnms is set but the model has no attribute get_nmsboxes_num; using 8400 as default!'
+                    )
+                    nmsbox_num = 8400
+
+                tmp_example_scores = torch.randn(
+                    [batch_size, nmsbox_num, 4 + 1 + len(cfg.CLASSES)],
+                    dtype=torch.float32)
                 logging.warning(
-                    'PAI-YOLOX: use_trt_efficientnms=True during export, we drop DetPostProcess, because trt_efficientnms = detection.boxes.postprocess + DetPostProcess!'
-                )
-                postprocess_fn = None
-
-            if use_blade:
-                logging.warning(
-                    'PAI-YOLOX: End2endModelExportWrapper with preprocess_fn can\'t optimize by blade !'
-                )
-                preprocess_fn = None
-
-        # set model use_trt_efficientnms
-        if use_trt_efficientnms:
-            from easycv.toolkit.blade import create_tensorrt_efficientnms
-            if hasattr(model, 'get_nmsboxes_num'):
-                nmsbox_num = int(model.get_nmsboxes_num(img_scale))
+                    'PAI-YOLOX: use_trt_efficientnms with static shape [{}, {}, {}]'
+                    .format(batch_size, nmsbox_num, 4 + 1 + len(cfg.CLASSES)))
+                model.trt_efficientnms = create_tensorrt_efficientnms(
+                    tmp_example_scores,
+                    iou_thres=model.nms_thre,
+                    score_thres=model.test_conf)
+                model.use_trt_efficientnms = True
+
+            model.eval()
+            model.to(device)
+
+            model_export = End2endModelExportWrapper(
+                model,
+                input.to(device),
+                preprocess_fn=preprocess_fn,
+                postprocess_fn=postprocess_fn,
+                trace_model=True,
+            )
+
+            model_export.eval().to(device)
+
+            # a well-trained model will generate reasonable results; otherwise, set model.test_conf=0.0 to avoid empty tensors during inference
+            # trace is a little faster than script, but it is not supported for an end2end model.
+            if end2end:
+                yolox_trace = torch.jit.script(model_export)
             else:
-                logging.warning(
-                    'PAI-YOLOX: use_trt_efficientnms encounter model has no attr named get_nmsboxes_num, use 8400 as default!'
-                )
-                nmsbox_num = 8400
-
-            tmp_example_scores = torch.randn(
-                [batch_size, nmsbox_num, 4 + 1 + len(cfg.CLASSES)],
-                dtype=torch.float32)
-            logging.warning(
-                'PAI-YOLOX: use_trt_efficientnms with staic shape [{}, {}, {}]'
-                .format(batch_size, nmsbox_num, 4 + 1 + len(cfg.CLASSES)))
-            model.trt_efficientnms = create_tensorrt_efficientnms(
-                tmp_example_scores,
-                iou_thres=model.nms_thre,
-                score_thres=model.test_conf)
-            model.use_trt_efficientnms = True
-
-        model.eval()
-        model.to(device)
-
-        model_export = End2endModelExportWrapper(
-            model,
-            input.to(device),
-            preprocess_fn=preprocess_fn,
-            postprocess_fn=postprocess_fn,
-            trace_model=True,
-        )
-
-        model_export.eval().to(device)
+                yolox_trace = torch.jit.trace(model_export, input.to(device))
+
+            if export_type=='blade':
+                blade_config = cfg.export.get(
+                    'blade_config',
+                    dict(enable_fp16=True, fp16_fallback_op_ratio=0.3))
+
+                from easycv.toolkit.blade import blade_env_assert, blade_optimize
+                assert blade_env_assert()
+
+                # optimize model with blade
+                yolox_blade = blade_optimize(
+                    speed_test_model=model,
+                    model=yolox_trace,
+                    inputs=(input.to(device), ),
+                    blade_config=blade_config,
+                    static_opt=static_opt)
+
+                # save preprocess jit model to accelerate the preprocess procedure
+                if save_preprocess_jit:
+                    tpre_input = 255 * torch.rand((batch_size, ) + img_scale + (3, ))
+                    tpre = PreprocessExportWrapper(
+                        example_inputs=tpre_input.to(device),
+                        preprocess_fn=PreProcess(
+                            target_size=img_scale, keep_ratio=True))
+                    tpre.eval().to(device)
+                    preprocess = torch.jit.script(tpre)
+                    with io.open(filename + '.preprocess', 'wb') as prefile:
+                        torch.jit.save(preprocess, prefile)
+
+                with io.open(filename + '.blade', 'wb') as ofile:
+                    torch.jit.save(yolox_blade, ofile)
+                with io.open(filename + '.blade.config.json', 'w') as ofile:
+                    config = dict(
+                        export=cfg.export,
+                        test_pipeline=cfg.test_pipeline,
+                        classes=cfg.CLASSES)
+
+                    json.dump(config, ofile)
+
+            if export_type=='jit':
+                with io.open(filename + '.jit', 'wb') as ofile:
+                    torch.jit.save(yolox_trace, ofile)
+
+                with io.open(filename + '.jit.config.json', 'w') as ofile:
+                    config = dict(
+                        export=cfg.export,
+                        test_pipeline=cfg.test_pipeline,
+                        classes=cfg.CLASSES)
+
+                    json.dump(config, ofile)
 
-        # well trained model will generate reasonable result, otherwise, we should change model.test_conf=0.0 to avoid tensor in inference to be empty
-        # use trace is a litter bit faster than script. But it is not supported in an end2end model.
-        if end2end:
-            yolox_trace = torch.jit.script(model_export)
         else:
-            yolox_trace = torch.jit.trace(model_export, input.to(device))
-
-        if getattr(cfg.export, 'export_blade', False):
-            blade_config = cfg.export.get(
-                'blade_config',
-                dict(enable_fp16=True, fp16_fallback_op_ratio=0.3))
-
-            from easycv.toolkit.blade import blade_env_assert, blade_optimize
-            assert blade_env_assert()
-
-            # optimize model with blade
-            yolox_blade = blade_optimize(
-                speed_test_model=model,
-                model=yolox_trace,
-                inputs=(input.to(device), ),
-                blade_config=blade_config,
-                static_opt=static_opt)
-
-            # save preprocess jit model to accelerate the preprocess procedure
-            tpre_input = 255 * torch.rand((batch_size, ) + img_scale + (3, ))
-            tpre = PreprocessExportWrapper(
-                example_inputs=tpre_input.to(device),
-                preprocess_fn=PreProcess(
-                    target_size=img_scale, keep_ratio=True))
-            tpre.eval().to(device)
-            preprocess = torch.jit.script(tpre)
-            with io.open(filename + '.preprocess', 'wb') as prefile:
-                torch.jit.save(preprocess, prefile)
-
-            with io.open(filename + '.blade', 'wb') as ofile:
-                torch.jit.save(yolox_blade, ofile)
-            with io.open(filename + '.blade.config.json', 'w') as ofile:
-                config = dict(
-                    export=cfg.export,
-                    test_pipeline=cfg.test_pipeline,
-                    classes=cfg.CLASSES)
-
-                json.dump(config, ofile)
-
-        if getattr(cfg.export, 'use_jit', False):
-            with io.open(filename + '.jit', 'wb') as ofile:
-                torch.jit.save(yolox_trace, ofile)
-
-            with io.open(filename + '.jit.config.json', 'w') as ofile:
-                config = dict(
-                    export=cfg.export,
-                    test_pipeline=cfg.test_pipeline,
-                    classes=cfg.CLASSES)
-
-                json.dump(config, ofile)
-
-    else:
-        if hasattr(cfg, 'test_pipeline'):
-            # with last pipeline Collect
-            test_pipeline = cfg.test_pipeline
-            print(test_pipeline)
-        else:
-            print('test_pipeline not found, using default preprocessing!')
-            raise ValueError('export model config without test_pipeline')
+            if hasattr(cfg, 'test_pipeline'):
+                # with last pipeline Collect
+                test_pipeline = cfg.test_pipeline
+                print(test_pipeline)
+            else:
+                print('test_pipeline not found, using default preprocessing!')
+                raise ValueError('export model config without test_pipeline')
 
-        config = dict(
-            model=cfg.model,
-            test_pipeline=test_pipeline,
-            CLASSES=cfg.CLASSES,
-        )
+            config = dict(
+                model=cfg.model,
+                test_pipeline=test_pipeline,
+                CLASSES=cfg.CLASSES,
+            )
 
-        meta = dict(config=json.dumps(config))
-        checkpoint = dict(
-            state_dict=model.state_dict(), meta=meta, author='EasyCV')
-        with io.open(filename, 'wb') as ofile:
-            torch.save(checkpoint, ofile)
+            meta = dict(config=json.dumps(config))
+            checkpoint = dict(
+                state_dict=model.state_dict(), meta=meta, author='EasyCV')
+            with io.open(filename, 'wb') as ofile:
+                torch.save(checkpoint, ofile)
 
 
 def _export_swav(model, cfg, filename):
@@ -746,7 +760,6 @@ def forward(self, image):
 
         with torch.no_grad():
             if self.preprocess_fn is not None:
-                # print('before', image.shape)
                 output = self.preprocess_fn(image)
                 # if multi values ​​are returned, the first one must be image, others ​​are optional,
                 # and others will all be passed into postprocess_fn
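
The comment above states the preprocess_fn contract of the export wrapper: when a tuple is returned, the first element must be the image and the remaining elements are forwarded to postprocess_fn. A hedged sketch (class name and fields illustrative) of a custom preprocess function that follows this contract:

from typing import Dict, Tuple

import torch
import torch.nn.functional as F


class SimplePreProcess:
    """Resize the image and report the scale factors for the postprocess step."""

    def __init__(self, target_size: Tuple[int, int] = (640, 640)):
        self.target_size = target_size

    def __call__(self, image: torch.Tensor):
        resized = F.interpolate(
            image, size=self.target_size, mode='bilinear', align_corners=False)
        sample_info: Dict[str, Tuple[float, float]] = {
            'scale_factor': (self.target_size[0] / image.shape[-2],
                             self.target_size[1] / image.shape[-1])
        }
        # first value is the image, the rest is handed to postprocess_fn
        return resized, sample_info
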
diff --git a/easycv/datasets/detection/raw.py b/easycv/datasets/detection/raw.py
index 3ba9e6b0..8f67ad84 100644
--- a/easycv/datasets/detection/raw.py
+++ b/easycv/datasets/detection/raw.py
@@ -126,9 +126,6 @@ def visualize(self, results, vis_num=10, score_thr=0.3, **kwargs):
                     dict of image meta info, containing filename, img_shape,
                     origin_img_shape, scale_factor and so on.
         """
-        import copy
-        results = copy.deepcopy(results)
-
         class_names = None
         if hasattr(self.data_source, 'CLASSES'):
             class_names = self.data_source.CLASSES
diff --git a/easycv/models/detection/detectors/yolox/yolo_pafpn.py b/easycv/models/detection/detectors/yolox/yolo_pafpn.py
index f41c2749..55b9e5aa 100644
--- a/easycv/models/detection/detectors/yolox/yolo_pafpn.py
+++ b/easycv/models/detection/detectors/yolox/yolo_pafpn.py
@@ -12,6 +12,7 @@
 from easycv.models.registry import BACKBONES
 from .asff import ASFF
 
+
 class YOLOPAFPN(nn.Module):
     """
     YOLOv3 model. Darknet 53 is the default backbone of this model.
@@ -25,21 +26,19 @@ class YOLOPAFPN(nn.Module):
         'x': [1.33, 1.25]
     }
 
-    def __init__(
-            self,
-            depth=1.0,
-            width=1.0,
-            backbone='CSPDarknet',
-            neck = 'yolo',
-            neck_mode = 'all',
-            in_features=('dark3', 'dark4', 'dark5'),
-            in_channels=[256, 512, 1024],
-            depthwise=False,
-            act='silu',
-            use_att=None,
-            asff_channel=2,
-            expand_kernel=3
-    ):
+    def __init__(self,
+                 depth=1.0,
+                 width=1.0,
+                 backbone='CSPDarknet',
+                 neck_type='yolo',
+                 neck_mode='all',
+                 in_features=('dark3', 'dark4', 'dark5'),
+                 in_channels=[256, 512, 1024],
+                 depthwise=False,
+                 act='silu',
+                 use_att=None,
+                 asff_channel=2,
+                 expand_kernel=3):
         super().__init__()
 
         # build backbone
@@ -63,14 +62,14 @@ def __init__(
         self.in_channels = in_channels
 
         Conv = DWConv if depthwise else BaseConv
-        self.neck = neck
+        self.neck_type = neck_type
         self.neck_mode = neck_mode
-        if neck != 'gsconv':
-            if neck != 'yolo':
+        if neck_type != 'gsconv':
+            if neck_type != 'yolo':
                 logging.warning(
                     'YOLOX-PAI backbone must in [yolo, gsconv], otherwise we use yolo as default'
                 )
-            self.neck = 'yolo'
+            self.neck_type = 'yolo'
 
             self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
             self.lateral_conv0 = BaseConv(
@@ -278,7 +277,7 @@ def forward(self, input):
             features = self.backbone(input)
             [x2, x1, x0] = features
 
-        if self.neck == 'yolo':
+        if self.neck_type == 'yolo':
             fpn_out0 = self.lateral_conv0(x0)  # 1024->512/32
             f_out0 = self.upsample(fpn_out0)  # 512/16
             f_out0 = torch.cat([f_out0, x1], 1)  # 512->1024/16
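
With the rename from `neck` to `neck_type`, the PAFPN can also be exercised on its own. A hedged sketch (hyperparameters illustrative, roughly the s-model depth/width); any unknown neck_type falls back to 'yolo' with a warning:

import torch
from easycv.models.detection.detectors.yolox.yolo_pafpn import YOLOPAFPN

pafpn = YOLOPAFPN(depth=0.33, width=0.5, neck_type='yolo', neck_mode='all')
pafpn.eval()
with torch.no_grad():
    outs = pafpn(torch.rand(1, 3, 640, 640))   # multi-scale FPN features
print([o.shape for o in outs])
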
diff --git a/easycv/models/detection/detectors/yolox/yolox.py b/easycv/models/detection/detectors/yolox/yolox.py
index a89db7bd..66f82810 100644
--- a/easycv/models/detection/detectors/yolox/yolox.py
+++ b/easycv/models/detection/detectors/yolox/yolox.py
@@ -8,9 +8,8 @@
 from torch import Tensor
 
 from easycv.models.base import BaseModel
-from easycv.models.builder import (MODELS, build_head)
+from easycv.models.builder import MODELS, build_head
 from easycv.models.detection.utils import postprocess
-
 from .yolo_pafpn import YOLOPAFPN
 
 
@@ -38,18 +37,18 @@ class YOLOX(BaseModel):
     }
 
     def __init__(self,
-                 model_type = 's',
+                 model_type='s',
                  test_conf=0.01,
                  nms_thre=0.65,
-                 backbone = 'CSPDarknet',
+                 backbone='CSPDarknet',
                  use_att=None,
                  asff_channel=2,
-                 neck='yolo',
+                 neck_type='yolo',
                  neck_mode='all',
                  head=None,
                  pretrained=True):
         super(YOLOX, self).__init__()
-        print('in')
+
         assert model_type in self.param_map, f'invalid model_type for yolox {model_type}, valid ones are {list(self.param_map.keys())}'
 
         self.pretrained = pretrained
@@ -62,12 +61,11 @@ def __init__(self,
             depth,
             width,
             backbone=backbone,
-            neck=neck,
+            neck_type=neck_type,
             neck_mode=neck_mode,
             in_channels=in_channels,
             asff_channel=asff_channel,
-            use_att=use_att
-        )
+            use_att=use_att)
 
         self.head = build_head(head)
 
diff --git a/easycv/models/detection/detectors/yolox_edge/yolox_edge.py b/easycv/models/detection/detectors/yolox_edge/yolox_edge.py
index cc94362c..4fb55aaa 100644
--- a/easycv/models/detection/detectors/yolox_edge/yolox_edge.py
+++ b/easycv/models/detection/detectors/yolox_edge/yolox_edge.py
@@ -1,8 +1,7 @@
 # Copyright (c) Alibaba, Inc. and its affiliates.
 import torch.nn as nn
 
-from easycv.models.builder import (MODELS, build_backbone, build_head,
-                                   build_neck)
+from easycv.models.builder import MODELS
 from easycv.models.detection.detectors.yolox.yolo_head import YOLOXHead
 from easycv.models.detection.detectors.yolox.yolo_pafpn import YOLOPAFPN
 from easycv.models.detection.detectors.yolox.yolox import YOLOX
@@ -24,21 +23,44 @@ class YOLOX_EDGE(YOLOX):
     """
 
     def __init__(self,
-                 backbone,
-                 test_conf,
-                 nms_thre,
-                 head=None,
-                 neck=None,
-                 pretrained=True):
-        super(YOLOX, self).__init__()
-
-        self.pretrained = pretrained
-        self.backbone = build_backbone(backbone)
-        if neck is not None:
-            self.neck = build_neck(neck)
-        self.head = build_head(head)
+                 stage: str = 'EDGE',
+                 model_type: str = 's',
+                 num_classes: int = 80,
+                 test_size: tuple = (640, 640),
+                 test_conf: float = 0.01,
+                 nms_thre: float = 0.65,
+                 pretrained: str = None,
+                 depth: float = 1.0,
+                 width: float = 1.0,
+                 max_model_params: float = 0.0,
+                 max_model_flops: float = 0.0,
+                 activation: str = 'silu',
+                 in_channels: list = [256, 512, 1024],
+                 backbone=None,
+                 head=None):
+        super(YOLOX_EDGE, self).__init__()
+
+        if backbone is None:
+            self.backbone = YOLOPAFPN(
+                depth,
+                width,
+                in_channels=in_channels,
+                depthwise=True,
+                act=activation)
+        if head is None:
+            self.head = YOLOXHead(
+                num_classes,
+                width,
+                in_channels=in_channels,
+                depthwise=True,
+                act=activation,
+                stage=stage)
 
         self.apply(init_yolo)  # init_yolo(self)
-        self.num_classes = self.head.num_classes
+        self.head.initialize_biases(1e-2)
+
+        self.stage = stage
+        self.num_classes = num_classes
         self.test_conf = test_conf
         self.nms_thre = nms_thre
+        self.test_size = test_size
\ No newline at end of file
diff --git a/easycv/models/detection/utils/tensorrt_nms.py b/easycv/models/detection/utils/tensorrt_nms.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/easycv/predictors/detector.py b/easycv/predictors/detector.py
index b12f2308..bf4d751f 100644
--- a/easycv/predictors/detector.py
+++ b/easycv/predictors/detector.py
@@ -58,12 +58,16 @@ def __init__(self,
         self.model_path = model_path
         self.max_det = max_det
         self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
-        self.use_jit = model_path.endswith('jit') or model_path.endswith(
-            'blade')
-        self.use_blade = model_path.endswith('blade')
+        # set type
+        self.model_type = 'ori'
+        if model_path.endswith('jit'):
+            self.model_type = 'jit'
+        if model_path.endswith('blade'):
+            self.model_type = 'blade'
+
         self.use_trt_efficientnms = use_trt_efficientnms
 
-        if self.use_blade:
+        if self.model_type=='blade':
             import torch_blade
 
         if model_config:
@@ -74,10 +78,12 @@ def __init__(self,
         self.score_thresh = model_config[
             'score_thresh'] if 'score_thresh' in model_config else score_thresh
 
-        if self.use_jit:
+        if self.model_type!='ori':
+            # jit or blade model
             preprocess_path = '.'.join(
                 model_path.split('.')[:-1] + ['preprocess'])
             if os.path.exists(preprocess_path):
+                # use a preprocess jit model to speed up
                 with io.open(preprocess_path, 'rb') as infile:
                     map_location = 'cpu' if self.device == 'cpu' else 'cuda'
                     self.preprocess = torch.jit.load(infile, map_location)
@@ -188,8 +194,8 @@ def predict(self, input_data_list, batch_size=-1, to_numpy=True):
 
             ori_img_shape = img.shape[:2]
             if self.end2end:
+                print('end2end')
                 # the input should also be as the type of uint8 as mmcv
-
                 img = torch.from_numpy(img).to(self.device)
                 img = img.unsqueeze(0)
                 if hasattr(self, 'preprocess'):
@@ -268,6 +274,8 @@ def predict(self, input_data_list, batch_size=-1, to_numpy=True):
             num_boxes = detection_classes.shape[
                 0] if detection_classes is not None else 0
 
+            print(num_boxes)
+
             detection_classes_names = [
                 self.CLASSES[detection_classes[idx]]
                 for idx in range(num_boxes)
diff --git a/numeric_test.py b/numeric_test.py
new file mode 100644
index 00000000..6d48c9dd
--- /dev/null
+++ b/numeric_test.py
@@ -0,0 +1,55 @@
+from easycv.models.detection.detectors.yolox.postprocess import create_tensorrt_postprocess
+import torch
+from torchvision.transforms import Compose
+
+from easycv.models import build_model
+from easycv.utils.checkpoint import load_checkpoint
+from easycv.utils.config_tools import mmcv_config_fromfile
+from easycv.utils.registry import build_from_cfg
+from easycv.datasets.registry import PIPELINES
+from easycv.models.detection.utils import postprocess
+
+import sys
+import numpy as np
+from PIL import Image
+import time
+
+from contextlib import contextmanager
+
+
+@contextmanager
+def timeit_context(name):
+    startTime = time.time()
+    yield
+    elapsedTime = time.time() - startTime
+    print('[{}] finished in {} ms'.format(name, int(elapsedTime * 1000)))
+
+
+def model_speed_test(name, img, use_trt_efficientnms=False):
+    pred = TorchYoloXPredictor(name, use_trt_efficientnms=use_trt_efficientnms)
+    for i in range(10):
+        m0 = pred.predict([img])
+    with timeit_context('{} speed test'.format(name)):
+        for i in range(100):
+            m0 = pred.predict([img])
+    print(m0[0]['detection_classes'])
+    print(m0[0]['detection_scores'])
+
+
+if __name__ == '__main__':
+    if 1:
+        img_path = '/apsara/xinyi.zxy/data/coco/val2017/000000254016.jpg'
+        from easycv.predictors import TorchYoloXPredictor
+
+        img = Image.open(img_path)
+
+        # model_speed_test('/apsara/xinyi.zxy/pretrain/yolox_s/epoch_300.pt', img)
+        # model_speed_test('/apsara/xinyi.zxy/pretrain/yolox_s/epoch_300_j_noe2e.pt.jit', img, False)
+        # # model_speed_test('/apsara/xinyi.zxy/pretrain/yolox_s/epoch_300_j_noe2e_trt.pt.jit', img, True)  # jit ??
+        # model_speed_test('/apsara/xinyi.zxy/pretrain/yolox_s/epoch_300_j_e2e_notrt.pt.jit', img, False)
+        # #model_speed_test('/apsara/xinyi.zxy/pretrain/yolox_s/epoch_300_j_e2e_trt.pt.jit', img, True)
+
+        model_speed_test('/apsara/xinyi.zxy/pretrain/yolox_s/epoch_300_b_noe2e.pt.blade', img, False)
+        # model_speed_test('/apsara/xinyi.zxy/pretrain/yolox_s/epoch_300_b_noe2e_trt.pt.blade', img, True)
+        # model_speed_test('/apsara/xinyi.zxy/pretrain/yolox_s/epoch_300_b_e2e_trt.pt.blade', img, True)
+

From 47c45810f0a842bc61fd3d008463e5d346175f5e Mon Sep 17 00:00:00 2001
From: zouxinyi0625 <zouxinyi.zxy@alibaba-inc.com>
Date: Fri, 19 Aug 2022 18:02:22 +0800
Subject: [PATCH 50/69] fix jit/blade bug

---
 .../yolox/yolox_s_8xb16_300e_coco.py          |  11 +-
 easycv/apis/export.py                         | 214 +++---------------
 .../detection/detectors/yolox/postprocess.py  | 159 -------------
 .../models/detection/detectors/yolox/test.py  | 117 ----------
 .../models/detection/detectors/yolox/yolox.py |   6 +-
 .../detectors/yolox_edge/yolox_edge.py        |   2 +-
 easycv/predictors/detector.py                 | 121 +++++-----
 numeric_test.py                               |  55 -----
 tests/apis/test_export.py                     |   4 +-
 tests/apis/test_export_blade.py               |  75 ------
 tests/predictors/test_detector.py             |  88 ++++---
 tests/predictors/test_detector_blade.py       | 204 -----------------
 tests/ut_config.py                            |  13 +-
 13 files changed, 145 insertions(+), 924 deletions(-)
 delete mode 100644 easycv/models/detection/detectors/yolox/postprocess.py
 delete mode 100644 easycv/models/detection/detectors/yolox/test.py
 delete mode 100644 numeric_test.py
 delete mode 100644 tests/apis/test_export_blade.py
 delete mode 100644 tests/predictors/test_detector_blade.py

diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
index 8439c353..82cd4c6d 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
@@ -188,13 +188,4 @@
         # dict(type='WandbLoggerHookV2'),
     ])
 
-
-# export = dict(export_type = 'ori', end2end = False,  batch_size=1, blade_config=dict(enable_fp16=True, fp16_fallback_op_ratio=0.01), use_trt_efficientnms=False)
-# export = dict(export_type = 'jit', end2end = False,  batch_size=1, blade_config=dict(enable_fp16=True, fp16_fallback_op_ratio=0.01), use_trt_efficientnms=False)
-export = dict(export_type = 'blade', end2end = False,  batch_size=1, blade_config=dict(enable_fp16=True, fp16_fallback_op_ratio=0.01), use_trt_efficientnms=False)
-# export = dict(export_type = 'jit', end2end = True,  batch_size=1, blade_config=dict(enable_fp16=True, fp16_fallback_op_ratio=0.01), use_trt_efficientnms=False)
-# export = dict(export_type = 'jit', end2end = True,  batch_size=1, blade_config=dict(enable_fp16=True, fp16_fallback_op_ratio=0.01), use_trt_efficientnms=True)
-# export = dict(export_type = 'blade', end2end = True,  batch_size=1, blade_config=dict(enable_fp16=True, fp16_fallback_op_ratio=0.01), use_trt_efficientnms=True)
-# export = dict(export_type = 'blade', end2end = True,  batch_size=1, blade_config=dict(enable_fp16=True, fp16_fallback_op_ratio=0.01), use_trt_efficientnms=False) # assert error
-# export = dict(export_type = 'jit', end2end = False,  batch_size=1, blade_config=dict(enable_fp16=True, fp16_fallback_op_ratio=0.01), use_trt_efficientnms=True)
-# export = dict(export_type = 'blade', end2end = False,  batch_size=1, blade_config=dict(enable_fp16=True, fp16_fallback_op_ratio=0.01), use_trt_efficientnms=True)
\ No newline at end of file
+export = dict(export_type='ori', preprocess_jit=False, batch_size=1, blade_config=dict(enable_fp16=True, fp16_fallback_op_ratio=0.01), use_trt_efficientnms=False)
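
The consolidated export setting above replaces the old end2end switch with preprocess_jit. Hedged examples (values illustrative) of the three export_type modes handled by _export_yolox after this patch:

export = dict(export_type='ori')    # plain checkpoint with config meta, no tracing
# export = dict(export_type='jit', preprocess_jit=True, batch_size=1)
# export = dict(export_type='blade', preprocess_jit=True, batch_size=1,
#               blade_config=dict(enable_fp16=True, fp16_fallback_op_ratio=0.01),
#               use_trt_efficientnms=False)
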
diff --git a/easycv/apis/export.py b/easycv/apis/export.py
index 299eff60..3db5cbc7 100644
--- a/easycv/apis/export.py
+++ b/easycv/apis/export.py
@@ -8,6 +8,7 @@
 
 import cv2
 import torch
+import torchvision
 import torchvision.transforms.functional as t_f
 from mmcv.utils import Config
 
@@ -19,7 +20,7 @@
 from easycv.utils.checkpoint import load_checkpoint
 
 __all__ = [
-    'export', 'PreProcess', 'DetPostProcess', 'End2endModelExportWrapper',
+    'export', 'PreProcess', 'ModelExportWrapper', 'ProcessExportWrapper',
     'reparameterize_models'
 ]
 
@@ -188,19 +189,18 @@ def _export_yolox(model, cfg, filename):
             )
             export_type = 'ori'
 
-        if export_type!='ori':
+        if export_type != 'ori':
             # only when we use jit or blade, we need to reparameterize_models before export
             model = reparameterize_models(model)
             device = 'cuda' if torch.cuda.is_available() else 'cpu'
             model = copy.deepcopy(model)
 
-            end2end = cfg.export.get('end2end', False)
-            if LooseVersion(torch.__version__) < LooseVersion('1.7.0') and end2end:
-                raise ValueError('`end2end` only support torch1.7.0 and later!')
+            preprocess_jit = cfg.export.get('preprocess_jit', False)
 
             batch_size = cfg.export.get('batch_size', 1)
             static_opt = cfg.export.get('static_opt', True)
-            use_trt_efficientnms = cfg.export.get('use_trt_efficientnms', False)
+            use_trt_efficientnms = cfg.export.get('use_trt_efficientnms',
+                                                  False)
             # assert image scale and assign input
             img_scale = cfg.get('img_scale', (640, 640))
 
@@ -216,33 +216,11 @@ def _export_yolox(model, cfg, filename):
                     use_trt_efficientnms == False
                 ), 'Export YoloX predictor use_trt_efficientnms=True only when use static_opt=True!'
 
-            # set preprocess_fn, postprocess_fn by config
-            preprocess_fn = None
-            postprocess_fn = None
-
             # preprocess cannot be optimized by blade; to accelerate inference, a preprocess jit model should be saved!
             save_preprocess_jit = False
-            print('end2end', end2end)
-            print('usetrt', use_trt_efficientnms)
-            if end2end:
-                preprocess_fn = PreProcess(target_size=img_scale, keep_ratio=True)
-                postprocess_fn = DetPostProcess(max_det=100, score_thresh=0.5)
 
-                if use_trt_efficientnms:
-                    logging.warning(
-                        'PAI-YOLOX: use_trt_efficientnms=True during export, we drop DetPostProcess, because trt_efficientnms = detection.boxes.postprocess + DetPostProcess!'
-                    )
-                    postprocess_fn = None
-                else:
-                    assert (export_type != 'blade'
-                            ), 'Export End2end YOLOX Blade model must use_trt_efficientnms'
-
-                if export_type == 'blade':
-                    logging.warning(
-                        'PAI-YOLOX: End2endModelExportWrapper with preprocess_fn can\'t optimize by blade !'
-                    )
-                    preprocess_fn = None
-                    save_preprocess_jit = True
+            if preprocess_jit:
+                save_preprocess_jit = True
 
             # set model use_trt_efficientnms
             if use_trt_efficientnms:
@@ -270,24 +248,19 @@ def _export_yolox(model, cfg, filename):
             model.eval()
             model.to(device)
 
-            model_export = End2endModelExportWrapper(
+            model_export = ModelExportWrapper(
                 model,
                 input.to(device),
-                preprocess_fn=preprocess_fn,
-                postprocess_fn=postprocess_fn,
                 trace_model=True,
             )
 
             model_export.eval().to(device)
 
-            # a well-trained model will generate reasonable results; otherwise, set model.test_conf=0.0 to avoid empty tensors during inference
-            # trace is a little faster than script, but it is not supported for an end2end model.
-            if end2end:
-                yolox_trace = torch.jit.script(model_export)
-            else:
-                yolox_trace = torch.jit.trace(model_export, input.to(device))
+            # trace model
+            yolox_trace = torch.jit.trace(model_export, input.to(device))
 
-            if export_type=='blade':
+            # save export model
+            if export_type == 'blade':
                 blade_config = cfg.export.get(
                     'blade_config',
                     dict(enable_fp16=True, fp16_fallback_op_ratio=0.3))
@@ -303,40 +276,44 @@ def _export_yolox(model, cfg, filename):
                     blade_config=blade_config,
                     static_opt=static_opt)
 
-                # save preprocess jit model to accelerate the preprocess procedure
-                if save_preprocess_jit:
-                    tpre_input = 255 * torch.rand((batch_size, ) + img_scale + (3, ))
-                    tpre = PreprocessExportWrapper(
-                        example_inputs=tpre_input.to(device),
-                        preprocess_fn=PreProcess(
-                            target_size=img_scale, keep_ratio=True))
-                    tpre.eval().to(device)
-                    preprocess = torch.jit.script(tpre)
-                    with io.open(filename + '.preprocess', 'wb') as prefile:
-                        torch.jit.save(preprocess, prefile)
-
                 with io.open(filename + '.blade', 'wb') as ofile:
                     torch.jit.save(yolox_blade, ofile)
                 with io.open(filename + '.blade.config.json', 'w') as ofile:
                     config = dict(
+                        model=cfg.model,
                         export=cfg.export,
                         test_pipeline=cfg.test_pipeline,
                         classes=cfg.CLASSES)
 
                     json.dump(config, ofile)
 
-            if export_type=='jit':
+            if export_type == 'jit':
                 with io.open(filename + '.jit', 'wb') as ofile:
                     torch.jit.save(yolox_trace, ofile)
 
                 with io.open(filename + '.jit.config.json', 'w') as ofile:
                     config = dict(
+                        model=cfg.model,
                         export=cfg.export,
                         test_pipeline=cfg.test_pipeline,
                         classes=cfg.CLASSES)
 
                     json.dump(config, ofile)
 
+            # save export preprocess/postprocess
+            if save_preprocess_jit:
+                tpre_input = 255 * torch.rand((batch_size, ) + img_scale +
+                                              (3, ))
+                tpre = ProcessExportWrapper(
+                    example_inputs=tpre_input.to(device),
+                    process_fn=PreProcess(
+                        target_size=img_scale, keep_ratio=True))
+                tpre.eval().to(device)
+
+                preprocess = torch.jit.script(tpre)
+                with io.open(filename + '.preprocess', 'wb') as prefile:
+                    torch.jit.save(preprocess, prefile)
+
         else:
             if hasattr(cfg, 'test_pipeline'):
                 # with last pipeline Collect
@@ -628,113 +605,15 @@ def __call__(
 
             return out_image, output_info
 
-    @torch.jit.script
-    class DetPostProcess:
-        """Process output values of detection models.
-
-        Args:
-            max_det: max number of detections to keep.
-        """
-
-        def __init__(self, max_det: int = 100, score_thresh: float = 0.5):
-            self.max_det = max_det
-            self.score_thresh = score_thresh
-
-        def __call__(
-            self, output: List[torch.Tensor], sample_info: Dict[str,
-                                                                Tuple[float,
-                                                                      float]]
-        ) -> Dict[str, torch.Tensor]:
-            """
-            Args:
-                output (List[torch.Tensor]): model output
-                sample_info (dict): sample infomation containing keys:
-                    pad: Pixel size of each side width and height padding
-                    scale_factor: the preprocessing scale factor
-                    ori_img_shape: original image shape
-                    img_shape: processed image shape
-            """
-            pad = sample_info['pad']
-            scale_factor = sample_info['scale_factor']
-            ori_h, ori_w = sample_info['ori_img_shape']
-            h, w = sample_info['img_shape']
-
-            output = output[0]
-
-            det_out = output[:self.max_det]
-
-            det_out = scale_coords((int(h), int(w)), det_out,
-                                   (int(ori_h), int(ori_w)),
-                                   (scale_factor, pad))
-
-            detection_boxes = det_out[:, :4].cpu()
-            detection_scores = (det_out[:, 4] * det_out[:, 5]).cpu()
-            detection_classes = det_out[:, 6].cpu().int()
-
-            out = {
-                'detection_boxes': detection_boxes,
-                'detection_scores': detection_scores,
-                'detection_classes': detection_classes,
-            }
-
-            return out
 else:
     PreProcess = None
-    DetPostProcess = None
-
 
-class End2endModelExportWrapper(torch.nn.Module):
-    """Model export wrapper that supports end-to-end export of pre-processing and post-processing.
-    We support some built-in preprocessing and postprocessing functions.
-    If the requirements are not met, you can customize the preprocessing and postprocessing functions.
-    The custom functions must support satisfy requirements of `torch.jit.script`,
-    please refer to: https://pytorch.org/docs/stable/jit_language_reference_v2.html
 
-    Args:
-        model (torch.nn.Module):  `torch.nn.Module` that will be run with `example_inputs`.
-            `model` arguments and return values must be tensors or (possibly nested) tuples
-            that contain tensors. When a module is passed `torch.jit.trace`, only the
-            ``forward_export`` method is run and traced (see :func:`torch.jit.trace
-            <torch.jit.trace_module>` for details).
-        example_inputs (tuple or torch.Tensor):  A tuple of example inputs that
-            will be passed to the function while tracing. The resulting trace
-            can be run with inputs of different types and shapes assuming the
-            traced operations support those types and shapes. `example_inputs`
-            may also be a single Tensor in which case it is automatically
-            wrapped in a tuple.
-        preprocess_fn (callable or None): A Python function for processing example_input.
-            If there is only one return value, it will be passed to `model.forward_export`.
-            If there are multiple return values, the first return value will be passed to `model.forward_export`,
-            and the remaining return values ​​will be passed to `postprocess_fn`.
-        postprocess_fn (callable or None): A Python function for processing the output value of the model.
-            If `preprocess_fn` has multiple outputs, the output value of `preprocess_fn`
-            will also be passed to `postprocess_fn`. For details, please refer to: `preprocess_fn`.
-        trace_model (bool): If True, before exporting the end-to-end model,
-            `torch.jit.trace` will be used to export the `model` first.
-            Traceing an ``nn.Module`` by default will compile the ``forward_export`` method and recursively.
-
-    Examples:
-        import torch
-
-        batch_size = 1
-        example_inputs = 255 * torch.rand((batch_size, 3, 640, 640), device='cuda')
-        end2end_model = End2endModelExportWrapper(
-            model,
-            example_inputs,
-            preprocess_fn=PreProcess(target_size=(640, 640)),  # `PreProcess` refer to ev_torch.apis.export.PreProcess
-            postprocess_fn=DetPostProcess()  # `DetPostProcess` refer to ev_torch.apis.export.DetPostProcess
-            trace_model=True)
-
-        model_script = torch.jit.script(end2end_model)
-        with io.open('/tmp/model.jit', 'wb') as f:
-            torch.jit.save(model_script, f)
-    """
+class ModelExportWrapper(torch.nn.Module):
 
     def __init__(self,
                  model,
                  example_inputs,
-                 preprocess_fn: Optional[Callable] = None,
-                 postprocess_fn: Optional[Callable] = None,
                  trace_model: bool = True) -> None:
         super().__init__()
 
@@ -743,8 +622,6 @@ def __init__(self,
             self.model.export_init()
 
         self.example_inputs = example_inputs
-        self.preprocess_fn = preprocess_fn
-        self.postprocess_fn = postprocess_fn
 
         self.trace_model = trace_model
         if self.trace_model:
@@ -756,28 +633,14 @@ def trace_module(self, **kwargs):
         self.model = trace_model
 
     def forward(self, image):
-        preprocess_outputs = ()
 
         with torch.no_grad():
-            if self.preprocess_fn is not None:
-                output = self.preprocess_fn(image)
-                # if multi values ​​are returned, the first one must be image, others ​​are optional,
-                # and others will all be passed into postprocess_fn
-                if isinstance(output, tuple):
-                    image = output[0]
-                    preprocess_outputs = output[1:]
-                else:
-                    image = output
-
             model_output = self.model.forward_export(image)
-            if self.postprocess_fn is not None:
-                model_output = self.postprocess_fn(model_output,
-                                                   *preprocess_outputs)
 
         return model_output
 
 
-class PreprocessExportWrapper(torch.nn.Module):
+class ProcessExportWrapper(torch.nn.Module):
     """
         split the preprocess that can be wrapped as a preprocess jit model
         the preprocess procedure cannot be optimized in an end2end blade model due to the dynamic shape problem
@@ -785,15 +648,12 @@ class PreprocessExportWrapper(torch.nn.Module):
 
     def __init__(self,
                  example_inputs,
-                 preprocess_fn: Optional[Callable] = None) -> None:
+                 process_fn: Optional[Callable] = None) -> None:
         super().__init__()
-        self.preprocess_fn = preprocess_fn
+        self.process_fn = process_fn
 
     def forward(self, image):
         with torch.no_grad():
-            output = self.preprocess_fn(image)
-            if isinstance(output, tuple):
-                image = output[0]
-            else:
-                image = output
-        return image
+            output = self.process_fn(image)
+
+        return output
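
The renamed wrappers can be exercised on their own. A minimal sketch (toy model and output path hypothetical) of tracing a model through ModelExportWrapper and saving the result:

import io

import torch

from easycv.apis.export import ModelExportWrapper


class ToyDetector(torch.nn.Module):
    """Stand-in for a detector exposing forward_export."""

    def forward_export(self, image):
        return image.mean(dim=(2, 3))


example = 255 * torch.rand((1, 3, 640, 640))
wrapper = ModelExportWrapper(ToyDetector(), example, trace_model=False)
wrapper.eval()

traced = torch.jit.trace(wrapper, example)
with io.open('/tmp/toy_model.jit', 'wb') as f:
    torch.jit.save(traced, f)
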
diff --git a/easycv/models/detection/detectors/yolox/postprocess.py b/easycv/models/detection/detectors/yolox/postprocess.py
deleted file mode 100644
index e7175086..00000000
--- a/easycv/models/detection/detectors/yolox/postprocess.py
+++ /dev/null
@@ -1,159 +0,0 @@
-# !!!ignore it for cr, we are still work for tensorrt nms problem
-
-import torch
-from torch import nn
-
-
-class TRT8_NMS(torch.autograd.Function):
-    '''TensorRT NMS operation'''
-
-    @staticmethod
-    def forward(
-        ctx,
-        boxes,
-        scores,
-        background_class=-1,
-        box_coding=1,
-        iou_threshold=0.45,
-        max_output_boxes=100,
-        plugin_version='1',
-        score_activation=0,
-        score_threshold=0.25,
-    ):
-        batch_size, num_boxes, num_classes = scores.shape
-        num_det = torch.randint(
-            0, max_output_boxes, (batch_size, 1), dtype=torch.int32)
-        det_boxes = torch.randn(batch_size, max_output_boxes, 4)
-        det_scores = torch.randn(batch_size, max_output_boxes)
-        det_classes = torch.randint(
-            0, num_classes, (batch_size, max_output_boxes), dtype=torch.int32)
-        return num_det, det_boxes, det_scores, det_classes
-
-    @staticmethod
-    def symbolic(g,
-                 boxes,
-                 scores,
-                 background_class=-1,
-                 box_coding=1,
-                 iou_threshold=0.45,
-                 max_output_boxes=100,
-                 plugin_version='1',
-                 score_activation=0,
-                 score_threshold=0.25):
-        out = g.op(
-            'TRT::EfficientNMS_TRT',
-            boxes,
-            scores,
-            background_class_i=background_class,
-            box_coding_i=box_coding,
-            iou_threshold_f=iou_threshold,
-            max_output_boxes_i=max_output_boxes,
-            plugin_version_s=plugin_version,
-            score_activation_i=score_activation,
-            score_threshold_f=score_threshold,
-            outputs=4)
-        nums, boxes, scores, classes = out
-        return nums, boxes, scores, classes
-
-
-class ONNX_TRT8(nn.Module):
-    '''onnx module with TensorRT NMS operation.'''
-
-    def __init__(self,
-                 max_obj=100,
-                 iou_thres=0.45,
-                 score_thres=0.25,
-                 max_wh=None,
-                 device=None):
-        super().__init__()
-        assert max_wh is None
-        self.device = device if device else torch.device('cpu')
-        self.background_class = -1,
-        self.box_coding = 1,
-        self.iou_threshold = iou_thres
-        self.max_obj = max_obj
-        self.plugin_version = '1'
-        self.score_activation = 0
-        self.score_threshold = score_thres
-
-    def forward(self, x):
-        box = x[:, :, :4]
-        conf = x[:, :, 4:5]
-        score = x[:, :, 5:]
-        score *= conf
-        num_det, det_boxes, det_scores, det_classes = TRT8_NMS.apply(
-            box, score, self.background_class, self.box_coding,
-            self.iou_threshold, self.max_obj, self.plugin_version,
-            self.score_activation, self.score_threshold)
-        return num_det, det_boxes, det_scores, det_classes
-
-
-def create_tensorrt_postprocess(example_scores,
-                                iou_thres=0.45,
-                                score_thres=0.25):
-    from torch_blade import tensorrt
-    import torch_blade._torch_blade._backends as backends
-    import io
-
-    model = torch.jit.trace(
-        ONNX_TRT8(iou_thres=iou_thres, score_thres=score_thres),
-        example_scores)
-    example_outputs = model(example_scores)
-
-    input_names = ['input']
-    output_names = [
-        'num_det', 'detection_boxes', 'detection_scores', 'detection_classes'
-    ]
-    with io.BytesIO() as onnx_proto_f:
-        torch.onnx.export(
-            model,
-            example_scores,
-            onnx_proto_f,
-            input_names=input_names,
-            output_names=output_names,
-            example_outputs=example_outputs)
-        onnx_proto = onnx_proto_f.getvalue()
-
-    def _copy_meta(data, name, dtype, sizes):
-        data.name = name
-        if dtype.is_floating_point:
-            data.dtype = 'Float'
-        else:
-            data.dtype = 'Int'
-        data.sizes = sizes
-        return data
-
-    state = backends.EngineState()
-    state.inputs = [
-        _copy_meta(backends.TensorInfo(), name, tensor.dtype,
-                   list(tensor.shape))
-        for name, tensor in zip(input_names, [example_scores])
-    ]
-    state.outputs = [
-        _copy_meta(backends.TensorInfo(), name, tensor.dtype, [])
-        for name, tensor in zip(output_names, example_outputs)
-    ]
-    state = tensorrt.cvt_onnx_to_tensorrt(onnx_proto, state, [], dict())
-
-    class Model(torch.nn.Module):
-
-        def __init__(self, state):
-            super().__init__()
-            self._trt_engine_ext = backends.create_engine(state)
-
-        def forward(self, x):
-            return self._trt_engine_ext.execute([x])
-
-    trt_ext = torch.jit.script(Model(state))
-    return trt_ext
-
-
-if __name__ == '__main__':
-    bs = 32
-    num_boxes = 100
-    num_classes = 2
-    example_scores = torch.randn([bs, num_boxes, 4 + 1 + num_classes],
-                                 dtype=torch.float32)
-    trt_ext = create_tensorrt_postprocess(example_scores)
-    out = trt_ext.forward(example_scores)
-    print(out)
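The deleted module above wraps TensorRT's batched NMS, whose outputs are fixed-size, padded tensors rather than per-image lists. As a minimal illustration (not part of the codebase, and assuming the usual EfficientNMS layout of num_det [bs, 1], boxes [bs, max_obj, 4], scores and classes [bs, max_obj]), a caller could slice out the valid rows for one image like this:

def unpack_trt_nms(num_det, boxes, scores, classes, batch_idx=0):
    # num_det gives the number of valid detections per image; the remaining rows are padding
    n = int(num_det[batch_idx])
    return boxes[batch_idx, :n], scores[batch_idx, :n], classes[batch_idx, :n]

# e.g. nums, boxes, scores, classes = trt_ext.forward(example_scores)
#      img_boxes, img_scores, img_classes = unpack_trt_nms(nums, boxes, scores, classes)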
diff --git a/easycv/models/detection/detectors/yolox/test.py b/easycv/models/detection/detectors/yolox/test.py
deleted file mode 100644
index e2c8d7e4..00000000
--- a/easycv/models/detection/detectors/yolox/test.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# from easycv.models.detection.detectors.yolox import YOLOX
-import sys
-
-import numpy as np
-import torch
-from PIL import Image
-from torchvision.transforms import Compose
-
-from easycv.datasets.registry import PIPELINES
-from easycv.models import build_model
-from easycv.models.detection.detectors.yolox.postprocess import \
-    create_tensorrt_postprocess
-from easycv.models.detection.utils import postprocess
-from easycv.utils.checkpoint import load_checkpoint
-from easycv.utils.config_tools import mmcv_config_fromfile
-from easycv.utils.registry import build_from_cfg
-
-if __name__ == '__main__':
-    #a = YOLOX(decode_in_inference=False).eval()
-    cfg = sys.argv[1]
-    ckpt_path = sys.argv[2]
-
-    cfg = mmcv_config_fromfile(cfg)
-    model = build_model(cfg.model)
-    load_checkpoint(model, ckpt_path, map_location='cpu')
-    model = model.eval()
-
-    test_pipeline = cfg.test_pipeline
-    CLASSES = cfg.CLASSES
-
-    pipeline = [build_from_cfg(p, PIPELINES) for p in test_pipeline]
-    pipeline = Compose(pipeline)
-
-    # 8400 ishard code, need to reimplement to  sum(img_w / stride_i + img_h /stride_i)
-    example_scores = torch.randn([1, 8400, 85], dtype=torch.float32)
-    trt_ext = create_tensorrt_postprocess(
-        example_scores, iou_thres=model.nms_thre, score_thres=model.test_conf)
-
-    # img_path = '/apsara/xinyi.zxy/data/coco/val2017/000000129062.jpg'
-    img_path = '/apsara/xinyi.zxy/data/coco/val2017/000000254016.jpg'
-    # img = cv2.imread(img_path)
-    img = Image.open(img_path)
-    if type(img) is not np.ndarray:
-        img = np.asarray(img)
-
-    # ori_img_shape = img.shape[:2]
-    data_dict = {'img': img}
-    data_dict = pipeline(data_dict)
-    img = data_dict['img']
-    img = torch.unsqueeze(img._data, 0)
-    # print(img.shape)
-    model.decode_in_inference = False
-    # print(type(model), model.decode_in_inference)
-    c = model.forward_export(img)
-
-    # print(type(c), c.shape)
-    print(model.test_conf, model.nms_thre, model.num_classes,
-          model.decode_in_inference)
-    tc = model.head.decode_outputs(c, c[0].type())
-    # print(type(tc))
-    # print(tc.shape)
-
-    import copy
-
-    tcback = copy.deepcopy(tc)
-
-    tpa = postprocess(tc, model.num_classes, model.test_conf,
-                      model.nms_thre)[0]
-    # print(tpa)
-    tpa[:, 4] = tpa[:, 4] * tpa[:, 5]
-    tpa[:, 5] = tpa[:, 6]
-    tpa = tpa[:, :6]
-    # print("fuck tpa:", len(tpa), tpa[0].shape)
-    box_a = tpa[:, :4]
-    score_a = tpa[:, 4]
-    id_a = tpa[:, 5]
-    # print(tpa)
-
-    # trt_ext must be cuda
-    tcback = tcback
-    tpb = trt_ext.forward(tcback)
-    # print("fuck tpb:",len(tpb))
-
-    valid_length = min(len(tpa), tpb[2].shape[1])
-    print(valid_length)
-    valid_length = min(valid_length, 30)
-
-    box_a = box_a[:valid_length]
-    score_a = score_a[:valid_length]
-    id_a = id_a[:valid_length]
-
-    print(tpb[1].shape)
-    print(tpb[2].shape)
-    print(tpb[3].shape)
-
-    box_b = tpb[1][:, :valid_length, :].cpu().view(box_a.shape)
-    score_b = tpb[2][:, :valid_length].cpu().view(score_a.shape)
-    id_b = tpb[3][:, :valid_length].cpu().view(id_a.shape)
-
-    def get_diff(input_a, input_b, name='score'):
-        print('name:', name)
-        print('shape:', input_a.shape)
-        print('max_diff  :', torch.max(input_a - input_b))
-        print('avg_diff  :', torch.mean(input_a - input_b))
-        print('totol_diff:', torch.sum(input_a - input_b))
-
-    get_diff(box_a, box_b, 'box')
-    get_diff(score_a, score_b, 'score')
-    get_diff(id_a, id_a, 'id')
-
-    if 0:
-        from easycv.predictors import TorchYoloXPredictor
-
-        img = Image.open(img_path)
-        pred = TorchYoloXPredictor('models/predict.pt')
-        m = pred.predict([img])
-        print(m)
diff --git a/easycv/models/detection/detectors/yolox/yolox.py b/easycv/models/detection/detectors/yolox/yolox.py
index 66f82810..bb9760ac 100644
--- a/easycv/models/detection/detectors/yolox/yolox.py
+++ b/easycv/models/detection/detectors/yolox/yolox.py
@@ -198,8 +198,8 @@ def forward_export(self, img):
                         logging.error(
                             'PAI-YOLOX : using trt_efficientnms set to be True, but model has not attr(trt_efficientnms)'
                         )
-                else:
-                    outputs = postprocess(outputs, self.num_classes,
-                                          self.test_conf, self.nms_thre)
+                # else:
+                #     outputs = postprocess(outputs, self.num_classes,
+                #                           self.test_conf, self.nms_thre)
 
         return outputs
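With the built-in postprocess commented out above, forward_export now returns the raw predictions and leaves confidence filtering and NMS to the caller, which is exactly what the predictor change below does. A rough caller-side sketch, assuming a loaded YOLOX detector with decode_in_inference enabled; everything outside the easycv imports is a placeholder:

import torch
from easycv.models.detection.utils import postprocess

def run_export_inference(model, img):
    # model: loaded YOLOX detector in eval mode; img: preprocessed [bs, 3, H, W] tensor
    with torch.no_grad():
        raw_out = model.forward_export(img)  # roughly [bs, num_anchors, 5 + num_classes]
    # each returned row: x1, y1, x2, y2, obj_conf, cls_conf, cls_id (layout as used by the old test script)
    return postprocess(raw_out, model.num_classes, model.test_conf, model.nms_thre)[0]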
diff --git a/easycv/models/detection/detectors/yolox_edge/yolox_edge.py b/easycv/models/detection/detectors/yolox_edge/yolox_edge.py
index 4fb55aaa..7fbf7a2b 100644
--- a/easycv/models/detection/detectors/yolox_edge/yolox_edge.py
+++ b/easycv/models/detection/detectors/yolox_edge/yolox_edge.py
@@ -63,4 +63,4 @@ def __init__(self,
         self.num_classes = num_classes
         self.test_conf = test_conf
         self.nms_thre = nms_thre
-        self.test_size = test_size
\ No newline at end of file
+        self.test_size = test_size
diff --git a/easycv/predictors/detector.py b/easycv/predictors/detector.py
index bf4d751f..2d23a0f0 100644
--- a/easycv/predictors/detector.py
+++ b/easycv/predictors/detector.py
@@ -18,6 +18,7 @@
 from easycv.file import io
 from easycv.file.utils import is_url_path, url_path_exists
 from easycv.models import build_model
+from easycv.models.detection.utils import postprocess
 from easycv.utils.checkpoint import load_checkpoint
 from easycv.utils.config_tools import mmcv_config_fromfile
 from easycv.utils.constant import CACHE_DIR
@@ -67,7 +68,7 @@ def __init__(self,
 
         self.use_trt_efficientnms = use_trt_efficientnms
 
-        if self.model_type=='blade':
+        if self.model_type == 'blade' or self.use_trt_efficientnms:
             import torch_blade
 
         if model_config:
@@ -78,7 +79,7 @@ def __init__(self,
         self.score_thresh = model_config[
             'score_thresh'] if 'score_thresh' in model_config else score_thresh
 
-        if self.model_type!='ori':
+        if self.model_type != 'ori':
             # jit or blade model
             preprocess_path = '.'.join(
                 model_path.split('.')[:-1] + ['preprocess'])
@@ -87,6 +88,7 @@ def __init__(self,
                 with io.open(preprocess_path, 'rb') as infile:
                     map_location = 'cpu' if self.device == 'cpu' else 'cuda'
                     self.preprocess = torch.jit.load(infile, map_location)
+
             with io.open(model_path, 'rb') as infile:
                 map_location = 'cpu' if self.device == 'cpu' else 'cuda'
                 self.model = torch.jit.load(infile, map_location)
@@ -94,12 +96,12 @@ def __init__(self,
                 self.cfg = json.load(infile)
                 test_pipeline = self.cfg['test_pipeline']
                 self.CLASSES = self.cfg['classes']
-                self.end2end = self.cfg['export']['end2end']
+                self.preprocess_jit = self.cfg['export']['preprocess_jit']
 
             self.traceable = True
 
         else:
-            self.end2end = False
+            self.preprocess_jit = False
             with io.open(self.model_path, 'rb') as infile:
                 checkpoint = torch.load(infile, map_location='cpu')
 
@@ -139,6 +141,10 @@ def __init__(self,
         # build pipeline
         pipeline = [build_from_cfg(p, PIPELINES) for p in test_pipeline]
         self.pipeline = Compose(pipeline)
+        print(self.cfg)
+        self.test_conf = self.cfg['model'].get('test_conf', 0.01)
+        self.nms_thre = self.cfg['model'].get('nms_thre', 0.65)
+        self.num_classes = len(self.CLASSES)
 
     def post_assign(self, outputs, img_metas):
         detection_boxes = []
@@ -151,6 +157,7 @@ def post_assign(self, outputs, img_metas):
                 img_metas_list.append(img_metas[i])
             if outputs[i].requires_grad == True:
                 outputs[i] = outputs[i].detach()
+
             if outputs[i] is not None:
                 bboxes = outputs[i][:, 0:4] if outputs[i] is not None else None
                 if img_metas:
@@ -193,83 +200,61 @@ def predict(self, input_data_list, batch_size=-1, to_numpy=True):
                 img = np.asarray(img)
 
             ori_img_shape = img.shape[:2]
-            if self.end2end:
-                print('end2end')
+            if self.preprocess_jit:
                 # the input should also be as the type of uint8 as mmcv
                 img = torch.from_numpy(img).to(self.device)
                 img = img.unsqueeze(0)
-                if hasattr(self, 'preprocess'):
-                    img = self.preprocess(img)
-
-                if self.use_trt_efficientnms:
-                    tmp_out = self.model(img)
-                    det_out = {}
-                    det_out['detection_boxes'] = tmp_out[1]
-                    det_out['detection_scores'] = tmp_out[2]
-                    det_out['detection_classes'] = tmp_out[3]
-                else:
-                    det_out = self.model(img)
 
-                detection_scores = det_out['detection_scores']
+                if hasattr(self, 'preprocess'):
+                    img, img_info = self.preprocess(img)
 
-                if detection_scores is not None:
-                    sel_ids = detection_scores > self.score_thresh
-                    detection_scores = detection_scores[sel_ids]
-                    detection_boxes = det_out['detection_boxes'][sel_ids]
-                    detection_classes = det_out['detection_classes'][sel_ids]
-                else:
-                    detection_boxes = []
-                    detection_classes = []
-
-                if to_numpy:
-                    detection_scores = detection_scores.cpu().detach().numpy()
-                    detection_boxes = detection_boxes.cpu().detach().numpy()
-                    detection_classes = detection_classes.cpu().detach().numpy(
-                    )
             else:
                 data_dict = {'img': img}
                 data_dict = self.pipeline(data_dict)
                 img = data_dict['img']
                 img = torch.unsqueeze(img._data, 0).to(self.device)
                 data_dict.pop('img')
-                if self.traceable:
-                    if self.use_trt_efficientnms:
-                        with torch.no_grad():
-                            tmp_out = self.model(img)
-                            det_out = {}
-                            det_out['detection_boxes'] = tmp_out[1]
-                            det_out['detection_scores'] = tmp_out[2]
-                            det_out['detection_classes'] = tmp_out[3]
-                    else:
-                        with torch.no_grad():
-                            det_out = self.post_assign(
-                                self.model(img),
-                                img_metas=[data_dict['img_metas']._data])
-                else:
+                img_info = data_dict['img_metas']._data
+
+            if self.traceable:
+                if self.use_trt_efficientnms:
                     with torch.no_grad():
-                        det_out = self.model(
-                            img,
-                            mode='test',
-                            img_metas=[data_dict['img_metas']._data])
-
-                # print(det_out)
-                # det_out = det_out[:self.max_det]
-                # scale box to original image scale, this logic has some operation
-                # that can not be traced, see
-                # https://discuss.pytorch.org/t/windows-libtorch-c-load-cuda-module-with-std-runtime-error-message-shape-4-is-invalid-for-input-if-size-40/63073/4
-                # det_out = scale_coords(img.shape[2:], det_out, ori_img_shape, (scale_factor, pad))
-
-                detection_scores = det_out['detection_scores'][0]
-
-                if detection_scores is not None:
-                    sel_ids = detection_scores > self.score_thresh
-                    detection_scores = detection_scores[sel_ids]
-                    detection_boxes = det_out['detection_boxes'][0][sel_ids]
-                    detection_classes = det_out['detection_classes'][0][
-                        sel_ids]
+                        tmp_out = self.model(img)
+                        det_out = {}
+                        det_out['detection_boxes'] = tmp_out[1] / img_info[
+                            'scale_factor'][0]
+                        det_out['detection_scores'] = tmp_out[2]
+                        det_out['detection_classes'] = tmp_out[3]
+
                 else:
-                    detection_boxes = None
-                    detection_classes = None
+                    with torch.no_grad():
+                        det_out = self.post_assign(
+                            postprocess(
+                                self.model(img), self.num_classes,
+                                self.test_conf, self.nms_thre),
+                            img_metas=[img_info])
+            else:
+                with torch.no_grad():
+                    det_out = self.model(
+                        img, mode='test', img_metas=[img_info])
+
+            # print(det_out)
+            # det_out = det_out[:self.max_det]
+            # scale box to original image scale, this logic has some operation
+            # that can not be traced, see
+            # https://discuss.pytorch.org/t/windows-libtorch-c-load-cuda-module-with-std-runtime-error-message-shape-4-is-invalid-for-input-if-size-40/63073/4
+            # det_out = scale_coords(img.shape[2:], det_out, ori_img_shape, (scale_factor, pad))
+
+            detection_scores = det_out['detection_scores'][0]
+
+            if detection_scores is not None:
+                sel_ids = detection_scores > self.score_thresh
+                detection_scores = detection_scores[sel_ids]
+                detection_boxes = det_out['detection_boxes'][0][sel_ids]
+                detection_classes = det_out['detection_classes'][0][sel_ids]
+            else:
+                detection_boxes = None
+                detection_classes = None
 
             num_boxes = detection_classes.shape[
                 0] if detection_classes is not None else 0
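From the user's side the predictor API is unchanged by this refactor; a short usage sketch (model and image paths are placeholders):

import numpy as np
from PIL import Image
from easycv.predictors import TorchYoloXPredictor

img = np.asarray(Image.open('demo.jpg'))
predictor = TorchYoloXPredictor('yolox_s_export.pt.jit', score_thresh=0.5)
det = predictor.predict([img])[0]
print(det['detection_boxes'], det['detection_scores'], det['detection_class_names'])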
diff --git a/numeric_test.py b/numeric_test.py
deleted file mode 100644
index 6d48c9dd..00000000
--- a/numeric_test.py
+++ /dev/null
@@ -1,55 +0,0 @@
-from easycv.models.detection.detectors.yolox.postprocess import create_tensorrt_postprocess
-import torch
-from torchvision.transforms import Compose
-
-from easycv.models import build_model
-from easycv.utils.checkpoint import load_checkpoint
-from easycv.utils.config_tools import mmcv_config_fromfile
-from easycv.utils.registry import build_from_cfg
-from easycv.datasets.registry import PIPELINES
-from easycv.models.detection.utils import postprocess
-
-import sys
-import numpy as np
-from PIL import Image
-import time
-
-from contextlib import contextmanager
-
-
-@contextmanager
-def timeit_context(name):
-    startTime = time.time()
-    yield
-    elapsedTime = time.time() - startTime
-    print('[{}] finished in {} ms'.format(name, int(elapsedTime * 1000)))
-
-
-def model_speed_test(name, img, use_trt_efficientnms=False):
-    pred = TorchYoloXPredictor(name, use_trt_efficientnms=use_trt_efficientnms)
-    for i in range(10):
-        m0 = pred.predict([img])
-    with timeit_context('{} speed test'.format(name)):
-        for i in range(100):
-            m0 = pred.predict([img])
-    print(m0[0]['detection_classes'])
-    print(m0[0]['detection_scores'])
-
-
-if __name__ == '__main__':
-    if 1:
-        img_path = '/apsara/xinyi.zxy/data/coco/val2017/000000254016.jpg'
-        from easycv.predictors import TorchYoloXPredictor
-
-        img = Image.open(img_path)
-
-        # model_speed_test('/apsara/xinyi.zxy/pretrain/yolox_s/epoch_300.pt', img)
-        # model_speed_test('/apsara/xinyi.zxy/pretrain/yolox_s/epoch_300_j_noe2e.pt.jit', img, False)
-        # # model_speed_test('/apsara/xinyi.zxy/pretrain/yolox_s/epoch_300_j_noe2e_trt.pt.jit', img, True)  # jit ??
-        # model_speed_test('/apsara/xinyi.zxy/pretrain/yolox_s/epoch_300_j_e2e_notrt.pt.jit', img, False)
-        # #model_speed_test('/apsara/xinyi.zxy/pretrain/yolox_s/epoch_300_j_e2e_trt.pt.jit', img, True)
-
-        model_speed_test('/apsara/xinyi.zxy/pretrain/yolox_s/epoch_300_b_noe2e.pt.blade', img, False)
-        # model_speed_test('/apsara/xinyi.zxy/pretrain/yolox_s/epoch_300_b_noe2e_trt.pt.blade', img, True)
-        # model_speed_test('/apsara/xinyi.zxy/pretrain/yolox_s/epoch_300_b_e2e_trt.pt.blade', img, True)
-
diff --git a/tests/apis/test_export.py b/tests/apis/test_export.py
index 0ce5965d..355e7d5e 100644
--- a/tests/apis/test_export.py
+++ b/tests/apis/test_export.py
@@ -49,7 +49,7 @@ def test_export_yolox(self):
     def test_export_yolox_jit(self):
         config_file = 'configs/detection/yolox/yolox_s_8xb16_300e_coco.py'
         cfg = mmcv_config_fromfile(config_file)
-        cfg.export = dict(use_jit=True, export_blade=False, end2end=False)
+        cfg.export = dict(export_type='jit', end2end=False)
         ori_ckpt = PRETRAINED_MODEL_YOLOXS_EXPORT
 
         target_path = f'{self.tmp_dir}/export_yolox_s_epoch300_export'
@@ -61,7 +61,7 @@ def test_export_yolox_jit(self):
     def test_export_yolox_jit_end2end(self):
         config_file = 'configs/detection/yolox/yolox_s_8xb16_300e_coco.py'
         cfg = mmcv_config_fromfile(config_file)
-        cfg.export = dict(use_jit=True, export_blade=False, end2end=True)
+        cfg.export = dict(export_type='jit', end2end=True)
         ori_ckpt = PRETRAINED_MODEL_YOLOXS_EXPORT
 
         target_path = f'{self.tmp_dir}/export_yolox_s_epoch300_end2end'
diff --git a/tests/apis/test_export_blade.py b/tests/apis/test_export_blade.py
deleted file mode 100644
index 7346c2e4..00000000
--- a/tests/apis/test_export_blade.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# Copyright (c) Alibaba, Inc. and its affiliates.
-import json
-import os
-import subprocess
-import tempfile
-import unittest
-
-import numpy as np
-import torch
-from tests.ut_config import (PRETRAINED_MODEL_RESNET50,
-                             PRETRAINED_MODEL_YOLOXS_EXPORT)
-
-from easycv.apis.export import export
-from easycv.utils.config_tools import mmcv_config_fromfile
-from easycv.utils.test_util import clean_up, get_tmp_dir
-
-
-@unittest.skipIf(torch.__version__ != '1.8.1+cu102',
-                 'Blade need another environment')
-class ModelExportTest(unittest.TestCase):
-
-    def setUp(self):
-        print(('Testing %s.%s' % (type(self).__name__, self._testMethodName)))
-        self.tmp_dir = get_tmp_dir()
-        print('tmp dir %s' % self.tmp_dir)
-
-    def tearDown(self):
-        clean_up(self.tmp_dir)
-
-    def test_export_yolox_blade(self):
-        config_file = 'configs/detection/yolox/yolox_s_8xb16_300e_coco.py'
-        cfg = mmcv_config_fromfile(config_file)
-        cfg.export = dict(use_jit=True, export_blade=True, end2end=False)
-        ori_ckpt = PRETRAINED_MODEL_YOLOXS_EXPORT
-
-        target_path = f'{self.tmp_dir}/export_yolox_s_epoch300_export'
-
-        export(cfg, ori_ckpt, target_path)
-        self.assertTrue(os.path.exists(target_path + '.jit'))
-        self.assertTrue(os.path.exists(target_path + '.jit.config.json'))
-        self.assertTrue(os.path.exists(target_path + '.blade'))
-        self.assertTrue(os.path.exists(target_path + '.blade.config.json'))
-
-    def test_export_yolox_blade_nojit(self):
-        config_file = 'configs/detection/yolox/yolox_s_8xb16_300e_coco.py'
-        cfg = mmcv_config_fromfile(config_file)
-        cfg.export = dict(use_jit=False, export_blade=True, end2end=False)
-        ori_ckpt = PRETRAINED_MODEL_YOLOXS_EXPORT
-
-        target_path = f'{self.tmp_dir}/export_yolox_s_epoch300_export'
-
-        export(cfg, ori_ckpt, target_path)
-        self.assertFalse(os.path.exists(target_path + '.jit'))
-        self.assertFalse(os.path.exists(target_path + '.jit.config.json'))
-        self.assertTrue(os.path.exists(target_path + '.blade'))
-        self.assertTrue(os.path.exists(target_path + '.blade.config.json'))
-
-    # need a trt env
-    # def test_export_yolox_blade_end2end(self):
-    #     config_file = 'configs/detection/yolox/yolox_s_8xb16_300e_coco.py'
-    #     cfg = mmcv_config_fromfile(config_file)
-    #     cfg.export = dict(use_jit=True, export_blade=True, end2end=True)
-    #     ori_ckpt = PRETRAINED_MODEL_YOLOXS_EXPORT
-    #
-    #     target_path = f'{self.tmp_dir}/export_yolox_s_epoch300_end2end'
-    #
-    #     export(cfg, ori_ckpt, target_path)
-    #     self.assertTrue(os.path.exists(target_path + '.jit'))
-    #     self.assertTrue(os.path.exists(target_path + '.jit.config.json'))
-    #     self.assertTrue(os.path.exists(target_path + '.blade'))
-    #     self.assertTrue(os.path.exists(target_path + '.blade.config.json'))
-
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/tests/predictors/test_detector.py b/tests/predictors/test_detector.py
index 5d4b4189..e31601a2 100644
--- a/tests/predictors/test_detector.py
+++ b/tests/predictors/test_detector.py
@@ -39,39 +39,36 @@ def test_yolox_detector(self):
         self.assertIn('detection_classes', output)
         self.assertIn('detection_class_names', output)
         self.assertIn('ori_img_shape', output)
-        self.assertEqual(len(output['detection_boxes']), 9)
+        self.assertEqual(len(output['detection_boxes']), 7)
         self.assertEqual(output['ori_img_shape'], [230, 352])
 
         self.assertListEqual(
             output['detection_classes'].tolist(),
-            np.array([72, 69, 60, 56, 49, 49, 72, 46, 49],
-                     dtype=np.int32).tolist())
+            np.array([72, 69, 49, 60, 49, 46, 49], dtype=np.int32).tolist())
 
         self.assertListEqual(output['detection_class_names'], [
-            'refrigerator', 'oven', 'dining table', 'chair', 'orange',
-            'orange', 'refrigerator', 'banana', 'orange'
+            'refrigerator', 'oven', 'orange', 'dining table', 'orange',
+            'banana', 'orange'
         ])
 
         assert_array_almost_equal(
             output['detection_scores'],
             np.array([
-                0.93252, 0.88439, 0.75048, 0.74093, 0.67255, 0.65550, 0.63942,
-                0.60507, 0.56973
+                0.9150531, 0.79622537, 0.68270016, 0.6374498, 0.600583,
+                0.56233376, 0.5553793
             ],
                      dtype=np.float32),
             decimal=2)
 
         assert_array_almost_equal(
             output['detection_boxes'],
-            np.array([[298.28256, 76.26037, 352., 228.91579],
-                      [137.9849, 124.92237, 196.49876, 193.12375],
-                      [76.42237, 170.30052, 292.4093, 227.32962],
-                      [117.317, 188.9916, 165.43694, 212.3457],
-                      [231.36719, 199.89865, 248.27888, 217.50288],
-                      [217.1154, 200.18729, 232.20607, 214.38866],
-                      [121.948105, 90.01667, 193.17673, 194.04584],
-                      [240.4494, 188.07112, 258.7406, 206.78226],
-                      [204.21452, 187.11292, 220.3842, 207.25877]]),
+            np.array([[297.19852, 80.219, 351.66287, 230.79648],
+                      [138.09207, 124.47283, 196.79352, 192.22653],
+                      [231.14838, 200.19218, 248.22963, 217.3257],
+                      [83.67468, 170.77133, 292.94174, 228.3555],
+                      [217.1804, 200.14183, 232.14671, 214.6331],
+                      [238.3924, 187.66922, 258.8048, 205.60503],
+                      [204.3168, 187.50964, 220.10815, 207.12463]]),
             decimal=1)
 
     def test_yolox_detector_jit_end2end(self):
@@ -92,34 +89,36 @@ def test_yolox_detector_jit_end2end(self):
 
         self.assertListEqual(
             output_jit['detection_classes'].tolist(),
-            np.array([72, 69, 60, 56, 49, 49, 72, 46, 49],
-                     dtype=np.int32).tolist())
+            np.array([72, 69, 49, 60, 49, 46, 49], dtype=np.int32).tolist())
 
         self.assertListEqual(output_jit['detection_class_names'], [
-            'refrigerator', 'oven', 'dining table', 'chair', 'orange',
-            'orange', 'refrigerator', 'banana', 'orange'
+            'refrigerator', 'oven', 'orange', 'dining table', 'orange',
+            'banana', 'orange'
         ])
 
         assert_array_almost_equal(
             output_jit['detection_scores'],
             np.array([
-                0.93252, 0.88439, 0.75048, 0.74093, 0.67255, 0.65550, 0.63942,
-                0.60507, 0.56973
+                0.9150531, 0.79622537, 0.68270016, 0.6374498, 0.600583,
+                0.56233376, 0.5553793
             ],
                      dtype=np.float32),
             decimal=2)
 
         assert_array_almost_equal(
             output_jit['detection_boxes'],
-            np.array([[298.28256, 76.26037, 352., 228.91579],
-                      [137.9849, 124.92237, 196.49876, 193.12375],
-                      [76.42237, 170.30052, 292.4093, 227.32962],
-                      [117.317, 188.9916, 165.43694, 212.3457],
-                      [231.36719, 199.89865, 248.27888, 217.50288],
-                      [217.1154, 200.18729, 232.20607, 214.38866],
-                      [121.948105, 90.01667, 193.17673, 194.04584],
-                      [240.4494, 188.07112, 258.7406, 206.78226],
-                      [204.21452, 187.11292, 220.3842, 207.25877]]),
+            np.array([[297.19852, 80.219, 351.66287, 230.79648],
+                      [138.09207, 124.47283, 196.79352, 192.22653],
+                      [231.14838, 200.19218, 248.22963, 217.3257],
+                      [83.67468, 170.77133, 292.94174, 228.3555],
+                      [217.1804, 200.14183, 232.14671, 214.6331],
+                      [238.3924, 187.66922, 258.8048, 205.60503],
+                      [204.3168, 187.50964, 220.10815, 207.12463]]),
             decimal=1)
 
     def test_yolox_detector_jit(self):
@@ -141,34 +140,31 @@ def test_yolox_detector_jit(self):
 
         self.assertListEqual(
             output_jit['detection_classes'].tolist(),
-            np.array([72, 69, 60, 56, 49, 49, 72, 46, 49],
-                     dtype=np.int32).tolist())
+            np.array([72, 69, 49, 60, 49, 46, 49], dtype=np.int32).tolist())
 
         self.assertListEqual(output_jit['detection_class_names'], [
-            'refrigerator', 'oven', 'dining table', 'chair', 'orange',
-            'orange', 'refrigerator', 'banana', 'orange'
+            'refrigerator', 'oven', 'orange', 'dining table', 'orange',
+            'banana', 'orange'
         ])
 
         assert_array_almost_equal(
             output_jit['detection_scores'],
             np.array([
-                0.93252, 0.88439, 0.75048, 0.74093, 0.67255, 0.65550, 0.63942,
-                0.60507, 0.56973
+                0.9150531, 0.79622537, 0.68270016, 0.6374498, 0.600583,
+                0.56233376, 0.5553793
             ],
                      dtype=np.float32),
             decimal=2)
 
         assert_array_almost_equal(
             output_jit['detection_boxes'],
-            np.array([[298.28256, 76.26037, 352., 228.91579],
-                      [137.9849, 124.92237, 196.49876, 193.12375],
-                      [76.42237, 170.30052, 292.4093, 227.32962],
-                      [117.317, 188.9916, 165.43694, 212.3457],
-                      [231.36719, 199.89865, 248.27888, 217.50288],
-                      [217.1154, 200.18729, 232.20607, 214.38866],
-                      [121.948105, 90.01667, 193.17673, 194.04584],
-                      [240.4494, 188.07112, 258.7406, 206.78226],
-                      [204.21452, 187.11292, 220.3842, 207.25877]]),
+            np.array([[297.19852, 80.219, 351.66287, 230.79648],
+                      [138.09207, 124.47283, 196.79352, 192.22653],
+                      [231.14838, 200.19218, 248.22963, 217.3257],
+                      [83.67468, 170.77133, 292.94174, 228.3555],
+                      [217.1804, 200.14183, 232.14671, 214.6331],
+                      [238.3924, 187.66922, 258.8048, 205.60503],
+                      [204.3168, 187.50964, 220.10815, 207.12463]]),
             decimal=1)
 
     def test_vitdet_detector(self):
diff --git a/tests/predictors/test_detector_blade.py b/tests/predictors/test_detector_blade.py
deleted file mode 100644
index f079944a..00000000
--- a/tests/predictors/test_detector_blade.py
+++ /dev/null
@@ -1,204 +0,0 @@
-# Copyright (c) Alibaba, Inc. and its affiliates.
-"""
-isort:skip_file
-"""
-import os
-import tempfile
-import unittest
-import cv2
-import numpy as np
-from PIL import Image
-from easycv.predictors.detector import TorchYoloXPredictor
-from tests.ut_config import (PRETRAINED_MODEL_YOLOXS_EXPORT,
-                             PRETRAINED_MODEL_YOLOXS_EXPORT_JIT,
-                             PRETRAINED_MODEL_YOLOXS_EXPORT_BLADE,
-                             PRETRAINED_MODEL_YOLOXS_END2END_JIT,
-                             PRETRAINED_MODEL_YOLOXS_END2END_BLADE,
-                             DET_DATA_SMALL_COCO_LOCAL)
-
-from easycv.utils.test_util import benchmark
-import logging
-import pandas as pd
-import torch
-from numpy.testing import assert_array_almost_equal
-
-
-@unittest.skipIf(torch.__version__ != '1.8.1+cu102',
-                 'Blade need another environment')
-class DetectorTest(unittest.TestCase):
-
-    def setUp(self):
-        print(('Testing %s.%s' % (type(self).__name__, self._testMethodName)))
-
-    def test_end2end(self):
-        img = os.path.join(DET_DATA_SMALL_COCO_LOCAL,
-                           'val2017/000000037777.jpg')
-
-        input_data_list = [np.asarray(Image.open(img))]
-
-        jit_path = PRETRAINED_MODEL_YOLOXS_END2END_JIT
-        blade_path = PRETRAINED_MODEL_YOLOXS_END2END_BLADE
-
-        predictor_jit = TorchYoloXPredictor(
-            model_path=jit_path, score_thresh=0.5)
-
-        predictor_blade = TorchYoloXPredictor(
-            model_path=blade_path, score_thresh=0.5)
-
-        output_jit = predictor_jit.predict(input_data_list)[0]
-        output_blade = predictor_blade.predict(input_data_list)[0]
-
-        self.assertIn('detection_boxes', output_jit)
-        self.assertIn('detection_scores', output_jit)
-        self.assertIn('detection_classes', output_jit)
-
-        self.assertIn('detection_boxes', output_blade)
-        self.assertIn('detection_scores', output_blade)
-        self.assertIn('detection_classes', output_blade)
-
-        assert_array_almost_equal(
-            output_jit['detection_boxes'],
-            output_blade['detection_boxes'],
-            decimal=3)
-        assert_array_almost_equal(
-            output_jit['detection_classes'],
-            output_blade['detection_classes'],
-            decimal=3)
-        assert_array_almost_equal(
-            output_jit['detection_scores'],
-            output_blade['detection_scores'],
-            decimal=3)
-
-        self.assertListEqual(
-            output_jit['detection_classes'].tolist(),
-            np.array([72, 69, 60, 56, 49, 49, 72, 46, 49],
-                     dtype=np.int32).tolist())
-
-        self.assertListEqual(output_jit['detection_class_names'], [
-            'refrigerator', 'oven', 'dining table', 'chair', 'orange',
-            'orange', 'refrigerator', 'banana', 'orange'
-        ])
-
-        assert_array_almost_equal(
-            output_jit['detection_scores'],
-            np.array([
-                0.93252, 0.88439, 0.75048, 0.74093, 0.67255, 0.65550, 0.63942,
-                0.60507, 0.56973
-            ],
-                     dtype=np.float32),
-            decimal=2)
-
-        assert_array_almost_equal(
-            output_jit['detection_boxes'],
-            np.array([[298.28256, 76.26037, 352., 228.91579],
-                      [137.9849, 124.92237, 196.49876, 193.12375],
-                      [76.42237, 170.30052, 292.4093, 227.32962],
-                      [117.317, 188.9916, 165.43694, 212.3457],
-                      [231.36719, 199.89865, 248.27888, 217.50288],
-                      [217.1154, 200.18729, 232.20607, 214.38866],
-                      [121.948105, 90.01667, 193.17673, 194.04584],
-                      [240.4494, 188.07112, 258.7406, 206.78226],
-                      [204.21452, 187.11292, 220.3842, 207.25877]]),
-            decimal=1)
-
-    def test_export(self):
-        img = os.path.join(DET_DATA_SMALL_COCO_LOCAL,
-                           'val2017/000000037777.jpg')
-
-        input_data_list = [np.asarray(Image.open(img))]
-
-        jit_path = PRETRAINED_MODEL_YOLOXS_EXPORT_JIT
-        blade_path = PRETRAINED_MODEL_YOLOXS_EXPORT_BLADE
-
-        predictor_jit = TorchYoloXPredictor(
-            model_path=jit_path, score_thresh=0.5)
-
-        predictor_blade = TorchYoloXPredictor(
-            model_path=blade_path, score_thresh=0.5)
-
-        output_jit = predictor_jit.predict(input_data_list)[0]
-        output_blade = predictor_blade.predict(input_data_list)[0]
-
-        self.assertIn('detection_boxes', output_jit)
-        self.assertIn('detection_scores', output_jit)
-        self.assertIn('detection_classes', output_jit)
-
-        self.assertIn('detection_boxes', output_blade)
-        self.assertIn('detection_scores', output_blade)
-        self.assertIn('detection_classes', output_blade)
-
-        assert_array_almost_equal(
-            output_jit['detection_boxes'],
-            output_blade['detection_boxes'],
-            decimal=3)
-        assert_array_almost_equal(
-            output_jit['detection_classes'],
-            output_blade['detection_classes'],
-            decimal=3)
-        assert_array_almost_equal(
-            output_jit['detection_scores'],
-            output_blade['detection_scores'],
-            decimal=3)
-
-        self.assertListEqual(
-            output_jit['detection_classes'].tolist(),
-            np.array([72, 69, 60, 56, 49, 49, 72, 46, 49],
-                     dtype=np.int32).tolist())
-
-        self.assertListEqual(output_jit['detection_class_names'], [
-            'refrigerator', 'oven', 'dining table', 'chair', 'orange',
-            'orange', 'refrigerator', 'banana', 'orange'
-        ])
-
-        assert_array_almost_equal(
-            output_jit['detection_scores'],
-            np.array([
-                0.93252, 0.88439, 0.75048, 0.74093, 0.67255, 0.65550, 0.63942,
-                0.60507, 0.56973
-            ],
-                     dtype=np.float32),
-            decimal=2)
-
-        assert_array_almost_equal(
-            output_jit['detection_boxes'],
-            np.array([[298.28256, 76.26037, 352., 228.91579],
-                      [137.9849, 124.92237, 196.49876, 193.12375],
-                      [76.42237, 170.30052, 292.4093, 227.32962],
-                      [117.317, 188.9916, 165.43694, 212.3457],
-                      [231.36719, 199.89865, 248.27888, 217.50288],
-                      [217.1154, 200.18729, 232.20607, 214.38866],
-                      [121.948105, 90.01667, 193.17673, 194.04584],
-                      [240.4494, 188.07112, 258.7406, 206.78226],
-                      [204.21452, 187.11292, 220.3842, 207.25877]]),
-            decimal=1)
-
-    def test_time(self):
-        img = os.path.join(DET_DATA_SMALL_COCO_LOCAL,
-                           'val2017/000000037777.jpg')
-
-        jit_path = PRETRAINED_MODEL_YOLOXS_EXPORT_JIT
-        blade_path = PRETRAINED_MODEL_YOLOXS_EXPORT_BLADE
-
-        predictor_jit = TorchYoloXPredictor(
-            model_path=jit_path, score_thresh=0.5)
-
-        predictor_blade = TorchYoloXPredictor(
-            model_path=blade_path, score_thresh=0.5)
-
-        input_data_list = [np.asarray(Image.open(img))]
-
-        results = []
-
-        results.append(
-            benchmark(
-                predictor_jit, input_data_list, model_name='easycv script'))
-        results.append(
-            benchmark(predictor_blade, input_data_list, model_name='blade'))
-
-        logging.info('Model Summary:')
-        summary = pd.DataFrame(results)
-        logging.warning(summary.to_markdown())
-
-
-if __name__ == '__main__':
-    unittest.main()
diff --git a/tests/ut_config.py b/tests/ut_config.py
index f874327e..4432f1de 100644
--- a/tests/ut_config.py
+++ b/tests/ut_config.py
@@ -82,23 +82,22 @@
 PRETRAINED_MODEL_FACEID = os.path.join(BASE_LOCAL_PATH,
                                        'pretrained_models/faceid')
 PRETRAINED_MODEL_YOLOXS_EXPORT = os.path.join(
-    BASE_LOCAL_PATH, 'pretrained_models/detection/yolox_s/epoch_300_export.pt')
+    BASE_LOCAL_PATH, 'pretrained_models/detection/yolox_s/epoch_300.pt')
 PRETRAINED_MODEL_YOLOXS_END2END_JIT = os.path.join(
     BASE_LOCAL_PATH,
-    'pretrained_models/detection/yolox_s/epoch_300_end2end.jit')
+    'pretrained_models/detection/yolox_s/epoch_300_j_e2e_notrt.pt.jit')
 PRETRAINED_MODEL_YOLOXS_END2END_BLADE = os.path.join(
     BASE_LOCAL_PATH,
-    'pretrained_models/detection/yolox_s/epoch_300_end2end.blade')
+    'pretrained_models/detection/yolox_s/epoch_300_b_e2e_trt.pt.blade')
 PRETRAINED_MODEL_YOLOXS_EXPORT_JIT = os.path.join(
     BASE_LOCAL_PATH,
-    'pretrained_models/detection/yolox_s/epoch_300_export.jit')
+    'pretrained_models/detection/yolox_s/epoch_300_j_noe2e.pt.jit')
 PRETRAINED_MODEL_YOLOXS_EXPORT_BLADE = os.path.join(
     BASE_LOCAL_PATH,
-    'pretrained_models/detection/yolox_s/epoch_300_export.blade')
+    'pretrained_models/detection/yolox_s/epoch_300_b_noe2e_trt.pt.blade')
 
 PRETRAINED_MODEL_YOLOXS = os.path.join(
-    BASE_LOCAL_PATH,
-    'pretrained_models/detection/yolox_s/yolox_s_epoch_300.pth')
+    BASE_LOCAL_PATH, 'pretrained_models/detection/yolox_s/epoch_300.pth')
 
 PRETRAINED_MODEL_POSE_HRNET_EXPORT = os.path.join(
     BASE_LOCAL_PATH,

From eeb269abc9a543ccffeca402fd0c42f0b33e4eb3 Mon Sep 17 00:00:00 2001
From: zouxinyi0625 <zouxinyi.zxy@alibaba-inc.com>
Date: Sun, 21 Aug 2022 00:57:44 +0800
Subject: [PATCH 51/69] change ut

---
 ...yolox_pai_8xb16_300e_coco_asff_reptood3.py | 196 ------------------
 .../yolox/yolox_s_8xb16_300e_coco.py          |   5 +-
 .../models/detection/detectors/yolox/asff.py  |   3 +-
 easycv/predictors/detector.py                 |   4 +-
 easycv/utils/mmlab_utils.py                   |   2 +-
 numeric_test.py                               |  62 ++++++
 tests/apis/test_export.py                     |  11 +-
 tests/predictors/test_detector.py             | 134 +++++-------
 tests/ut_config.py                            |  33 ++-
 9 files changed, 152 insertions(+), 298 deletions(-)
 delete mode 100644 configs/detection/yolox/yolox_pai_8xb16_300e_coco_asff_reptood3.py
 create mode 100644 numeric_test.py

diff --git a/configs/detection/yolox/yolox_pai_8xb16_300e_coco_asff_reptood3.py b/configs/detection/yolox/yolox_pai_8xb16_300e_coco_asff_reptood3.py
deleted file mode 100644
index e10862c0..00000000
--- a/configs/detection/yolox/yolox_pai_8xb16_300e_coco_asff_reptood3.py
+++ /dev/null
@@ -1,196 +0,0 @@
-_base_ = '../../base.py'
-
-# model settings s m l x
-model = dict(
-    type='YOLOX',
-    test_conf=0.01,
-    nms_thre=0.65,
-    backbone='RepVGGYOLOX',
-    model_type='s',  # s m l x tiny nano
-    use_att='ASFF',
-    asff_channel=16,
-    head=dict(
-        type='TOODHead',
-        model_type='s',
-        obj_loss_type='BCE',
-        reg_loss_type='giou',
-        num_classes=80,
-        conv_type='repconv',
-        la_down_rate=8,
-        decode_in_inference=True  # set to False when speed test
-    ))
-
-# s m l x
-img_scale = (640, 640)
-random_size = (14, 26)
-scale_ratio = (0.1, 2)
-
-# tiny nano without mixup
-# img_scale = (416, 416)
-# random_size = (10, 20)
-# scale_ratio = (0.5, 1.5)
-
-CLASSES = [
-    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
-    'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
-    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
-    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag',
-    'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite',
-    'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
-    'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon',
-    'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
-    'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant',
-    'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
-    'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink',
-    'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
-    'hair drier', 'toothbrush'
-]
-
-# dataset settings
-data_root = 'data/coco/'
-
-img_norm_cfg = dict(
-    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
-
-train_pipeline = [
-    dict(type='MMMosaic', img_scale=img_scale, pad_val=114.0),
-    dict(
-        type='MMRandomAffine',
-        scaling_ratio_range=scale_ratio,
-        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
-    dict(
-        type='MMMixUp',  # s m x l; tiny nano will detele
-        img_scale=img_scale,
-        ratio_range=(0.8, 1.6),
-        pad_val=114.0),
-    dict(
-        type='MMPhotoMetricDistortion',
-        brightness_delta=32,
-        contrast_range=(0.5, 1.5),
-        saturation_range=(0.5, 1.5),
-        hue_delta=18),
-    dict(type='MMRandomFlip', flip_ratio=0.5),
-    dict(type='MMResize', keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
-]
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-train_dataset = dict(
-    type='DetImagesMixDataset',
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_train2017.json',
-        img_prefix=data_root + 'train2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=True,
-        iscrowd=False),
-    pipeline=train_pipeline,
-    dynamic_scale=img_scale)
-
-val_dataset = dict(
-    type='DetImagesMixDataset',
-    imgs_per_gpu=2,
-    data_source=dict(
-        type='DetSourceCoco',
-        ann_file=data_root + 'annotations/instances_val2017.json',
-        img_prefix=data_root + 'val2017/',
-        pipeline=[
-            dict(type='LoadImageFromFile', to_float32=True),
-            dict(type='LoadAnnotations', with_bbox=True)
-        ],
-        classes=CLASSES,
-        filter_empty_gt=False,
-        test_mode=True,
-        iscrowd=True),
-    pipeline=test_pipeline,
-    dynamic_scale=None,
-    label_padding=False)
-
-data = dict(
-    imgs_per_gpu=16, workers_per_gpu=4, train=train_dataset, val=val_dataset)
-
-# additional hooks
-interval = 10
-custom_hooks = [
-    dict(
-        type='YOLOXModeSwitchHook',
-        no_aug_epochs=15,
-        skip_type_keys=('MMMosaic', 'MMRandomAffine', 'MMMixUp'),
-        priority=48),
-    dict(
-        type='SyncRandomSizeHook',
-        ratio_range=random_size,
-        img_scale=img_scale,
-        interval=interval,
-        priority=48),
-    dict(
-        type='SyncNormHook',
-        num_last_epochs=15,
-        interval=interval,
-        priority=48)
-]
-
-# evaluation
-eval_config = dict(
-    interval=10,
-    gpu_collect=False,
-    visualization_config=dict(
-        vis_num=10,
-        score_thr=0.5,
-    )  # show by TensorboardLoggerHookV2 and WandbLoggerHookV2
-)
-eval_pipelines = [
-    dict(
-        mode='test',
-        data=data['val'],
-        evaluators=[dict(type='CocoDetectionEvaluator', classes=CLASSES)],
-    )
-]
-
-checkpoint_config = dict(interval=interval)
-
-# optimizer
-optimizer = dict(
-    type='SGD', lr=0.02, momentum=0.9, weight_decay=5e-4, nesterov=True)
-optimizer_config = {}
-
-# learning policy
-lr_config = dict(
-    policy='YOLOX',
-    warmup='exp',
-    by_epoch=False,
-    warmup_by_epoch=True,
-    warmup_ratio=1,
-    warmup_iters=5,  # 5 epoch
-    num_last_epochs=15,
-    min_lr_ratio=0.05)
-
-# exponetial model average
-ema = dict(decay=0.9998)
-
-# runtime settings
-total_epochs = 300
-
-# yapf:disable
-log_config = dict(
-    interval=100,
-    hooks=[
-        dict(type='TextLoggerHook'),
-        dict(type='TensorboardLoggerHookV2'),
-        # dict(type='WandbLoggerHookV2'),
-    ])
-
-export = dict(use_jit=True, export_blade=True, end2end=False, batch_size=32, blade_config=dict(enable_fp16=True, fp16_fallback_op_ratio=0.01), use_trt_efficientnms=True)
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
index 82cd4c6d..0839f0b2 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
@@ -42,7 +42,8 @@
 ]
 
 # dataset settings
-data_root = 'data/coco/'
+# data_root = 'data/coco/'
+data_root = '/apsara/xinyi.zxy/data/coco/'
 
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
@@ -188,4 +189,4 @@
         # dict(type='WandbLoggerHookV2'),
     ])
 
-export = dict(export_type = 'ori', preprocess_jit = False, batch_size=1, blade_config=dict(enable_fp16=True, fp16_fallback_op_ratio=0.01), use_trt_efficientnms=False)
+export = dict(export_type = 'ori', preprocess_jit = False, batch_size=1, blade_config=dict(enable_fp16=True, fp16_fallback_op_ratio=0.01), use_trt_efficientnms=False)
\ No newline at end of file
diff --git a/easycv/models/detection/detectors/yolox/asff.py b/easycv/models/detection/detectors/yolox/asff.py
index d4c62c3c..980c5a3c 100644
--- a/easycv/models/detection/detectors/yolox/asff.py
+++ b/easycv/models/detection/detectors/yolox/asff.py
@@ -5,7 +5,6 @@
 
 from easycv.models.backbones.network_blocks import BaseConv
 
-
 class ASFF(nn.Module):
 
     def __init__(self,
@@ -166,4 +165,4 @@ def forward(self, x):  # l,m,s
                                                                                                                                                   2:, :, :]
         out = self.expand(fused_out_reduced)
 
-        return out
+        return out
\ No newline at end of file
diff --git a/easycv/predictors/detector.py b/easycv/predictors/detector.py
index 2d23a0f0..6647e329 100644
--- a/easycv/predictors/detector.py
+++ b/easycv/predictors/detector.py
@@ -141,7 +141,7 @@ def __init__(self,
         # build pipeline
         pipeline = [build_from_cfg(p, PIPELINES) for p in test_pipeline]
         self.pipeline = Compose(pipeline)
-        print(self.cfg)
+
         self.test_conf = self.cfg['model'].get('test_conf', 0.01)
         self.nms_thre = self.cfg['model'].get('nms_thre', 0.65)
         self.num_classes = len(self.CLASSES)
@@ -259,8 +259,6 @@ def predict(self, input_data_list, batch_size=-1, to_numpy=True):
             num_boxes = detection_classes.shape[
                 0] if detection_classes is not None else 0
 
-            print(num_boxes)
-
             detection_classes_names = [
                 self.CLASSES[detection_classes[idx]]
                 for idx in range(num_boxes)
diff --git a/easycv/utils/mmlab_utils.py b/easycv/utils/mmlab_utils.py
index bff3040a..db7e94e6 100644
--- a/easycv/utils/mmlab_utils.py
+++ b/easycv/utils/mmlab_utils.py
@@ -14,7 +14,7 @@
 
 try:
     from mmcv.runner.hooks import HOOKS
-    HOOKS._module_dict.pop('YOLOXLrUpdaterHook', None)
+    # HOOKS._module_dict.pop('YOLOXLrUpdaterHook', None)
     from mmdet.models.builder import MODELS as MMMODELS
     from mmdet.models.builder import BACKBONES as MMBACKBONES
     from mmdet.models.builder import NECKS as MMNECKS
diff --git a/numeric_test.py b/numeric_test.py
new file mode 100644
index 00000000..3ce9e3d1
--- /dev/null
+++ b/numeric_test.py
@@ -0,0 +1,62 @@
+import sys
+import time
+from contextlib import contextmanager
+
+import numpy as np
+import torch
+from PIL import Image
+from torchvision.transforms import Compose
+
+from easycv.datasets.registry import PIPELINES
+from easycv.models import build_model
+from easycv.models.detection.detectors.yolox.postprocess import \
+    create_tensorrt_postprocess
+from easycv.models.detection.utils import postprocess
+from easycv.utils.checkpoint import load_checkpoint
+from easycv.utils.config_tools import mmcv_config_fromfile
+from easycv.utils.registry import build_from_cfg
+
+
+@contextmanager
+def timeit_context(name):
+    startTime = time.time()
+    yield
+    elapsedTime = time.time() - startTime
+    print('[{}] finished in {} ms'.format(name, int(elapsedTime * 1000)))
+
+
+def model_speed_test(name, img, use_trt_efficientnms=False):
+    pred = TorchYoloXPredictor(name, use_trt_efficientnms=use_trt_efficientnms)
+    for i in range(10):
+        m0 = pred.predict([img])
+    with timeit_context('{} speed test'.format(name)):
+        for i in range(1000):
+            m0 = pred.predict([img])
+    print(m0[0]['detection_classes'])
+    print(m0[0]['detection_scores'])
+    print(m0[0]['detection_boxes'])
+    print(len(m0[0]['detection_classes']))
+
+
+if __name__ == '__main__':
+    if 1:
+        img_path = '/apsara/xinyi.zxy/data/coco/val2017/000000037777.jpg'
+        from easycv.predictors import TorchYoloXPredictor
+
+        img = Image.open(img_path)
+
+        # model_speed_test('/apsara/xinyi.zxy/pretrain/infer_yolox/epoch_300.pt', img)
+        # model_speed_test('/apsara/xinyi.zxy/pretrain/infer_yolox/epoch_300_nopre_notrt.pt.jit', img, False)
+        # model_speed_test('/apsara/xinyi.zxy/pretrain/infer_yolox/epoch_300_nopre_trt.pt.jit', img, True)  # jit ??
+        # model_speed_test('/apsara/xinyi.zxy/pretrain/infer_yolox/epoch_300_pre_notrt.pt.jit', img, False)
+        # model_speed_test('/apsara/xinyi.zxy/pretrain/infer_yolox/epoch_300_pre_trt.pt.jit', img, True)
+        #
+        # model_speed_test('/apsara/xinyi.zxy/pretrain/infer_yolox/epoch_300_nopre_notrt.pt.blade', img, False)
+        # model_speed_test('/apsara/xinyi.zxy/pretrain/infer_yolox/epoch_300_nopre_trt.pt.blade', img, True)
+        # model_speed_test('/apsara/xinyi.zxy/pretrain/infer_yolox/epoch_300_pre_notrt.pt.blade', img, False)
+        # model_speed_test('/apsara/xinyi.zxy/pretrain/infer_yolox/epoch_300_pre_trt.pt.blade', img, True)
+
+        model_speed_test('/apsara/xinyi.zxy/pretrain/base_export/s.pt.blade', img, False)
+        model_speed_test('/apsara/xinyi.zxy/pretrain/base_export/m.pt.blade', img, False)
+        model_speed_test('/apsara/xinyi.zxy/pretrain/base_export/l.pt.blade', img, False)
+        model_speed_test('/apsara/xinyi.zxy/pretrain/base_export/x.pt.blade', img, False)
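The script above prints only the total wall-clock time for 1000 predictions; if a per-iteration figure is wanted, the context manager could be extended along these lines (illustrative only, not part of the patch):

import time
from contextlib import contextmanager

@contextmanager
def timeit_context(name, iters=1):
    start = time.time()
    yield
    elapsed_ms = (time.time() - start) * 1000
    print('[{}] {:.1f} ms total, {:.2f} ms/iter'.format(name, elapsed_ms, elapsed_ms / iters))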
diff --git a/tests/apis/test_export.py b/tests/apis/test_export.py
index 355e7d5e..8f71d6c3 100644
--- a/tests/apis/test_export.py
+++ b/tests/apis/test_export.py
@@ -46,10 +46,10 @@ def test_export_yolox(self):
             print(output)
         self.assertTrue(stat == 0, 'export model failed')
 
-    def test_export_yolox_jit(self):
+    def test_export_yolox_jit_nopre_notrt(self):
         config_file = 'configs/detection/yolox/yolox_s_8xb16_300e_coco.py'
         cfg = mmcv_config_fromfile(config_file)
-        cfg.export = dict(export_type='jit', end2end=False)
+        cfg.export = dict(export_type='jit', preprocess_jit=False, use_trt_efficientnms=False)
         ori_ckpt = PRETRAINED_MODEL_YOLOXS_EXPORT
 
         target_path = f'{self.tmp_dir}/export_yolox_s_epoch300_export'
@@ -58,10 +58,10 @@ def test_export_yolox_jit(self):
         self.assertTrue(os.path.exists(target_path + '.jit'))
         self.assertTrue(os.path.exists(target_path + '.jit.config.json'))
 
-    def test_export_yolox_jit_end2end(self):
+    def test_export_yolox_jit_pre_notrt(self):
         config_file = 'configs/detection/yolox/yolox_s_8xb16_300e_coco.py'
         cfg = mmcv_config_fromfile(config_file)
-        cfg.export = dict(export_type='jit', end2end=True)
+        cfg.export = dict(export_type='jit', preprocess_jit=True, use_trt_efficientnms=False)
         ori_ckpt = PRETRAINED_MODEL_YOLOXS_EXPORT
 
         target_path = f'{self.tmp_dir}/export_yolox_s_epoch300_end2end'
@@ -69,6 +69,9 @@ def test_export_yolox_jit_end2end(self):
         export(cfg, ori_ckpt, target_path)
         self.assertTrue(os.path.exists(target_path + '.jit'))
         self.assertTrue(os.path.exists(target_path + '.jit.config.json'))
+        self.assertTrue(os.path.exists(target_path + '.preprocess'))
+
+    # TODO: we will test the export with use_trt_efficientnms=True and blade in a docker environment.
 
     def test_export_classification_jit(self):
         config_file = 'configs/classification/imagenet/resnet/imagenet_resnet50_jpg.py'
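For reference, the renamed tests exercise the new export flags and check the produced artifacts; a condensed sketch of the same flow (checkpoint and target paths are placeholders):

from easycv.apis.export import export
from easycv.utils.config_tools import mmcv_config_fromfile

cfg = mmcv_config_fromfile('configs/detection/yolox/yolox_s_8xb16_300e_coco.py')
# jit export with the preprocess module traced separately, no TensorRT NMS
cfg.export = dict(export_type='jit', preprocess_jit=True, use_trt_efficientnms=False)
export(cfg, 'epoch_300.pt', 'yolox_s_export')
# expected files: yolox_s_export.jit, yolox_s_export.jit.config.json, yolox_s_export.preprocess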
diff --git a/tests/predictors/test_detector.py b/tests/predictors/test_detector.py
index e31601a2..fc825130 100644
--- a/tests/predictors/test_detector.py
+++ b/tests/predictors/test_detector.py
@@ -12,8 +12,8 @@
 
 from easycv.predictors.detector import TorchYoloXPredictor, TorchViTDetPredictor
 from tests.ut_config import (PRETRAINED_MODEL_YOLOXS_EXPORT,
-                             PRETRAINED_MODEL_YOLOXS_EXPORT_JIT,
-                             PRETRAINED_MODEL_YOLOXS_END2END_JIT,
+                             PRETRAINED_MODEL_YOLOXS_NOPRE_NOTRT_JIT,
+                             PRETRAINED_MODEL_YOLOXS_PRE_NOTRT_JIT,
                              DET_DATA_SMALL_COCO_LOCAL)
 from numpy.testing import assert_array_almost_equal
 
@@ -27,7 +27,7 @@ def test_yolox_detector(self):
         detection_model_path = PRETRAINED_MODEL_YOLOXS_EXPORT
 
         img = os.path.join(DET_DATA_SMALL_COCO_LOCAL,
-                           'val2017/000000037777.jpg')
+                           'val2017/000000522713.jpg')
 
         input_data_list = [np.asarray(Image.open(img))]
         predictor = TorchYoloXPredictor(
@@ -39,132 +39,108 @@ def test_yolox_detector(self):
         self.assertIn('detection_classes', output)
         self.assertIn('detection_class_names', output)
         self.assertIn('ori_img_shape', output)
-        self.assertEqual(len(output['detection_boxes']), 7)
-        self.assertEqual(output['ori_img_shape'], [230, 352])
+
+        self.assertEqual(len(output['detection_boxes']), 4)
+        self.assertEqual(output['ori_img_shape'], [480, 640])
 
         self.assertListEqual(
             output['detection_classes'].tolist(),
-            np.array([72, 69, 49, 60, 49, 46, 49], dtype=np.int32).tolist())
+            np.array([13, 8, 8, 8], dtype=np.int32).tolist())
 
-        self.assertListEqual(output['detection_class_names'], [
-            'refrigerator', 'oven', 'orange', 'dining table', 'orange',
-            'banana', 'orange'
-        ])
+        self.assertListEqual(output['detection_class_names'], ['bench', 'boat', 'boat', 'boat'])
 
         assert_array_almost_equal(
             output['detection_scores'],
-            np.array([
-                0.9150531, 0.79622537, 0.68270016, 0.6374498, 0.600583,
-                0.56233376, 0.5553793
-            ],
+            np.array([0.92593855, 0.60268813, 0.57775956, 0.5750004],
                      dtype=np.float32),
             decimal=2)
 
         assert_array_almost_equal(
             output['detection_boxes'],
-            np.array([[297.19852, 80.219, 351.66287, 230.79648],
-                      [138.09207, 124.47283, 196.79352, 192.22653],
-                      [231.14838, 200.19218, 248.22963, 217.3257],
-                      [83.67468, 170.77133, 292.94174, 228.3555],
-                      [217.1804, 200.14183, 232.14671, 214.6331],
-                      [238.3924, 187.66922, 258.8048, 205.60503],
-                      [204.3168, 187.50964, 220.10815, 207.12463]]),
+            np.array([[407.89523, 284.62598, 561.4984 , 356.7296],
+                     [439.37653,263.42395, 467.01526, 271.79144],
+                     [480.8597,  269.64435, 502.18765, 274.80127],
+                     [510.37033, 268.4982,  527.67017, 273.04935]]),
             decimal=1)
 
-    def test_yolox_detector_jit_end2end(self):
+    def test_yolox_detector_jit_nopre_notrt(self):
         img = os.path.join(DET_DATA_SMALL_COCO_LOCAL,
-                           'val2017/000000037777.jpg')
+                           'val2017/000000522713.jpg')
 
         input_data_list = [np.asarray(Image.open(img))]
 
-        jit_path = PRETRAINED_MODEL_YOLOXS_END2END_JIT
+        jit_path = PRETRAINED_MODEL_YOLOXS_NOPRE_NOTRT_JIT
         predictor_jit = TorchYoloXPredictor(
             model_path=jit_path, score_thresh=0.5)
 
-        output_jit = predictor_jit.predict(input_data_list)[0]
+        output = predictor_jit.predict(input_data_list)[0]
+        self.assertIn('detection_boxes', output)
+        self.assertIn('detection_scores', output)
+        self.assertIn('detection_classes', output)
+        self.assertIn('detection_class_names', output)
+        self.assertIn('ori_img_shape', output)
 
-        self.assertIn('detection_boxes', output_jit)
-        self.assertIn('detection_scores', output_jit)
-        self.assertIn('detection_classes', output_jit)
+        self.assertEqual(len(output['detection_boxes']), 4)
+        self.assertEqual(output['ori_img_shape'], [480, 640])
 
         self.assertListEqual(
-            output_jit['detection_classes'].tolist(),
-            np.array([72, 69, 49, 60, 49, 46, 49], dtype=np.int32).tolist())
+            output['detection_classes'].tolist(),
+            np.array([13, 8, 8, 8], dtype=np.int32).tolist())
 
-        self.assertListEqual(output_jit['detection_class_names'], [
-            'refrigerator', 'oven', 'orange', 'dining table', 'orange',
-            'banana', 'orange'
-        ])
+        self.assertListEqual(output['detection_class_names'], ['bench', 'boat', 'boat', 'boat'])
 
         assert_array_almost_equal(
-            output_jit['detection_scores'],
-            np.array([
-                0.9150531, 0.79622537, 0.68270016, 0.6374498, 0.600583,
-                0.56233376, 0.5553793
-            ],
+            output['detection_scores'],
+            np.array([0.92593855, 0.60268813, 0.57775956, 0.5750004],
                      dtype=np.float32),
             decimal=2)
 
         assert_array_almost_equal(
-            output_jit['detection_boxes'],
-            np.array([[
-                297.19852, 80.219, 351.66287, 230.79648
-            ][138.09207, 124.47283, 196.79352,
-              192.22653][231.14838, 200.19218, 248.22963,
-                         217.3257][83.67468, 170.77133, 292.94174,
-                                   228.3555][217.1804, 200.14183, 232.14671,
-                                             214.6331][238.3924, 187.66922,
-                                                       258.8048,
-                                                       205.60503][204.3168,
-                                                                  187.50964,
-                                                                  220.10815,
-                                                                  207.12463]]),
+            output['detection_boxes'],
+            np.array([[407.89523, 284.62598, 561.4984, 356.7296],
+                      [439.37653, 263.42395, 467.01526, 271.79144],
+                      [480.8597, 269.64435, 502.18765, 274.80127],
+                      [510.37033, 268.4982, 527.67017, 273.04935]]),
             decimal=1)
 
-    def test_yolox_detector_jit(self):
+    def test_yolox_detector_jit_pre_trt(self):
         img = os.path.join(DET_DATA_SMALL_COCO_LOCAL,
-                           'val2017/000000037777.jpg')
+                           'val2017/000000522713.jpg')
 
         input_data_list = [np.asarray(Image.open(img))]
 
-        jit_path = PRETRAINED_MODEL_YOLOXS_EXPORT_JIT
-
+        jit_path = PRETRAINED_MODEL_YOLOXS_PRE_NOTRT_JIT
         predictor_jit = TorchYoloXPredictor(
             model_path=jit_path, score_thresh=0.5)
 
-        output_jit = predictor_jit.predict(input_data_list)[0]
+        output = predictor_jit.predict(input_data_list)[0]
+        self.assertIn('detection_boxes', output)
+        self.assertIn('detection_scores', output)
+        self.assertIn('detection_classes', output)
+        self.assertIn('detection_class_names', output)
+        self.assertIn('ori_img_shape', output)
 
-        self.assertIn('detection_boxes', output_jit)
-        self.assertIn('detection_scores', output_jit)
-        self.assertIn('detection_classes', output_jit)
+        self.assertEqual(len(output['detection_boxes']), 4)
+        self.assertEqual(output['ori_img_shape'], [480, 640])
 
         self.assertListEqual(
-            output_jit['detection_classes'].tolist(),
-            np.array([72, 69, 49, 60, 49, 46, 49], dtype=np.int32).tolist())
+            output['detection_classes'].tolist(),
+            np.array([13, 8, 8, 8], dtype=np.int32).tolist())
 
-        self.assertListEqual(output_jit['detection_class_names'], [
-            'refrigerator', 'oven', 'orange', 'dining table', 'orange',
-            'banana', 'orange'
-        ])
+        self.assertListEqual(output['detection_class_names'], ['bench', 'boat', 'boat', 'boat'])
 
         assert_array_almost_equal(
-            output_jit['detection_scores'],
-            np.array([
-                0.9150531, 0.79622537, 0.68270016, 0.6374498, 0.600583,
-                0.56233376, 0.5553793
-            ],
+            output['detection_scores'],
+            np.array([0.92593855, 0.60268813, 0.57775956, 0.5750004],
                      dtype=np.float32),
             decimal=2)
 
         assert_array_almost_equal(
-            output_jit['detection_boxes'],
-            np.array([[297.19852, 80.219, 351.66287, 230.79648],
-                      [138.09207, 124.47283, 196.79352, 192.22653],
-                      [231.14838, 200.19218, 248.22963, 217.3257],
-                      [83.67468, 170.77133, 292.94174, 228.3555],
-                      [217.1804, 200.14183, 232.14671, 214.6331],
-                      [238.3924, 187.66922, 258.8048, 205.60503],
-                      [204.3168, 187.50964, 220.10815, 207.12463]]),
+            output['detection_boxes'],
+            np.array([[407.89523, 284.62598, 561.4984, 356.7296],
+                      [439.37653, 263.42395, 467.01526, 271.79144],
+                      [480.8597, 269.64435, 502.18765, 274.80127],
+                      [510.37033, 268.4982, 527.67017, 273.04935]]),
             decimal=1)
 
     def test_vitdet_detector(self):
diff --git a/tests/ut_config.py b/tests/ut_config.py
index 4432f1de..78c887d2 100644
--- a/tests/ut_config.py
+++ b/tests/ut_config.py
@@ -82,22 +82,33 @@
 PRETRAINED_MODEL_FACEID = os.path.join(BASE_LOCAL_PATH,
                                        'pretrained_models/faceid')
 PRETRAINED_MODEL_YOLOXS_EXPORT = os.path.join(
-    BASE_LOCAL_PATH, 'pretrained_models/detection/yolox_s/epoch_300.pt')
-PRETRAINED_MODEL_YOLOXS_END2END_JIT = os.path.join(
+    BASE_LOCAL_PATH, 'pretrained_models/detection/infer_yolox/epoch_300.pt')
+PRETRAINED_MODEL_YOLOXS_NOPRE_NOTRT_JIT = os.path.join(
     BASE_LOCAL_PATH,
-    'pretrained_models/detection/yolox_s/epoch_300_j_e2e_notrt.pt.jit')
-PRETRAINED_MODEL_YOLOXS_END2END_BLADE = os.path.join(
+    'pretrained_models/detection/infer_yolox/epoch_300_nopre_notrt.pt.jit')
+PRETRAINED_MODEL_YOLOXS_PRE_NOTRT_JIT = os.path.join(
     BASE_LOCAL_PATH,
-    'pretrained_models/detection/yolox_s/epoch_300_b_e2e_trt.pt.blade')
-PRETRAINED_MODEL_YOLOXS_EXPORT_JIT = os.path.join(
+    'pretrained_models/detection/infer_yolox/epoch_300_pre_notrt.pt.jit')
+PRETRAINED_MODEL_YOLOXS_NOPRE_TRT_JIT = os.path.join(
     BASE_LOCAL_PATH,
-    'pretrained_models/detection/yolox_s/epoch_300_j_noe2e.pt.jit')
-PRETRAINED_MODEL_YOLOXS_EXPORT_BLADE = os.path.join(
+    'pretrained_models/detection/infer_yolox/epoch_300_nopre_trt.pt.jit')
+PRETRAINED_MODEL_YOLOXS_PRE_TRT_JIT = os.path.join(
     BASE_LOCAL_PATH,
-    'pretrained_models/detection/yolox_s/epoch_300_b_noe2e_trt.pt.blade')
-
+    'pretrained_models/detection/infer_yolox/epoch_300_pre_trt.pt.jit')
+PRETRAINED_MODEL_YOLOXS_NOPRE_NOTRT_BLADE = os.path.join(
+    BASE_LOCAL_PATH,
+    'pretrained_models/detection/infer_yolox/epoch_300_nopre_notrt.pt.blade')
+PRETRAINED_MODEL_YOLOXS_PRE_NOTRT_BLADE = os.path.join(
+    BASE_LOCAL_PATH,
+    'pretrained_models/detection/infer_yolox/epoch_300_pre_notrt.pt.blade')
+PRETRAINED_MODEL_YOLOXS_NOPRE_TRT_BLADE = os.path.join(
+    BASE_LOCAL_PATH,
+    'pretrained_models/detection/infer_yolox/epoch_300_nopre_trt.pt.blade')
+PRETRAINED_MODEL_YOLOXS_PRE_TRT_BLADE = os.path.join(
+    BASE_LOCAL_PATH,
+    'pretrained_models/detection/infer_yolox/epoch_300_pre_trt.pt.blade')
 PRETRAINED_MODEL_YOLOXS = os.path.join(
-    BASE_LOCAL_PATH, 'pretrained_models/detection/yolox_s/epoch_300.pth')
+    BASE_LOCAL_PATH, 'pretrained_models/detection/infer_yolox/epoch_300.pth')
 
 PRETRAINED_MODEL_POSE_HRNET_EXPORT = os.path.join(
     BASE_LOCAL_PATH,

From 53183ec31e8fc39c1bfc199ebcc1978f7ef5cd32 Mon Sep 17 00:00:00 2001
From: zouxinyi0625 <zouxinyi.zxy@alibaba-inc.com>
Date: Sun, 21 Aug 2022 01:03:26 +0800
Subject: [PATCH 52/69] save conflict of YOLOXLrUpdaterHook

---
 .../yolox/yolox_s_8xb16_300e_coco.py          |  7 +--
 .../models/detection/detectors/yolox/asff.py  |  3 +-
 easycv/utils/mmlab_utils.py                   |  5 +-
 numeric_test.py                               | 62 -------------------
 tests/apis/test_export.py                     |  8 ++-
 tests/predictors/test_detector.py             | 34 +++++-----
 6 files changed, 32 insertions(+), 87 deletions(-)
 delete mode 100644 numeric_test.py

diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
index 0839f0b2..93c20319 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
@@ -42,9 +42,8 @@
 ]
 
 # dataset settings
-# data_root = 'data/coco/'
-data_root = '/apsara/xinyi.zxy/data/coco/'
-
+data_root = 'data/coco/'
+data_root = '/apsarapangu/disk6/xinyi.zxy/coco/'
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 
@@ -189,4 +188,4 @@
         # dict(type='WandbLoggerHookV2'),
     ])
 
-export = dict(export_type = 'ori', preprocess_jit = False, batch_size=1, blade_config=dict(enable_fp16=True, fp16_fallback_op_ratio=0.01), use_trt_efficientnms=False)
\ No newline at end of file
+export = dict(export_type='ori', preprocess_jit=False, batch_size=1, blade_config=dict(enable_fp16=True, fp16_fallback_op_ratio=0.01), use_trt_efficientnms=False)
diff --git a/easycv/models/detection/detectors/yolox/asff.py b/easycv/models/detection/detectors/yolox/asff.py
index 980c5a3c..d4c62c3c 100644
--- a/easycv/models/detection/detectors/yolox/asff.py
+++ b/easycv/models/detection/detectors/yolox/asff.py
@@ -5,6 +5,7 @@
 
 from easycv.models.backbones.network_blocks import BaseConv
 
+
 class ASFF(nn.Module):
 
     def __init__(self,
@@ -165,4 +166,4 @@ def forward(self, x):  # l,m,s
                                                                                                                                                   2:, :, :]
         out = self.expand(fused_out_reduced)
 
-        return out
\ No newline at end of file
+        return out
diff --git a/easycv/utils/mmlab_utils.py b/easycv/utils/mmlab_utils.py
index db7e94e6..3216350e 100644
--- a/easycv/utils/mmlab_utils.py
+++ b/easycv/utils/mmlab_utils.py
@@ -14,7 +14,10 @@
 
 try:
     from mmcv.runner.hooks import HOOKS
-    # HOOKS._module_dict.pop('YOLOXLrUpdaterHook', None)
+    if 'easycv' not in HOOKS.module_dict['YOLOXLrUpdaterHook'].__module__:
+        # the latest mmcv has registered its own YOLOXLrUpdaterHook, which conflicts with our YOLOXLrUpdaterHook;
+        # however, we cannot find the exact mmcv version that introduced this change.
+        HOOKS._module_dict.pop('YOLOXLrUpdaterHook', None)
     from mmdet.models.builder import MODELS as MMMODELS
     from mmdet.models.builder import BACKBONES as MMBACKBONES
     from mmdet.models.builder import NECKS as MMNECKS
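For context on the hunk above: the guard pops mmcv's own YOLOXLrUpdaterHook out of the registry so EasyCV can register a hook under the same name without a conflict. Below is a minimal, hedged sketch of the same idea; it assumes mmcv's Registry interface (module_dict / _module_dict) as used in mmlab_utils.py, and the .get() lookup is a defensive variant for illustration, not the exact code in this patch:

    from mmcv.runner.hooks import HOOKS

    # If mmcv already registered a YOLOXLrUpdaterHook that is not EasyCV's,
    # remove it so EasyCV's implementation can claim the name later.
    existing = HOOKS.module_dict.get('YOLOXLrUpdaterHook')
    if existing is not None and 'easycv' not in existing.__module__:
        HOOKS._module_dict.pop('YOLOXLrUpdaterHook', None)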
diff --git a/numeric_test.py b/numeric_test.py
deleted file mode 100644
index 3ce9e3d1..00000000
--- a/numeric_test.py
+++ /dev/null
@@ -1,62 +0,0 @@
-import sys
-import time
-from contextlib import contextmanager
-
-import numpy as np
-import torch
-from PIL import Image
-from torchvision.transforms import Compose
-
-from easycv.datasets.registry import PIPELINES
-from easycv.models import build_model
-from easycv.models.detection.detectors.yolox.postprocess import \
-    create_tensorrt_postprocess
-from easycv.models.detection.utils import postprocess
-from easycv.utils.checkpoint import load_checkpoint
-from easycv.utils.config_tools import mmcv_config_fromfile
-from easycv.utils.registry import build_from_cfg
-
-
-@contextmanager
-def timeit_context(name):
-    startTime = time.time()
-    yield
-    elapsedTime = time.time() - startTime
-    print('[{}] finished in {} ms'.format(name, int(elapsedTime * 1000)))
-
-
-def model_speed_test(name, img, use_trt_efficientnms=False):
-    pred = TorchYoloXPredictor(name, use_trt_efficientnms=use_trt_efficientnms)
-    for i in range(10):
-        m0 = pred.predict([img])
-    with timeit_context('{} speed test'.format(name)):
-        for i in range(1000):
-            m0 = pred.predict([img])
-    print(m0[0]['detection_classes'])
-    print(m0[0]['detection_scores'])
-    print(m0[0]['detection_boxes'])
-    print(len(m0[0]['detection_classes']))
-
-
-if __name__ == '__main__':
-    if 1:
-        img_path = '/apsara/xinyi.zxy/data/coco/val2017/000000037777.jpg'
-        from easycv.predictors import TorchYoloXPredictor
-
-        img = Image.open(img_path)
-
-        # model_speed_test('/apsara/xinyi.zxy/pretrain/infer_yolox/epoch_300.pt', img)
-        # model_speed_test('/apsara/xinyi.zxy/pretrain/infer_yolox/epoch_300_nopre_notrt.pt.jit', img, False)
-        # model_speed_test('/apsara/xinyi.zxy/pretrain/infer_yolox/epoch_300_nopre_trt.pt.jit', img, True)  # jit ??
-        # model_speed_test('/apsara/xinyi.zxy/pretrain/infer_yolox/epoch_300_pre_notrt.pt.jit', img, False)
-        # model_speed_test('/apsara/xinyi.zxy/pretrain/infer_yolox/epoch_300_pre_trt.pt.jit', img, True)
-        #
-        # model_speed_test('/apsara/xinyi.zxy/pretrain/infer_yolox/epoch_300_nopre_notrt.pt.blade', img, False)
-        # model_speed_test('/apsara/xinyi.zxy/pretrain/infer_yolox/epoch_300_nopre_trt.pt.blade', img, True)
-        # model_speed_test('/apsara/xinyi.zxy/pretrain/infer_yolox/epoch_300_pre_notrt.pt.blade', img, False)
-        # model_speed_test('/apsara/xinyi.zxy/pretrain/infer_yolox/epoch_300_pre_trt.pt.blade', img, True)
-
-        model_speed_test('/apsara/xinyi.zxy/pretrain/base_export/s.pt.blade', img, False)
-        model_speed_test('/apsara/xinyi.zxy/pretrain/base_export/m.pt.blade', img, False)
-        model_speed_test('/apsara/xinyi.zxy/pretrain/base_export/l.pt.blade', img, False)
-        model_speed_test('/apsara/xinyi.zxy/pretrain/base_export/x.pt.blade', img, False)
diff --git a/tests/apis/test_export.py b/tests/apis/test_export.py
index 8f71d6c3..c69b9725 100644
--- a/tests/apis/test_export.py
+++ b/tests/apis/test_export.py
@@ -49,7 +49,10 @@ def test_export_yolox(self):
     def test_export_yolox_jit_nopre_notrt(self):
         config_file = 'configs/detection/yolox/yolox_s_8xb16_300e_coco.py'
         cfg = mmcv_config_fromfile(config_file)
-        cfg.export = dict(export_type='jit', preprocess_jit=False, use_trt_efficientnms=False)
+        cfg.export = dict(
+            export_type='jit',
+            preprocess_jit=False,
+            use_trt_efficientnms=False)
         ori_ckpt = PRETRAINED_MODEL_YOLOXS_EXPORT
 
         target_path = f'{self.tmp_dir}/export_yolox_s_epoch300_export'
@@ -61,7 +64,8 @@ def test_export_yolox_jit_nopre_notrt(self):
     def test_export_yolox_jit_pre_notrt(self):
         config_file = 'configs/detection/yolox/yolox_s_8xb16_300e_coco.py'
         cfg = mmcv_config_fromfile(config_file)
-        cfg.export = dict(export_type='jit', preprocess_jit=True, use_trt_efficientnms=False)
+        cfg.export = dict(
+            export_type='jit', preprocess_jit=True, use_trt_efficientnms=False)
         ori_ckpt = PRETRAINED_MODEL_YOLOXS_EXPORT
 
         target_path = f'{self.tmp_dir}/export_yolox_s_epoch300_end2end'
diff --git a/tests/predictors/test_detector.py b/tests/predictors/test_detector.py
index fc825130..f4aaa045 100644
--- a/tests/predictors/test_detector.py
+++ b/tests/predictors/test_detector.py
@@ -43,24 +43,24 @@ def test_yolox_detector(self):
         self.assertEqual(len(output['detection_boxes']), 4)
         self.assertEqual(output['ori_img_shape'], [480, 640])
 
-        self.assertListEqual(
-            output['detection_classes'].tolist(),
-            np.array([13, 8, 8, 8], dtype=np.int32).tolist())
+        self.assertListEqual(output['detection_classes'].tolist(),
+                             np.array([13, 8, 8, 8], dtype=np.int32).tolist())
 
-        self.assertListEqual(output['detection_class_names'], ['bench', 'boat', 'boat', 'boat'])
+        self.assertListEqual(output['detection_class_names'],
+                             ['bench', 'boat', 'boat', 'boat'])
 
         assert_array_almost_equal(
             output['detection_scores'],
-            np.array([0.92593855, 0.60268813, 0.57775956, 0.5750004 ],
+            np.array([0.92593855, 0.60268813, 0.57775956, 0.5750004],
                      dtype=np.float32),
             decimal=2)
 
         assert_array_almost_equal(
             output['detection_boxes'],
-            np.array([[407.89523, 284.62598, 561.4984 , 356.7296],
-                     [439.37653,263.42395, 467.01526, 271.79144],
-                     [480.8597,  269.64435, 502.18765, 274.80127],
-                     [510.37033, 268.4982,  527.67017, 273.04935]]),
+            np.array([[407.89523, 284.62598, 561.4984, 356.7296],
+                      [439.37653, 263.42395, 467.01526, 271.79144],
+                      [480.8597, 269.64435, 502.18765, 274.80127],
+                      [510.37033, 268.4982, 527.67017, 273.04935]]),
             decimal=1)
 
     def test_yolox_detector_jit_nopre_notrt(self):
@@ -83,11 +83,11 @@ def test_yolox_detector_jit_nopre_notrt(self):
         self.assertEqual(len(output['detection_boxes']), 4)
         self.assertEqual(output['ori_img_shape'], [480, 640])
 
-        self.assertListEqual(
-            output['detection_classes'].tolist(),
-            np.array([13, 8, 8, 8], dtype=np.int32).tolist())
+        self.assertListEqual(output['detection_classes'].tolist(),
+                             np.array([13, 8, 8, 8], dtype=np.int32).tolist())
 
-        self.assertListEqual(output['detection_class_names'], ['bench', 'boat', 'boat', 'boat'])
+        self.assertListEqual(output['detection_class_names'],
+                             ['bench', 'boat', 'boat', 'boat'])
 
         assert_array_almost_equal(
             output['detection_scores'],
@@ -123,11 +123,11 @@ def test_yolox_detector_jit_pre_trt(self):
         self.assertEqual(len(output['detection_boxes']), 4)
         self.assertEqual(output['ori_img_shape'], [480, 640])
 
-        self.assertListEqual(
-            output['detection_classes'].tolist(),
-            np.array([13, 8, 8, 8], dtype=np.int32).tolist())
+        self.assertListEqual(output['detection_classes'].tolist(),
+                             np.array([13, 8, 8, 8], dtype=np.int32).tolist())
 
-        self.assertListEqual(output['detection_class_names'], ['bench', 'boat', 'boat', 'boat'])
+        self.assertListEqual(output['detection_class_names'],
+                             ['bench', 'boat', 'boat', 'boat'])
 
         assert_array_almost_equal(
             output['detection_scores'],

From 1fefb7eb7b87b7704b4d6b23a8188723666a4944 Mon Sep 17 00:00:00 2001
From: zouxinyi0625 <zouxinyi.zxy@alibaba-inc.com>
Date: Sun, 21 Aug 2022 09:24:56 +0800
Subject: [PATCH 53/69] remove useless uttest

---
 .github/workflows/citest.yaml | 66 +++++++++++++++++------------------
 1 file changed, 33 insertions(+), 33 deletions(-)

diff --git a/.github/workflows/citest.yaml b/.github/workflows/citest.yaml
index 24482283..a785c2ef 100644
--- a/.github/workflows/citest.yaml
+++ b/.github/workflows/citest.yaml
@@ -66,36 +66,36 @@ jobs:
           conda activate evtorch_torch1.8.0
           PYTHONPATH=. python tests/run.py
 
-
-  ut-torch181-blade:
-    # The type of runner that the job will run on
-    runs-on: [unittest-t4]
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-      - name: Run unittest
-        shell: bash
-        run: |
-          set -e
-          UNITTEST_OSS_CONFIG=~/.ossutilconfig.unittest
-          if [ ! -e $UNITTEST_OSS_CONFIG ]; then
-              echo "$UNITTEST_OSS_CONFIG does not exists"
-              exit
-          fi
-
-          export OSS_CONFIG_FILE=$UNITTEST_OSS_CONFIG
-
-          export PYTHONPATH=.
-          export CUDA_HOME=/apsarapangu/disk6/xinyi.zxy/cuda-10.2
-          export LD_LIBRARY_PATH=${CUDA_HOME}/lib64
-          export PATH=${CUDA_HOME}/bin:${PATH}
-          export TEST_DIR="/tmp/easycv_test_${USER}_`date +%s`"
-
-          # do not uncomments, casue faild in Online UT, install requirements by yourself on UT machine
-          # pip install -r requirements.txt
-          #run test
-          export CUDA_VISIBLE_DEVICES=6
-          source ~/workspace/anaconda2/etc/profile.d/conda.sh
-          conda activate torch1.8.1_blade
-          PYTHONPATH=. python tests/predictors/test_detector_blade.py
-          PYTHONPATH=. python tests/apis/test_export_blade.py
+# blade test env will be updated!
+#  ut-torch181-blade:
+#    # The type of runner that the job will run on
+#    runs-on: [unittest-t4]
+#    steps:
+#      - name: Checkout
+#        uses: actions/checkout@v2
+#      - name: Run unittest
+#        shell: bash
+#        run: |
+#          set -e
+#          UNITTEST_OSS_CONFIG=~/.ossutilconfig.unittest
+#          if [ ! -e $UNITTEST_OSS_CONFIG ]; then
+#              echo "$UNITTEST_OSS_CONFIG does not exist"
+#              exit
+#          fi
+#
+#          export OSS_CONFIG_FILE=$UNITTEST_OSS_CONFIG
+#
+#          export PYTHONPATH=.
+#          export CUDA_HOME=/apsarapangu/disk6/xinyi.zxy/cuda-10.2
+#          export LD_LIBRARY_PATH=${CUDA_HOME}/lib64
+#          export PATH=${CUDA_HOME}/bin:${PATH}
+#          export TEST_DIR="/tmp/easycv_test_${USER}_`date +%s`"
+#
+#          # do not uncomment; it causes failures in Online UT, install requirements by yourself on the UT machine
+#          # pip install -r requirements.txt
+#          #run test
+#          export CUDA_VISIBLE_DEVICES=6
+#          source ~/workspace/anaconda2/etc/profile.d/conda.sh
+#          conda activate torch1.8.1_blade
+#          PYTHONPATH=. python tests/predictors/test_detector_blade.py
+#          PYTHONPATH=. python tests/apis/test_export_blade.py

From 93c02547a503e79e79b448780e18cf7752fe168a Mon Sep 17 00:00:00 2001
From: zouxinyi0625 <zouxinyi.zxy@alibaba-inc.com>
Date: Mon, 22 Aug 2022 10:01:08 +0800
Subject: [PATCH 54/69] ut

---
 README.md                      |  2 ++
 docs/source/tutorials/yolox.md | 18 +++++++++++++++++-
 easycv/apis/export.py          |  2 +-
 3 files changed, 20 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 49bbfedc..4e62d212 100644
--- a/README.md
+++ b/README.md
@@ -22,6 +22,8 @@ English | [简体中文](README_zh-CN.md)
 
 EasyCV is an all-in-one computer vision toolbox based on PyTorch, mainly focus on self-supervised learning, transformer based models, and SOTA CV tasks including image classification, metric-learning, object detection, pose estimation and so on.
 
+[Latest News!!] We have released YOLOX-PAI, which achieves SOTA results within 40~50 mAP (less than 1ms). We also provide a convenient and fast export/predictor API for end2end object detection. To get a quick start with YOLOX-PAI, click [here](docs/source/tutorials/yolox.md)!
+
 ### Major features
 
 - **SOTA SSL Algorithms**
diff --git a/docs/source/tutorials/yolox.md b/docs/source/tutorials/yolox.md
index f3729500..0276b44a 100644
--- a/docs/source/tutorials/yolox.md
+++ b/docs/source/tutorials/yolox.md
@@ -1,4 +1,18 @@
-# yolox tutorial
+# YOLOX-PAI Tutorial
+
+## Introduction
+Welcome to YOLOX-PAI! YOLOX-PAI is an incremental work on YOLOX based on PAI-EasyCV.
+We use various existing detection methods and PAI-BLADE to boost performance.
+We also provide an efficient way to run end2end object detection.
+
+In brief, our main contributions are:
+- Investigate various detection methods on top of YOLOX to achieve SOTA object detection results.
+- Provide an easy way to use PAI-BLADE to accelerate the inference process.
+- Provide a convenient way to train/evaluate/export YOLOX-PAI models and conduct end2end object detection.
+
+To learn more details of YOLOX-PAI, you can refer to our technical paper [to be done].
+
+img
 
 ## Data preparation
 To download the dataset, please refer to [prepare_data.md](../prepare_data.md).
@@ -11,6 +25,8 @@ To use coco data to train detection, you can refer to [configs/detection/yolox/y
 ### PAI-Itag detection format
 To use pai-itag detection format data to train detection, you can refer to [configs/detection/yolox/yolox_s_8xb16_300e_coco_pai.py](https://github.com/alibaba/EasyCV/tree/master/configs/detection/yolox/yolox_s_8xb16_300e_coco_pai.py) for more configuration details.
 
+## Docker (Recommended)
+
 ## Local & PAI-DSW
 
 To use COCO format data, use config file `configs/detection/yolox/yolox_s_8xb16_300e_coco.py`
diff --git a/easycv/apis/export.py b/easycv/apis/export.py
index 3db5cbc7..0f6dbd35 100644
--- a/easycv/apis/export.py
+++ b/easycv/apis/export.py
@@ -229,7 +229,7 @@ def _export_yolox(model, cfg, filename):
                     nmsbox_num = int(model.get_nmsboxes_num(img_scale))
                 else:
                     logging.warning(
-                        'PAI-YOLOX: use_trt_efficientnms encounter model has no attr named get_nmsboxes_num, use 8400 as default!'
+                        'PAI-YOLOX: use_trt_efficientnms encounters a model with no attr named get_nmsboxes_num, use 8400 (80*80+40*40+20*20) as default!'
                     )
                     nmsbox_num = 8400
 
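A note on the 8400 fallback in the warning above: YOLOX feeds NMS one candidate box per grid cell across its three FPN levels, so for the default 640x640 input with strides 8/16/32 the count is 80*80 + 40*40 + 20*20 = 8400. A small sketch of that arithmetic, assuming the usual YOLOX strides (the helper name below is illustrative, not an EasyCV API):

    # Recompute the fallback nmsbox count instead of hard-coding 8400.
    # Assumes the standard YOLOX FPN strides of 8/16/32.
    def default_nmsboxes_num(img_scale=(640, 640), strides=(8, 16, 32)):
        h, w = img_scale
        return sum((h // s) * (w // s) for s in strides)

    assert default_nmsboxes_num((640, 640)) == 8400  # 80*80 + 40*40 + 20*20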

From c22bf9828c08dcc9a0dda19a36b079be55b1545b Mon Sep 17 00:00:00 2001
From: zouxinyi0625 <zouxinyi.zxy@alibaba-inc.com>
Date: Mon, 22 Aug 2022 14:16:44 +0800
Subject: [PATCH 55/69] update yolox.md

---
 assets/result.jpg              | Bin 0 -> 232937 bytes
 docs/source/tutorials/yolox.md |  31 +++++++++++++++++++++----------
 2 files changed, 21 insertions(+), 10 deletions(-)
 create mode 100644 assets/result.jpg

diff --git a/assets/result.jpg b/assets/result.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..d262a484fe44391404f9f0c69b9c4b0bb07ccbf4
GIT binary patch
literal 232937
zcmeFZ1yo$Yk}!OS!Ga7Bg1cKXxJyWI4ek(};0zutg9LX74k2hDxVu||1(z9uTd?3E
zB;lL9{deEKJ-grT+1-El|Ic|nXJ)GUcK5BWuCA(kySncd@74f9WqBoe00aU65b6Wm
zZJ@C$$;dp{(0V4Xq$>B93uxp3DxvcLfU}FIyOzRJdILiv`agO9m@KS3-2M^%SN>7$
zUd{h$9ROw?{VUi1y{voIHXc@}Lf=qdPj}SKQN&uK;0Jd90Q>%dE&l<I{R6w}Xvv_;
z<fCAAyMG2-{xjIe)6ElAX5>$KD>wH)FcJkzI6Ha$x!0fchhjV%7hP@CzX9q?4R`=r
zfCBIo1^>6Ezw*<y002ZT0RWu%*F4Ki0BC*<0OZSm&12370D>?8Xqt4haJTpiG3cmY
zG+SE$I4cDJd_w>r{tN&(#{Wp@U+eyLEdGtY(xd87qx$KJ`q%+ZfDJ$oC;={j6#zv+
zd;kx?3kcmI02!2c{MXW-M*eFpplaXs07Tem2AI-d5D|bz1OgL*?gjw5KLQQL_#@f>
znC^kFu+hL6m^irTr~<DE0RRJxh6YB*C&tIc#sE=(P%s97iG@wXz<clh6XFM&FcMNS
zMqzymZ=c}W#wH3rArVn+9p9wnH)$E~rsn1szA^FZ8d!Rzrf1eQ^iJcDv#<(0)pMVj
zU1w&~^6(6)KV=t`k&{>8khLjMva+?a4-I=A9+{$O_@bb&xUYY37)1sxib4=N_@C2>
z1_EQ<!@$Hw$3b1ULiGuRhK7NTi-&=Yg@K8I0ift4LTA9hywCdtOA}U$P0Yy0FJ$2v
zT-W<R;HkTZSJJ!shN*8%g2GxM_ejV^w3F9Q$(Utyya%R9QwoY^SY&lA`)1|zLc>xE
zS*_p5-z@`pU=WIPU?Si#aD6JxjsfI9K!a=Gfc_5toezH(g1=9OziWd3eT`@_@lLQZ
z=3*(!bM7@CHOuZ+Rax~taa=l6UMGfy`o$IA)14s2n56U8CimDJ#S#uCT0BeZ==yZ*
z5)YPnQ8=z{MOR9k<cn8)_A%Jo^TM;QXUhGf*WvXv%S4H1gjcPP-oe#=4qmk1>@~x9
zB8;}<pJ1sPeqt{ucqfPEg<}=$?xkBrZCK=3t*$3`3ie;|kt*dwJbaw6eyI5UC0T4S
zQdVg~qy>szyvu-*TTey-xwI#%7`&bRl=qRxK*0fRY2VJ|+%I|_vqNCy4xkw_I?WR6
zu0TK;PpsTTQ~U!8Jr~pFHa_gLrxR5qh8h;keC1QZoV1Rtr6Td*%uIdo2|d=e3vtpT
z3-|)?2qwUVp$3LOY_aI;_643F2~U;v^^#SaTXlmlEv!-d905pheUJ`j{{p7N+-L#)
z`qF2Yx`7vn*60a2fr)n-(&g`ypVk$qZzYx&$R0<qQ*Ut_^kCB3lbMJV8dRROs|eAa
z*HoJ@BtakArJUxeCq#vJ9@&ousa?RVX?Y{FauZ+}BH6@By`wxSo-0Vlv=5x@oYn5Y
zqeq3)#l@rVhRRD;9=9Gn6fIIpJ*s&9p6gwhwv)nR_d@d6idl?M#y(5=otEuKSI1fJ
z*pbfR*8GFP4Soq}YVJjtT>Se7Gw-zH+m6*l4`@gG?pG%{AT$=_nIoK|N2WFg)hago
zcst&%K4f@9%xWp+Z!64!U54>A1-6KHIs3cIO)u?Y_}&MlZM!-lhSn-4O((<|c`tcp
zY4XmPzU-qihfzUgTT9T}q#}Q@sj-aQeOkd6x;>R7-Wg4V{EV!c9b*#K{cd8;Hib({
zjQW5UXNrDhif57@8u@-mn3`6|YYaSTVPmUECq{HlJ7~s=)J8?S$J$TF7?0l{&R8Fd
zCXSqDIo0J?W&8{{Y`?czWq8|kP?xX58h^`8LBhm(2~Pc(d%rw$1Tf1Lxkxdm(cQ#L
z4_qG--kLjzSnj5dDZNo7P1|v9DmH95*)pd!CQgoJ41cN#VjpZ*_x&~zh9K1`kQ0M@
zn}DWqOLJxIpUN2M&Cb605L-E7zb}3ay>Xbh|84k;v=39yeW9aZCg`-m`6F$s|G?qr
zpriVTAx?Z+Q7~u^H?GK3bNR<m@6~)@ol|Utm7!0;*(am8IuljZhldx;PW6jTWZDMb
z%gR2><E$xV+Pow2ZP))WC~mv%trnkOpuyX0M#Cnu`NU`EEw9&T*!i(*_a)o*9WZt_
z+e=6)dzgVKdRDr%o80^Id78NGT6H@4#OaC6mynh51)pP@ap)lyUD1~}CIsab{oJML
zLaZ-l<^y8jf}asYU(x2GnxIj)fl?cGT4{(Fhs}29kKq+hG)@Wxa`^V{L0}5ynPWiE
zVZ`)@k`Bjead6=m=i(pdkG8NyMCGqNO(eu)tz48%>gr%ZzcM{0xh0l$Un7J%EMsad
z!-xk+9Jb#LE~$GY!k>@ZI8sr)NFeREsVK(vnLy0E#2Fc?%v^eCS;P`095hyVk8s<%
z#FidGD%jpQQaGpWT^Ke6;(L@X0z${DbE!2Ee$or{$W5tPg02ogIt=O>JV2yZ6OU_6
z0wvh*f5L<8uY?^a(Co3HseZ*}H@aTgbskVdT-`V$o#}8nyhlb&&Rz@*G{)JxO&Y_Z
zaVGVZc#qF2ufSM5U*LVnm(xFQ8BCjWt0Ls@07cz~<@3Cd2>GwAPA1gz%VDl!m-W&)
zSunah>*w$AdgA?WepE8$wsOhN^5K)|;3UB_W+&K`Sfs<*ycpDVX4lFs-BVXa*34>U
z$_7dohuE<peGFsxJ;WYJzaA<VP^~QDkfG&}7175q0ImRa&0@sOelhogd){VvXk!aN
z^;?E|T3qF8AbDk!^M)^2Ior8Nw(dLa+uB2|e?*wX>GRnTiE4`^=)rrvJ2|dwnhPqS
z4&6)}u_;_I2bZx1J98N6{pOV|`&l~|17Us#S^l7t5lo;r`4KgtjV-cYSoAhYl_OW{
zf%Flj*8a2Jws)h?c);zr0`>=@r8pbg;e&?1r(zn9{p*kpAyqWvM#s*Gki+I}BmO7t
zDQ%n{vYtpPC=R{mGY7QkU8zh}pNK}`B+qw_`@;Hd@Q>u>n9D8I3K{a#gVXa!E8JT9
zB8Ij}m!I`^iWGKO!O6%jd5|*pv36v959afIut9vI9i8`x*o=C?#g0(+yi@e3hKA5X
zs?o?Q#2X7Q*R<j8lK~T97gqF~%(JlvoW(q>S+5~A!I}Ba!+9HMOg$ozPN15TW*u2+
znR4&|fd2AG8ohr+2!!7ypkU5@(`m7Ye*|E|@_E?U9EFVOJcI2E<rEE)rjsbn$*nu!
zKYr=KX6(gb{t(996nW8{drW%vjH#JN7l`uxfowsl(3G4gF61XHut;e!!uYc`|0Kx9
z=!vZ2n7C4_q8*ka`IKE1v2-~NAB9y>aCDN6#%tn8P`p-TBq(<I;XFiiK@SZ+tIiS8
zZvJpI-tVY6bND%4!R9e~p5e?%Wh+ywBR;p-!25?cA)gv~nNxWYQFvvV8ghKWZ+b9~
z7Dcd~Xgy_R^7=qEm<Y)2DJ{51n2<5XbcI3G_*v!*=Dvce2t-&TD{lPia@Aj<*Hkzw
z4I{y6Nl$AE&z5EPSYFcyJxiVBm7B~v+Nm(yZw4Q#ymX|A0g?Nf@hsei+4ra3jC>;!
z%DrU(JvcsT$}i5Dl<A$U8Yf|~EVQNJiS=L}PpN+x>N7fAuiIzMS?ZJ&&&*Eq87sUG
zjbRMkdwoC%L`d_acR0~~M*u^%$27EpTBaP-0z|b&2FKDf)c}!);C>H$fQZK(5$Qs8
zwmydCd~<#PxJcoE)qA9TWh@p|S`Ha8h``wJNO-V<`69Y7QYJ7Q{ys7T@PQ(w0hL8@
zcy{<^Phn4oFybvOpo8T+ppf?SI}Ac~DdPrg0aRzJX$TEUA6oJFUzYJJW~@AL7niDA
zzztbUv?vP87X%`B*IS$iWL2pF2`bkybRPuViD?MlV7UA6Q7A9QQQG}spWg;rWAXNk
zsy;CpeM---MOTz-Votc6Uk{2q_lbBS>0(1e9_J<~Rv3wil86nekx0LOS$zzzaP(m_
z^tE*?94%58=df1%SeDH>1~WJBW~m}acx4aQyOI;OemB0x-l?F4wp6S$pwYwV$-O~f
zp4$2LORVf29u=g0{^8~e1K}#o%({Bn65m%AOZrkQjaG#j;#RK&LK;q}wkt=;nD>_t
zwVYzJxD^ZKR14jS-&=v$Eo#VpkP5IxFbwH?3ZjAIF`V}Hpufcame0wwMg#W&Hs_H(
zsIfV1fA!^ee!c)f-kK)OCx))g(GOi~8tbj0i3tE9(r4=dhAh$>zZoJ&C*BeX1gq5$
z0IM)UBshL^;3*u*9(gi=Wr0Wx?_Z{t7Qw=gh`+!r{~ve`L<8ReZKTaB#y=E>#LB~5
z>aL|?z1YIe2VOL*e&tLRz<k<7^My0A1kJL**hO-!mEq^c`v+U1>e|@YM7@<Uyy}S(
zCsow-&I!qi`(WAcTw@bzHJQUnH_w<%w9|{PDlDVCB5@nThd*<4puf^ljMw7q)DV3N
zA3;3k64)o&f4;Zgu@M{l=0mW6n5ogq>DK*9+SgM#hb1*leV+=@tW(^k`PrBYm~}w1
zsjB6K;IbY(zg|DV*2bvmqZx6>al4AvNEm*12Cq!>=M1y2{uamuuSte*rw1u!W~ZV2
zT$v5lOx}m-306cry=)Nb&U@%9rAWmLvfXEi%Tb?}iT1^&?CYzZwef{VdpPggt_L7@
z_A}x?PM?xry_}nK7@joYiDz0F7xAL8eT4pfVyrH?ayuvKd#h+q57z!{N$W|R=7ss5
zKh64ww(SN%f~s}|wf&IodK1gr0n>Tw#<%W6hGL0%au-J9+wWGSONo6ul+Kj%+es)x
zwsp$c7V;%I7q8Zqu58L2DG(ot5%WJ~#is8+LykNSXz+hPCE|B_dzmBQ-To%h>y2UG
zS5A){RV&Dv09Hm<wlN*QJ(lohobih<2hqa|X^p!_xsY9vQl*R}ExS+Ug7rt#72{|b
zQ9>Pu&78766)ChE`&#*BE7v{mYvZwu_DQGX;z}x*rf~Dq%FtT$*&O8uB4$mpQ!+}r
zcTCk0^}=}DU+w_+Jk=Qwj}*X$nM6a^Tz+hV^+}z5B7thFWq%!1l_c>LdC1I_4N;*&
z))1_dS&n@k2qVXOffl%B{kmL<pFwfGTa)j)?R`m|sT?s!G*%R-rF8WU@Tljvk@CG#
zpB<EB)*}@gnp}O@-d|}Y?U)+bPairrQ81CoBa2HtCZ`K(xBV!ns+`5lAbnNqsWj5J
zU*K*@9U)L*c0b^^05MPM+GS&W!3YggWcVl60`qsWSZ??KDDYs&E98uHZ`stV1ine#
zh7$Sl<}2+HQHN7azC7>U&SKZrW-T+dE<!SBdW`#a|ASr+s?KS}`v3r)#ggkNu!8wq
zE0bGi9HCoucVWq-$5uIpM5)FZBPSoW-u80o9kx(4qmgXIxQn>;V#NbPVt7Qd=sod0
z=z<r)AMBTYap}9HoHnVnJ&Z$lcr3D^@Z(l}G1_!TwC<d%WI=Aw#o+g|X3HI++2<@K
zNykKO`H2A|C5EBiUz=ptGG7o_ii+$0;FvOz3r*lV_U@w$j5;f$oS`!o9=~-kl!MvJ
z&=V2$V}Ej2i;gceV(u|4sbkTVs`p48<{dH5r$-c*ij65;gkNyk=1LfV%Q38(yo@>Q
zj7jlVToejl4vU|of9;HFv!TvIKO9B&+qAC@h-6!mdsCwA8<P9z`-SaO40p2rsO$aw
z<8i-{L@$Lh=}ZI7bD_a~f3rRHKIJmlnCXPGz3{jZUzquh+UEU-F;6K-U)*~l6D2Dh
zrjoARrtqmvc0R3kyHPvX`Im<oWx27|cdfZMAFn)vq$8qc_T-Jphx#4yD*E`6dd0rZ
zzLJWyK(InP-eO@Y4!U9T5I>qe$*nY1_tdbrlUh<&{JQvQX;n$TLbTVC)6H4fSGG+x
zItnW!c!7vl!2&<RZClz;E9MTEOS(NjA#MMD8!Wv0^)p@D|AZ?Xd<PsyV?N*T{Dj{j
z&z1*u=`9Yq$73+{MKxS5?2L!`3Aki!3}Y=H65JM5#qQNBhyw!yWRBR9``Kr%56GB@
z_S7D#+v!<wvp9!~+K7)eDP~EqN`wf!zSt3g5-{Ah)bJ0fQ-Xi*K&aj)Qt+aQctF|5
z+v@ihC&k?H)oqc_N`1%a)^FRsHS)f>7DW3h5A%P^)L<QUzf6>{QH42}I?GRQ`HU5|
zSQDu_Ejie7AAVwFGfp8>xU;eEJR|U1x4VA4vM^R>^s)KP&uWn$C+!=XRbzT@PO0ov
zOs16|af|fkY|XG1bbJ`DPH{rwK!bUN)+{bp-sL{AhDHYzPv3KMRMBwvOaIs+bu}i8
zU-bLC+X-g1(_`ksw8h2}0XOYs-fzlJ7(XT_k24x0@3n{*M?qzx@HE&~3q%YA_ZmN^
zHq`J)Gd8rxec`4*X<^OA+(Rh7znlzw2;dVcG3M&NAx?a)8qPa_1Fj**je|C{DfyW?
znJTZh3}UG+gBt6goZ5UP$Ps}t3d_UjyzsZp6*rn3XnJ$?^KUVyA(ZUvp_!3OvXQj|
zC{Hhr7b-Xup|-2&w%3~XHuodlef!U@TDwKA7+ECE$uA>DT)EcmO$gaIhv=Bwue<Tq
z&_T^Gw4nE_XqxWZy;T{I1R4*dShat6WkB4$kh6|F!HgNB>sjnUew#QuWS$%{t{0}F
zT%T+AFj+;uGsbta<qh9^jpJ=jI67&|bXSpce1yMG(X@)GSdLG*giP>wE~61!(pCgT
ztX!5|Rp!#!ymF}k{^Gn^ih$0WH|^wV>LT7p%VtWUI#~s9it!JJr&*W1*=-`8nikt!
z-5`VI-2Q9Q0*_z&vDOKopw=d5QlU*DJ>6>eM7-Z59QN70p{-;|RaD)KL<P~%Y0e0a
zp2jPwHz$&eo@aX=)O2Fj9h1?_4J&!2PK-7mFI{dY#ytI1=y_7%xJ|O8Eq08#VoqNz
zNRID`DmbSlip$b4PqllG1n=LnoesSu3+#GQHHF_y@S83P|Cj0a^Q&8S0`mFS+ewGq
z$NJEN?E7ZgSC}&P?=kToWtT5yziGCy${Te|o{Zwf<-$2i4N-kA>{owJ_P(3!m!Veg
z@sBz#ZDkpFg|jYiJ%kk|*rwSO1!!w78ItQh#FmyK{Dq8!Q4jhoyfN-9rK$L|IfoRm
z?-^+g43^7;hnq!}-`}?uM>8HFuDKyTXjmrmm|?X$FZRFo;IY=31;Aw<+7EMXwZ3`y
zb0OI|f~QVnL64W~#Rs)nnu%V2ON1w}W7bm~26@eAkfp@d7w-_lUqLRHRv(NN9v<`y
z>~jg&Jn~*zcAgq6i>=+xblpe$IG!#yl?1)^#;<<N;$vfy9&NGBjc(Sc>_lEPzCAF&
zoIstLGC=eG#ts5>EHlgwxd)cgri}6Mw-x2}En3m>wh6RnH<FoK<5{H5N3|BZr0!IH
zr1(&Nf4PMvZ`n~=7CLPq!^I5ZaXG$xF2Vw?dXd-vNrb9km)f(=E>QI=n0E*1Se&Uc
zKTyYdCOHsb;H^zQe$;Bw*jD+#EPv=M@rmgBOr^~BGK#1OVzmTDf?n4-cI<1PyciF2
zf|Jz7ON!|ku4yovYrINRYD|w<42{GG-!|~4CkAm&T8lDKD~mKEW1DKw3cgS6(&gf3
zy3#r%`OGm!H;%eS*&kQXV=F5XQW|Psea+;`+|dmWd&%o08fkjPm6O@R2*nlC_9l2P
z=rngOujay@9^J194ZSLIx+(OU0<-hXb9MPi`WXq_lyb>vWDaw$o0?fT6+iYcHtf0s
zGDjvV5!aMYOw)N(mZybrTd`{5Y&1iyAqE2OG3c|po>|RukOCXDMf*aGt%*#kNiE8P
zamu1u)<*;L(3c(G%&_`>xfv!bM@F#60={qz#EpBjH{=W`Bi-v<*o>0eJh)c{8C7RF
z<C?T1=}L=VY4|in9##_WK@k&_;n)=jw=sEZ<gVQ^U#7mM%Ekd7qJ?#3$f&ldxRYPt
zcq`*2FvN>Q1}IMxO+mL<-na)NXf~v+SQ9+jEBLFO`HZrp+r<>l=848<o;lIT8u@h&
z+*~-vh?wG{SB|A%_-`X@*b-xk_77`5)>w#zv9tEJlu_#pPl66TrLi)C-j>eh(|hQw
zfo4{wyg?(LcR;xYEz^L+#F+x`jsTY1{=+U;M`P1K!bH;XbuZ)3`e{Ybz0ocR=6YI0
zRNJOz^M<ax)K*-A?@G=Ax{m0qT2U4xJ4q%gI)mJYkVn=HVme=~`b@EkthG_)?P*@F
zlQ=H+XxiYADCJsXu1Fh;ld&SPJoHKSz?bD(NxM(uVz`=P{Fxkj`(epdSt7n6Yes5#
z_QRUXgW2;^3H1~8jS!LNvZ5i{)Eh5v@BavVv8|FcuwCu?1^qew7CTWwTNV1Fo%1om
zXs%W-kPT>2@p{?v=>v!(ooABWk*Fuvu4DOr7aJd`SFh3QHi0*G+xN)VTcK=Di>tg%
zsgg^q<CrnkFl6PUo(%6yP<y(Thv}B4Qg1v*4jxup{n>ZQm0`m>KuOf6xS7Xcd8PI6
z<?Zk3F>+$9XKr82;$A+YtWs$vZ9LAVerWt8w56D33J!CsZAR9D8^j>W<>uz#mBw7o
zAKr6KJ6l=oy%&SsxSU7PAKyohttbvPd2bhte5fnqT(VJ*8gT=0M9mR`AIxIe{7<Y(
z@rD|S;L~XCD`@-(;3dcHXpcC#z0dNJ=^iCdcB^xgNaRbFH=K?Xgb6`MSaJqs?4GZo
zErfj<?7<W(kU*-Nb_Gi5af4Oedc8PA9>3R#zf|k)ky5G81Ev_!?OHmQCD>3P0}1Jx
zUV<WvdK;SqM*&os1N$;g+UQG|RCLZAkWY07=v?m<y5RC{CbzFFe{V)>ZK%SkKW1t;
zx8Sp&u<J+C|6t@AiPDSBH=ouMEPE@&?XK};Bnr2MBBBtV!sZaasw>%9c2%L_Swf>}
zN(EIj>HYimv#)zn)tcjuxU^K$B`l|$K1aIqC064tk)9MO7i!8`v1i6}5JCpu#j$Qx
zirAVZ9==W`ZIJlj;HXV3YDY&&YhzC`YWq>76;G!s)pWS#DO6S~%}LjgxqeGp?&<Xu
zJa`_Bq>Zm3ab;LdsPQ<)@95Ef2|lB#cCv_Ep&ORaWq0N!zEfid->e6upuC=ODuIu>
z_x6JuanV+|s-g^`nzY|4fN;Yf_P~a0cHQ0q21k?W020d&jl)mR(?0z&-QM1vfS}Tz
z^_IZl<eu$8$AZ&LhHQe3)j=Ptdq5J7Q1lzJ8CLU;mi3nJD4Dl|%I!Si<mwvSu8Mi#
zm4QL~U$i00D+H&%R!IaY{5Co=A+rfAwO3D@<Do|f!I&kc?{f2())+qwDRrV^7K(Iw
zml<jN&;*Vjs?BVt@Mh-<d?E8rbf!{YkEY^0o}H`Z@=~<czj6%o?=zBTB6~#pF;(s(
z7d4yoDDAJHn4lX2>F7w{GtFBgNn)Rbf!IdB$DUFuPgV{Rst>sWo<9DhJ>eXwQs6Q?
zT1)ZtHa9OlJ|fS3?sAwjbu5dWB&HYROHyV|l*%d^mertEu6RURT?Mi=*b}<?z(<I5
z`|Jui(HvIXLz>U?&^jb3DV|q`^yGPNtv4g5lv8HgE=UA`p)DIO)9lmjopqkYbx)0Q
z1@q;lpS{_4w^nrGe#>Ti$oAcAubstoY@)v2B~mfFv7@n~qwqT6euU9#tRY_}v)oNE
zuPFRzQKj(K@cR!BEx)5r`a#e!Q|4A<st@Lt;=U#ymkqda5w%BK3HNM!#&U7vZG3n6
z>ZY}{&#fpIPv-n*)gRT+W9s0(4fbPpT7)LV1{YUcoi<62wS_qRzMagptI*fdx*2++
zH7?Xvu%)M16*;los;sYJ@gwV_0N>H=JMlGD4Q`>hhTNpMQYtX{Md)INKh#D-L`gO9
z#g<U?4NP(&cFpE(V@*kMa>i@R%5FC<$FxTrEe$Iw1^1)c-IOqJnmy=gRCL5>M>5$=
zb15?qNa||WYp#-v?b3V>WikafSd4^5YzD2InO;0;igBNp6Zly=h8L_CNUhgt8#qRG
zyn8JDJTKWv=Dno!iv^yw6~4z!jC|g@k+HSo{66aK%^L+3qap5m;tVZJB{28VNA#7^
zoqb^*(n^dXLG{-0+i2vU2qA1l7moV`bn6{~@pSv=B#Ix6=JC57M^!2vYrQv~nx-;&
zyHA&T)|r>@zGQ~vMh`Fcae9S`90IZr6J(oeDEDw=WQhoPjYmxED`QmH*OY{>D+-kE
z0RNTtz*326(ig$bP@ZP=hy3#6h3~Q{<qFE#3uNen6|s0|^T>;=DFmc%?@c*{C=k;0
zsAt(QG|_%O01hRtc8B1<bp8hdUeJv%I`^LmI?^d5e@}Q8W$3~eA_}JYQuF9!|Jr-7
z?r>gih7QI#chOHCwSXTy!iyelDgUS?!xsw&?c-+5Lg|y7&LrHI`)_4cok|iOeIL!z
zqS7Yw4C^=cYO6H**c}^hjMbnt8=@Lnca-)1vX;yQJ1U16lLQB2YN$<ktZ7n(6gJ<n
zba=L*nott(EP8{gCeM54Q2tx}E_WHnnh&0Dsxj03dnQTSmfw>d%C~x2Xv%3u3i7FS
zg5cHSg~q1LFZEd{m$ku;!ocR3AfWis2DvhFpNq6-(Z?$4g_o74Jr&YKAkmDaPm5d5
zJ*n<f0X%1bo~Vp}>T-rNEs>8v{(@Fv=E=pfvDQINg~{?)^-b4wmeZoPlib=mJLeD{
zJuynf_aj{Qh24p#Wu2zlbwj=w4DiXLojdT$n6j~7JlkiJ5!24;PZGtMtf~DXI$L<t
z5hv)-@TUGK(uSzz2yy?|&Tg!g!_Jz}d`^jtN0yFB^-z4Fja`$S?D~lwo6^wRhzI7Q
z-O7GK1AbQZg&*<RugUC2(~di-@<U-nDe5%Gv`vXQ*>95g5^PLc(_h#@a#~E#^UYO(
zy5_62Hf^2+#jDaw$D&yK66*8>!FyJgqXv{Vu~Tk~+NV(1`RM+~3E#yr8}EE;WZDV)
zgSGDkXmEJC#TWRfE5{16QS!8R70-Mb%q16=yPM3j^2>aKcZkB<`$u>nueaf}MqKzo
zAX9v$ZvGPMuWw$+CVNgxVKx&Y-X?a?keL|mxJM_>f?4nqhb^X0#<X+)YeQ(qdBb4>
zR4b{PN=PjIo+ac|#9Xs4aPHgFTrIK+`p<n-*rZ$<rj*Nhf@S%!m&LcubjHgQQ*vRX
zL~E}41Pg;Np%X2P6#N}W9GN9*Q^~eTMH}y|3vBO^AE5j}tupeUqj^<G=WEX=)6e`a
z-r65L|Iyh}V@W7-LS%jDa816k|1sUULcUqB)1xNR_5PJSM~j(@HS5k(Ep-d|rV$09
z0B??ANm5|@Gx4t9KwDnj@tzRc+gEA`{;7wz>rd1GAC?D4DM5#2iZDj$x`E^92NCyE
zU}zaK7T}2hS2;+T(FN_Or$1*G-+7>a=9+7RE(?O*k$6O8MSFQEZiI64tc3uywoEvQ
zo$`voFIolX;cH9g{5E=u?P>l(yuor)Ld)F8n<UQYCkMjx`^P(JT01#jB<Q>?BO@=Z
zxC}9;v~Vld<oIu>qn4QUWtvTOgoyyb=WV=WKElf0If7BNl||$+B>Vl$mZNXjh+UG~
z<@ei4VaQn5q|e1Mn)M@HSB+b}GjCXo-$!?T)oRuu<xN2waln=F8}9cT{wOxHcj^DS
zD)Z6PCkRc$bIjf1w>`%0=S``qf!+6BE~gh6-<!)X>Ie-Fx4emou~e-ZjE)93$n~H}
zn`Ijld4|~>2|A<=&1s2wH=hI)Tjebbk_ItK)aF+<BKhB_%Q;qE`+cmB3R^AT;HezM
zHH}Cbv|an9Z+VD=gzT0$<4eBYYZ=PIpB1&pQ4*SYC{H*NZB&P&A}5@L%Yf_fR2fqV
z!HOfn>@iL8Q~PyBGhcMGOWE`@7(FQ1_bupR**<Of^-K_tj9jKGsR0LmV?2M5V&G$N
zBBf!NTiH5dMF>~XFgzWvh>Jyev`e#ngV-<B8X$BKj|@%`3!7pstcO(nc*y^wE@MY2
zKH2WXzT@nd@Uut7TP7wgJ8MlTUX}<sN1p7WIBr&WW@uSj!Q8v}d@+NMA$Dlrc7Czi
zJbXQERp4wS&E-T!vWDlAeH3#ld=8;9#Y`R%(nWv(9*u!G!-i1zt}MINgGa0*N7*5t
zNLa=a346>hY-9aY8n`p-L*o;KfB1hCbDm7Uh&S|zsn0n)O5lp-L;vxjv4zH%pKwZ8
znSV+vk}qD<n@e7nTG#rm?ub{IH15m?XDo*ZoyBTZ!wh#?;o%kftmm!IUM=f0OMpM(
zJ>z9#!RG*bY%bZVTN5Qy897jTn~A<e<6fE3lwBpsYzvv~@lg{N*!Ru+bmh04UGM1}
z2YX1Lb8VSXrRVpf!(}_=*fJprS1P!{c$tIUvfM^&UCPo&gF{tvL_PohiL~66+cc};
z2n`1%`#d`6$b6s$i_{h&bgOnTns#lteTHA``?M#PnwN>xnf`ON9eK><e2T622-;WH
z<Q-LPbFRuxW}851hx)>LUIXRjq6UG^EGXAG8>zR=Y(VRp4I|THGp>C!Q3COsAl#<M
zrdW{8NZA2AoD8)ZP<`nmhV2T&X~8OIZBwj9wOB!X=M9k<4EELtXqCHYU8T4SsrAeh
z=a8@~WnxE^=OetZn9YaqqIc0rvCQn$tndq)41tB5wkLJ83X=Inm?a*keCoou?)J#+
zGh>T*Cx3&dTTUIuXmTSrY_5s~{=#NYq%KJhe1hsil7PXVDpNEs)n61Ev>8mM=ppfj
z+-I9)#}|hV>$db-W*g#vT)4h8;CE5H4C7&gj-!@Bl@thdR4daxSA<K5LyEyyoBm9t
z%FA}P8|~3tPveh_b-tw?hWm<LGu_s$H>?lVgH>AwyJQ8NruPCLM0mTqb(_4~%8C{N
z9t;oxY*_;F$qCjen<XD+vxJxiHJ`OIekdq-+M<arJNzI~tjB{nBWL3&_5)StX2Qo_
z|CNxipI^ltq9WyT@}p+K6nvaz#G;=}+lDyQEi<G$Eic2s<_Bbf&G<btt78$bZme9W
z?*M&8$r<l+0&E!M0k}IekbL3$)8~kMnxCV4L*0G%f3E;1JJ00>e-ox(?P0y*`@viN
zEv&m)TKpz`D(+Pn%da-j>R4B-<4*?eqtB4vHWyaU*YCY@ftS9jCHNiSh#2m=kBA^b
z`rQI>*rI|TTIbKv^dKP+BJi~*+?!5A75wkE<a5qlNO=cftK542r|1K?moQh)hB_3?
zf0_ORjs3q379w5f&cd1qznv@`9e(=^xdW^&tjxg8H&0D|cftRww*)^6;yZ9%jq468
z?jPL!j){~Ik4*R=t=(-1kDthLve=M3OAocU10FYre}RY}^}(mPDY2bb<IsVQ!9Ga~
zVY}ZmuD7ey-~kcrIFN?l?BkoGQU?6sKI4c`*T-|T;*pVu5-)IHrtx0pX4cNF##HTd
z*j|nAlJfGaEMKd>DU}M9fktn<Xio+maQOPzYN7#RmPc4<>7MlL%%X7V1|#)psoaKV
zaXFUblE(H%SF^DPw|QR3oLfZlUK><NFt%{%w(e_=`?23vx<Lsim<f7`sBuch|5OT-
z<AYO3+f@pkS1l9J%DX3?bW1S?fflKp7@8qy0vI5KTBuKDGvd`*sy9*yQr4|Q+YO=0
z4l+LZ1ozogAC40~2=EDD>1l@{CeL?koSSYF@UQbZ2`E8f{-@RYE*MxGSauAMuHKrX
zn($3&j7_x`i5F;$Xsjn;&1;~VpFw@}^i=R;G?27**V0ihLYlVe_S#bjbR?`_U6XCT
zI~n<zge-Qrg68{YVIsm`rbPI+Sew@%+9_Z)Z~!atJ<j!kTK6a=764lMWFm5UAO3dk
zXtUo%GGF{C4S_pfxJLV35s%tu&86$7g7f#!5BP_kGPMsOBEKzphN*(aaA;r~kEvQP
zHyHZkKw2F(&197k7KCf-(DwwtK<Bp!I_Scn4XW_<A^0?8$+<R9kZ`m4>)x>@(fS;1
zH^2_*k@(eRt}{syc?YBk5-!SExF_gG!jNLW{e#APyb0+D2x-1gM*7T1<F{aoD!74m
zsJwoEgCK=h1K;Dz`HmE$U2ke4f+SZ6kJf*`THTQtj{Hp7{_o7h|ClhZp)uV7>?&};
zg;DBNBIX9u2!O8e8l;Yz7^vxjNI>xt%CJr#Ie=9o<3>wQ{S;pPA6EwdF%td_6~pwa
z4K4TgaA5ms(@y~0UfOapAOvZqrS`$^Hz(QxOY3)MB-{Z~g5O@H{+rt9KS{u^gn!U+
zU1rssU@yTbL%#CpvWiSNdO5TEdl}%2kQcp&Ui=#&oqo*={4bvWw+Fo-FPcW#0mcap
z*{`73OcI@_oNefe)q_%AReG~pKU6@DS4<Zoe9iu3N2dE{Z8PN6ha%ej_LmW1A`v+M
zx`zEv_UXSxTK<ip`cLy8)I>n(^7w2hpC)vbgS>w@eiIdH3pNLxqI6?^yUnnjnrx78
zOsX_Gx)4nd!D;&`AtLaa5GfBY4Xpq1DBp#inoP!xTZ)lw3l-?05P2f)a5Cf%IiVyl
zP>AqRFh}}+BmRyq?du}h9)E#>f#3zcb~$N+R)4;urKJZSf^PF<Mtt(wA<L=Y-v*!6
zx{0u`j{%8(4>}mT2V~_cUod7qH-Qor7n~5l3|?E^yn%b4ug-2^CVGzu-vMtL32Ck&
z-(|)6`z;6$4#kg#1Dg(arNxgj6)N<dvEcAjeOX~(^+VGzDs<Q~!a!!?oN!xO{4_s)
z<Fu54koGu|9FL(Ud@b-6@BwX(_yQUvC-XVNzKb%IBh*ZzDB(7NfJZ-!NKmt51@Rxa
zK$bx==7=sL(1wJ<#M__KH!ESjN75R!0!SafSsYML=L$_&fCNE{lVosz?+|>Jo8sFN
zLVAr}TtpE7*SkI*6GsQT(P_bFR^x<0C-eCdkFWONkq=}8z6D>?TyKv?gekb8o#qEP
z4&`?EuM?j4ckK`!9JN4@GBBDentneJ%1{aM&m&c8B1IJe1sCy%=SeL`&4jDrsKBY$
zo4kvlP4h+HSD&;X0JSK`fJA^o_&UnOL6>f(PKHA0sR#R>n$v=J)fQG~QSTz(<HR({
z40kQNFio~I^t{@~_PQd1m~o=PPfJ!bJ!f2AZDYevMkfvfLtPzvz-y?RgPrpPAn(7e
z^obKd7{%#e2i_;cK@8FkT=V|%$IB5kEh5q)fE(fBPnF5wmXoDMP?4FyIm!cQj|5y-
zIbei{_?1@x3^GLr#P{fTEqO~gQTqT`>~H`zgtxHW8Z3wuCD`Myu8`2!X-53oX*B_E
zYRj={3xR=b0eApiXhTX2W#^mLXs#BVhbKv<jo}0gi|U~M`1R&rL;$TgsJh!QevO;*
z(wlHmU$&2F6h_-}jAn)g9{R(1AN)K40EzDHmUyf|6IP7;DGY!h(p29FY4$*Q0{0`q
zO(FCQ<9(P1F#L!~y85}zX2Q)zkcRJ=H<9<Olz0WrSvpO>OFta4IUu|RUS#OOm-8Dz
zmo*dIY`y~|n7Vqq5I4Wt{n_zN|BH^?->Mnc=D?=aw+H+Foz9(AfBZ3kTyE1GEqsT_
zlpZaD5ZOP!2!@{$;Q-v30Ne**I^Yc)@4GNH-2z_Ms}2Cc5&>JB?-}GcZ9@uxG~NEl
zxn7_6<8BooyzhXcl=WuGpVt6niUuzJ;vb#OSO31V`L{Iwhh5nJHuVmE(t6SYC(r@A
zNXYuS&;+kviu=ZaFqhB!gfNTz)GzSE-Is+WCIm8PCK_DIjJ2>BR@1nYsKOU=X82#+
z6UC?yPhL*+E;1sX-fvX%iI9fhyqXPDWs-irwr9KdolcBA?Uo&YllQ+rQjUC&0fKIT
z(?zjQ&)Xrt@}G;*>u>OR`u)&eeeCH{{8)Up%|Ew)G{{ucD<SgZ)c*dnfL~K>sXtG&
z_FcWf|E%OI@!5ZlqAH%jcK(v%_fJjzivnaOn|~Vk>vfEdQB#6ttme}<w}J$*+&k_t
zq11~QEXA+fT5Mw66!M~OOB$T5_Bz9@<(UM#k(TiY@)G<$_)!fF{x7-&^LHy^gFc_)
zspWYMZ60?CYrQS!{9rv%5PAoQBV4R9WW5*P{eA(s<%_!hO;0ok{+CeDp3UABei-kg
z3-NeFc%c_3BW*rlbG!=qDYT7>0A0A2l?vT(Rh1#5i>=<7y2h9jq~nV=ik?kOI=5|l
z-OC)pnQPMUZWNiv?@}vA%*<%rgDzWO8A<lSk3vi`B8{!9-(qz;Znjz~D7&i?_#PWb
z`z3W@VTG#f`NYv*QHuW+^z*Bmp|ASq?b1ew=LN)1ChJ_(gS}#_w>RjQ4LRi+^)-!>
z(U(8459!+VWloRUvj-g)dWQAX&V7vnw_ZxCqG3!!!qpQjHh0tlJqQVYF$q5E0c6G9
zk1xO*ychm{+RvYR+7`bOZ}{IrG4MZ+PaRXe8kf94(>MUa3kA7aB3?jld;>LTs}K)F
zoKO#3rm=1G4OC)Xa_agNs}mn_lXe*_Ka~_TGMZh+IQ21DQ<1f97kGWjsv_fIFEy0$
zx)L3qgZ7wmM-ERN?W+X_PG!CMdHk8+{~IWc4E=@p3kc;v$Mz<#*Y|mOzWKL7gS}p4
zW&9Lmaf~a_B@K?V^+*df>k&7OPnOvqYNIVtG1Wf~e@93Hipx#2p-BB85#KXRhS4`V
zvu4HMwpRCEjC7qrRug(4R}RmLgogt_2a0jPbpi5H39ibH2gfMc-t`VS@H*e2BT6Vl
zOlSSLMe+G&rFi~ecMNO5YVYczq^oOhT=}=p3$26PK&F3^4VOQ%!70T=r%+uiczG@V
zM--oUP5f<!^xvX7ZH>p3lRba<eef6dx1rcSC;d<J>R+2kAba;`s9C(ub}DYrVe_id
zRWcD5pEJpeq>M-j39EF>k~YrS9ew)FZz4lTLp(XEtj!n%FL2MmZ9GNI;qd{O;AP9z
zs{q?#Uq4jToOnlQdPE<i#EXB?xBqVRaNv5;xA8A}0<-{~h@ef_J22Y_nk0GtUfk{;
z>rvgi_bbZSCGFmh?=#`}>3F6i?x{U!Xw*W4h}R&bAH^1PiP)nCNe*5nHen8i(oqb=
zzEYWRa$N$FmIJ!CeGwaSOef(M2-h|J^tHUKC1Zn;Nm^+?NI>MI#kJ)}$4hU&;Kc_R
zFaFKl-2zm3w~@f5w9w`N$`@RurPFSX{7mRvEx}~&pip!oy=eES&|_><DOch^{_C4Y
zdUu)|%Z8*D=a=LBLhzJCqyE-MWmpb!?<AqwIt#v@*aYdn6lMDa$V|3>L&9AWg3sSY
zRo4K7U~(^-GELqe`3$Xa_gw~BDIbf~2Hag)oS*XQ3D1@NcToaZy6XoIK%vk6gJ_v&
zb3-TNWMJJc9Z^c*%yiz(ZJ`&(&6o1vKLakwaq_keWw>&`KD@S@%2cWHlwx92C@q#e
zM~SHRdFRu@zcrZuntlDNIsN<9zi}>tP_zC0*}%f*JK+DZJ%8qR^Izt_!yi-hcl7_~
z+|Y4;bOcRKf99e3w<h9)XLCDA+b&Z*r=@y0p6L5*Hr3`lirFl(E9owZP5&UTaJf0_
zcrn3kCVXKx{?|T!ESbsXbIYfC$o|w3&j|}AL2ve8ReFr^S?}03LC*#E9;0(H?gglx
z*;**YMe#T4rmC~SPU<9P(;DohGGCLSY=eCm{}Zn*s}Ks8Q^WeIW=+e_R7S$n&t;oh
zo&ArG`WLyhc<^9@XQ=(fE3xiXs&i5jS@9i=*@8YSghXeCab7xRl310HZ@?fQ<9-Wy
zJ`O2-_mZgq8uN>{(nYYfVYs{^j>R*n?=*>A?{+R+Jyi)aB>&<1fr!aKbR{9T2NB5w
zqE)wVuR*sBtp(7?@#jX(_;V4pR~iOafg@kXx}3R6#lN~BI-QJ*>sjw}^{mW~<_vw<
z6SW-vq|nWzV0#?la9M}Ar~3$$vidisHNOp|xc<4V&HsNHttVc!*qHr`y~Z+W7o+xj
zTv+KJbt|D8lz)`NoZa*Z<v6o5W*se671#noCcHijpR@sNY_~-jmj_s|J-fu@jJ>AM
z7excHImn{$w-3o&i&=}lD1UnF=3CTWm_E&i+Y)fvdj-<<;VLgL*|3U?j03S1fB(GM
zwcfS)^>lmbE0(zi$^RvWjfm#0EVzWNaQ4-&*~T?AdT}SP`@TYk8&bsm+pY1CE(7ET
zpNk<IEkVTh9eL$~<F$=<BtK-2yz>lUrp&3&p<9D<jrcV573S(6%U2LXh|pyErI_@P
zd*rg&<Qqt0A~0}h<*}_BKkijeHCU0Arnj25>y`H`?)n(Ysrf#uG<D}Bdz59oI}3TU
zop=U+tAp<1Sfff~L7ZjZa|qjE!M|cB2!#PhG!bDsdbAVI93_&!q#a9oUyxqS>CLXQ
z?h8{5Gb01ae`-72Uc&@2AFD)sANPjQG@n@vt?I+@uePYLnh|12aXL9RG%;+}$1BNQ
z4ogkV#w>55IfkxsDYJc)`RX;5uhh2fT%dP=W|Kdt1shc5UxDreaFnEfJ;iSZhz@{L
zggJ<2sO&*mnY!V0tag)6GnGK@52^N)FuR7Fb<TKwtsIhfB->w-O(m)7kkH4nGI68c
zKmtGBOfb#W5%MJ_0;_qgT;6lnAyr#5%{%S(CGnTzlXhqjGqq8!9M>m)d+jBkB{RL&
zT$H1<zYQXy>8M-D2aN@!!J6sxD=!~cH@P1d%p+;e8|RKIzH_bqY&MB?=y$dFT<P1?
zbU17Jy}7<Ut~%h_*5O=AA*eddzdo=vtUm$s4!Gzr&GG%-M--#fJn4J~yqzq{RXmdH
znAS7q;<^LU1v`r+CX+XqTaPEuScvYWOI*>`#BZRECW<y+bslyhVf^5}p45vp7!UnU
z;Z0BqwNo`2ZgW|$J3A;3O?@N1|2}$<=g@j0BMQO_VIjf~6Zu^BiEr3B$#zNLm?~Ew
zMUGI7PX+ok7&}vCmcs}58Z4cUfdu^CbwlZc68N{^SdM82goMa~`v*7EkxndhlDj=v
zlbF9Wph#gFxHLpc0kt$laa>Ca?oK|q!UAZ<Xq<0Sg^?mZ8ZZJTr;rHbwO;Si)@b~D
z@&FdlqO8njOFTm3!MT*85UO5PT?CQPj5_Xa=tN|?1#PtsAoMK)@&H3(atewtS<pxM
zd-YIZo>LHn5s-B5uV8eM)Yz5QAG@Ka6&9Qp2T~vRL#2Hq7}JxK<`;o{Mk?=<8dT$t
zpT%Db&ebGvvr?npKBA71!kq^Jgmj`kc;d$lsOpzQM1P3xVwxvLAB7VGSVI|rtPiB$
z1L3wPoG)<mwi)6hIgbHh1!Z{*<V(7M$H1;)&Y!4GX<9X?gpyW=O%_NRHq<T(S)B`M
z>$K!F4X5cZCKU?;XNN`OGkoeS)qb=SJddCRSK2@*h|zr|+K_<R>Tgh+@TuFn^mFCE
z-9(7XX5+`V<9Z7byKZuTcc%XBn|ab__^7wGHg9b{q<k5{LA&~!^N7It+s|brLJy$3
z*GZS45d%8<`B#-0kNu6c^J(g>T)T6>d7m`h0os1C-d$w%i@zdcn~0xDR#eZVV;p@!
z#{rnSrB()fcE6q;UHh}oI13zJ+qIwn27MZB*H^z$c$L0%C^XPp;{xtRof5U(;z4WG
zglzHn(a<oX*0A}lvOBK%d0gvJx=5^*#752|r=8`U-KJ+AcL3KuZ)iCc0Kb$KCM1Gb
zTw+CDxqW%ZF_hYFy;6o~Gx2WbdUUmGXtyut|3;*YjV(7v8>YXz{<ijlG~{-&#g(GV
zks@fT9Xgxj$AWG+{%okh8^+sK%$v==MW!0Uy5@zgML;Va3>WzslrxoqlT;i%ytX${
zuw07sX1~t?`q5dgnI%u(9%?)B>w`(^Sw!Wc^0JC<qs+5?RT?rVBD!y{LXQcrNjoJL
zc27RGv@R!CMBVHD%Rw9e6x_#Hu%V{(=IzarO_~y%ro?E!FHd5^#%tkwtquic?@eQ=
zm0|dCOMP9-y6+^!28JqlA2_&_MK~1EoH?sQHD3z)i&ySyO#88^>;*WRkiMDv-bt3w
z-;DF!lp(zAb+x)uqrGM&xlUMawz9oM-m?TgH0tj_gnsyD7g8qe7W<dRZm0VFQW2*o
zN>8BTRo6u;tJ5&ld$Fv>FXQPn<DCmTJ7-x@O`AR9qOOSH1fomH&aWr#sg~Ktx)s+S
zj7O^K&ra^mIv*7~T^Viq%mf7<1de+$v*VJb|5o}IQ5TGbILq{f<#QMRb=@9P8u&AO
z#jM%3^TZ4uAN2ZJrrJBY;sv_f={F$ue<6*3IRXbnWy<`cZni7It#dt#M=IA@b=!6G
zp_l(QUhLPC5!Z+8dt83~At^0I$uEkWJKZH;v2IB5s$a--Xt~(vDwc~Wc3KhQrZ8lA
zaVH6s#ncOB)|xT3opn8pZ#ZU4gBpI+H;u88MV!MXX;dp-uQ)CWcZ`;?@3O={36(*x
zH~YmzC|B*5B`FE^3{-L*6dJwcHqrkQFNjsDCbje=4kq(3^X006mC$M?O#{w(yd_by
zvwp>Khl=?FR-5JVhE(*C+L0U>^+Ox<{SbLD`wvAyAIW!}EL${k2t8TMC)xG0vGP(2
ztyK!%sZ@hS>UGu<vA5)9rJ8$;*6>dca~zX6&{Z*rP@59<r^53Ey>jeIOzm))6bPHZ
z#=h}Zwik0{Dy1T77(?6rOpS3lv1x;Pveu4FJOa_>?ab|^N^LG7_v$nS=L`X6@d^!&
zF)L!{2oG}U*8oJC(Hy@=AtMq1AibTS`-a;(lQSe!#~eB31lQ#U9R$c(j)4%z1Z5fx
z+V_#yQs6m!IBLVVpNoCNvhh(s_2kyUg-OX6i%6@)rgz$0^(&USlkdsK{%t8_-eXMn
zFs#tL)odqe1rKnO=~qxbfKDu$+556DYZ*ECtw0|=CgF0ajBSxaN8L-dPV1_<n<H2G
zpY?Hu%$wxUgHZp96R*4gwb~j)YCcx0%lH^0rf`b7HbY$Z@TIIb_^2QDeIr=<rT(Z4
zkx|-mIBlcsF8em-%A1*&rOA`xKjdV~p3cDp>`lwqv^3)NS60;QKIdKfH@RSh&zRo<
zlr9PN(Gg#cb2xvko%^cFeR1_0KN+=STx~1k{&kK2Ywy*p#Jh$&K>1I(JHX{_;T_;L
z5Xg526#18@-(FGO&fWoE*@dW{U6kw*+yS&)%>??@{X>EZ*lI#Gb2IvN?d^NUoTvsi
z<PFNzA4#}StQd>VJU3g|J<vG|TlB@4@g(;~;uto@vhEH5coH)3Ls|k1(>zb7J>6&}
z=-2%!>)V{e=JqX<cs2q(oqYu&dH1E4Pm7Gb*I$fg-Plb#o=d3&cGR0%^al=E^f)6>
z2WbSDNl|~Fi*{|CNqHu8*6n%z@?`x2QPfK5xk<sAlQ%cByniOqDfPPJ<y)2kC6ARH
zbY(_j=*vS%3z`7{5Pg7J6p2T2+OM!)cdQmwZM+^XyII4{+6OqK^QBZ<h`?BcLK_V8
zv`^-VptsH=!uf(P-{w;hq#0MB1MP{n>4X6OHGcEgW9OgWybn!pU}$-E9|6e1qj`FU
zR#}#X^J2n9WhwtjI6)r%k9^Y>7jXbi9T4CDn}5Yp0_zpa!XX3@&k#eLv%9vyE??l>
zye({PZauCHt^b%#qrvlC8C(+?F$|+=%IDY#NOb<qZ)yW!bS}vuo5ac`g7!(8y$77v
zA=K<R;gYhx7?WO@$g2$4fI{Gyc%%zcKWr0q@JGO;7Ibqv?|`QDRXDX9Z7+f37*-^K
zE#fm>_|u7)DC-@0t@9Qpri_9QrPa@o3JxFJts}8?N7G}dBO4f>w8pP%gOoun832~~
zqYZuA2WHDKKZf^Q&i;bg{R$oJ$)B#4=SP(oF9!~h&+eO8A|y~d3i#z{NZc=<{m8gf
ztD%WsQSWI_*R3o~!}SnR3pHfPMl8(SEB+jtBpY3utdTUC!zu5Yq|0CLXMG#Jtr3nA
z=~P+B63Y1lec5XZ6C$M9Uig{_pcWNt;19sg*O$JoyG`Km(2ypmr>SVunLz5TH}_lF
z$42)L<#kE5PF-|t*eFl+ASSF|ltnji1p)@G-i#`6a7OPZ$xdj1v#9-|q*WbDzH%s|
zvGR;TQ73P<L`G(t0}w{09yIv6dH$p9;)vw53Av<}LXcNBUM$IC?3izdlQ|YUxCBD|
ze^`6#sJ6m&-8U2{ZpDigcP%c(El6?K;u74wh2qc_cPQ>qEI1S`?wa5(!QFbY*52#v
zz0SGkjC;qp|7Ii^bIxSW@A=;Md48_|3f9;jv3x6fRGI_<vr4TR`C9s>bRS1djrrDJ
zKs7ne8eSfyja_gNvFXy+zW^-2rHIlqQ|rqp=bbG|`m*VP?81$CaIb{u_%!@R;pnQz
zczKJk>yxqp|7{K74X<!=*T)yEwqRt09tF8S`qN$)$4;aXFuGm5IG<tl<r+M7zC>*a
zA!S!w6`6i&z{Xz4R-bUYL|ZovEf@iS2lWL_ghRb?DPM`eV3Ti4|Bv-Ig8!y}5Py$c
zodq7o=KkKicyMD{AU<>N^bLHy7@>tV7#Y7YIQP(YXJ6N_yauhX@@NO|jqI;Vp-J1V
z7^oh;7&Y&V9WcFKlpjwMpy=~?R(%H$&%04C60KZmNi=LF3Yt|im%D&{A3qhc%1!Q!
zvF@zHeXuZ=rS|kNOB&aEx1UYNPg5;3%*UGWPB>sz4Lt@eUK>4)L_u9RTjS)@IX)_2
z{#>7rl|H%tCXj;kw8e{Cg?08z=TW0Kd}%BlhXvSw4NqtY&A5Dc6?zZR_>;yU5-(r6
z?d14_Va$sNxnUNa27^`1gPn{dU1<SNeY{%}aBl4EuXDk+_VWat+kcO6diu(FZTl+a
zvcLh6cN#nLJR9t<_$$7pa4Tzv9PBaLDH&q}?zqU;;F(TH;Eexbn^qM^Old`4N`@gb
zw6IogVC<{0-zAQjed{t2{JkJn)4>TFI_Bqdwso-2BP~)8v`TV+jEym-qeC-vahyJE
zkXqI2@Y~XhsUPoBJEv83J|R6?RXucGqMmn^?x<NnBng@{$YZ5E*B;PmLM2t-wWC+{
z^SRI`Fc9m-l>5yJ_DC1l-BiKp{m>ikbTYL`-(8yRk@VGR?-j@PyoS{eb<5qcMvC|>
zM4^jJX#>9|U<@&T_$-;|-zgF%C1@itHxlNI){U)r>;zOz1Q&Q_sKr%5sJ)Y`YcfVy
zp*cMvF1UC*-6)_tvcQ)r!x%_rd2>~6J`_X6QM%CtW;6IfbcNw3<vRe`T>yYQ7+IwP
z^5;a`&~MB6j8&WEUpP++)yI3R)()pJ|J4oz@@Mp1o=homrE2lpB9>R`|9aCuWzK&o
zr2qJ@MnWIw?tY0;ei1pFPH9JyuN5CKW{Q7w9_6Pf2eKE>l2?uO+?yV;&nAQ!6;2l?
zEUPTv@AT#+7<S7zIiNJWf^U?#pu(L}3fPZs5GS#ApL1B4J!q9+MzgKWYp=0xxAB>u
zvqu1Cp9pmS_(``TKVc<dh|#@dH%`e6CeDanY&qoRB+B07%`Anr<RUAH^keCk+XG89
z(POb98d?86E3|KAv>bQv@N;O8097%sS6l=4bpck@*qTUDM*zj+>JzLP)|xwX19pH^
zH`*YY60(UTl$Z7J_^U6wEiET$Ei2ip$1ST^t*~ywwx+-3XWGv;+Y~gSMKe<MNe_wJ
z;1LHvIC2$4?Z>nYeo403(d(*vW1}a~(a3Q3sf>hgEL^3u9nQ!6{^U?8E7Fr}khEF2
zx}uWG)Gy!w6Je;b3Opcn(DEntHJCTHp3Hbt99tuo@_o9Qjfkdwe>#?F<D4W?qe3UR
z(1|5kNY%T3*};loS^j%g+l+g4#VO;TRUI#No!&;s2uoFsovfr`wZ)=r{`&K#yuG4K
zhsXW&pU=qaEiQFtb<tmtR9+QJ-4iQaj8vz^G3w%nO`r9yAB@<iU}?s?EC=?go_LQ4
zDW0?${&-;7hF5HP{3-an)wm3B^sa$T+4ES`5n2uLseV#*Nqg|Hl~%OYOs~N-kXOV&
zUj$^_rMUd&ApW;+>MO}Hdn(2)sA=+MmS98+txYwP0?$bHPh5iHt+e3|$FjHLC&LIp
zsVD^x0yxHE?f@jc!zp`Eg&fgmXMeSCu|@dyJaNh0p8J_Te_0VsciA`t&1gz!V|hfm
zUPu_%@+gjgADsNBbK!!hD&b{{Qa0&c>8AP6-@qc(1(w7jbdN(h6#*tKT~L2xmO2H^
zGHXt9bZGRf6LmsLdeArrcZ|}>y!Olz8VWP7<ygH(;dp+vv5-EUx-3t+EP)UZ4r)U7
z&f=)IRn~&KT(3hCl-d&8pgFE9_W^rpuA*Pu$_mcXvP|AfZ~m0WA_X=oY7{4dNEM5d
z<!&<cWo;N&O9uI--DDG3%F;@xr&$UQk3lnnfbZR(B0q;nNUVCucu`f18;|2<^Cgby
zukdB8B?OFQ9(2AvDO;$DsL*DfE)#Z-QBhIF8c?jp;yoo><v6Iv_w4x0i6vtu5<WeU
z3Y&>@%Wl2lzaJo0sc=(*3`pHHrVvj}X%6+b+b=t<<jPP;4)>1c-!RM2iJZ&zdkj^C
zUvN-b@~KZNmBmRMJkso%ft@Q^v8MWQ-t~&g^ZdwYB-=5z?IzUWPp-^tF2Oq**|@2f
zX>=Zhku~d324PD=$)jtQO6$2_$<z9AaYhImIcKjOVQ3;$esw)?i{eSp54u<F0lCBq
zq*$6w_tK|fDi<}NB3OJSJoY4y@*5z0fMfv%A)H3~Hri)mNJ|L@L4H%s7uEiMZi=nL
z8jI<Ef{q6K#%IE<K%$BYrGqMqz~5D#y-3gi_gN!ur;RoNl_?dSKTHyI1w|9;q`|nK
z0vID*2#1qnQX5@!*+60jEqK;(RQEE@zC--6sV(V^-z8@dhX;Pre3@E#Sn|qiLDG_x
zSO=H~)p;0uT>07c*ciNZnjJ4NvdTiOs_|YMtxGZQxB#W6vk&T$^I|Qba9WvMy5A%>
z7tGhQD*SS{m4+PFyNwZKvLrC_Fd`Nmi5q$^$ZJ(|9T0?kPwJ{YY59emHg5<p1|Q{_
z+t{E%{qgDJtI4Pv`#G4qA#ar08#kQ8xw2tz$MWA3v-T~wxt%DlE5xU9k~{wtMYdBp
zO4RjYBQlva+AdblZLU`}j|S(_*%W8=Rd)P}B{8la`$S^sHG{&9`i5iKDMrgk{QcJ%
z?o|eN1hR~!_N&~42lXn2K&`_YO>DbXC24Foe%_SOxgA(Z1^Rvm<u<F3VA;N%KP{a;
z9&u<t%;o}z<Xu6OOg0yH))(|bfD0VIIij<4!dQ^2#lMpK!#Oso=(+qpj_t~4CuXV_
ztZQ|E6$H5n718jF-RuLJTzvl0QFvlK=8R0Pz_de$F0I6d6*K!vxmBdl>FG^pMFZ83
zn#bY$)-$pK)4TbzKf1hCBmPD{sZ4UCiiTxnF3KXFj9d<!f0}+wm@w`17N5|Q5iUQ^
zY26|=UojbjjW2)NE<G;qH=mNu*yHF<m1MN;Zl9pJ5BA!Za7*`biN>~-IAad8&?gH1
z83@-?Gbf*_U>GV~K}uwLBZ~<jWjKD`4Y`*8QU`gxwMg!TJvI<>m@XYJ-TioDlDGwn
z3-v%o2>f3foow03`y=Up0mp(zt4rnoOF0QaLSK~cMLR_-rTU})J0^^!e<NRGhb9MN
zUf@)=6|Bn7qEMR!gzCREnTx@kKdQ91uWEdwTdA>X%l+e(kol5|fIt-wK!AL6HcVZk
zxO+WkUuZAg3j5`{3+DIgGEVW@9R;KRD$8|9=5JPA@Ll3N=1qgR02UD~T=piMEV|{v
zpj!wC41HZK3G3&}bO=0Ysj*B1PdU-(kF+L)`2ba?wo=JVpgtdn#Dyz<&}r%IGNYR?
z2#u#J4Vqz%x+i^I>t^79KB)seirYLD25|V8ne{EJtLv3mO~#?4%{S$RwP!Cb8FgA*
z7vaz~q#AmH<K~9+Sp6+ML7b0GPoYPD0gOv(mxY$w=Q_QRFU)8Msr?eW_TwMej6)a7
zt84Li3diUP#ZAXUaNe#rH04g|VRCS9a;N!S@AtR*__;YXwB>BFB>02CX9<doUKfrd
z<k=VOH4o24yq3H{HFjcTKY!`9YW?!lB#eL=2**+=u+*n~Fg+aBI%wfT76PisF@qFo
z_o&O-2mNgV))IKwkHlF;Z>*2gMO}tDwkoJl6L%7#<G(pJe^E9Ru$1wuQdi*;9?-W!
zw_z!FKiV?54pp=>7K`g!=+`OU(y1#!1M0?!6>kySurARh8df>z>ph3NArE;KT=)(Z
zSK5zP@5j<Bi#Bk?r+nx@)y!sTo^boEo8eYm*k-vWWa9-{27$c~KR#wYwIp#=2B!a7
[base85-encoded GIT binary patch data omitted; not human-readable]
zEO&pKIcK%#y<LkO0iLe~0dAy&fqcBT_{PDfja7~wu=TC<ouW&}hhx{zj)~;&{DSh*
z6*f^gZH_w=SC&+_kK^^aQ~S6XXkBl+HdT~8cFdK^|1KwsK7qH?(T3>WdArDw|Jei2
zGw#4)Fz!8n%OAipwDgg}UpplKDAE#kvN?lJ2?O>FA2O?Yd@O@ODydsRt*KEQbLK2j
zIL6AMxBx(CBwbHvBn=`jpTboMNG(#^B#|Vj*!m*1P$&!GhY<D7-m^HeW-M(1erZ`n
zrqv0}er$OFa~s;>P5!DV)cW$L+`&U@2YGeBeQm-an&5p*SBdqg3RPIyTWgVDTJ(Jv
z=ebaabT-;GCNp;``i-hIB@xQz+BKjuGg2<bfeff!j`Nd0Q3ydNE8hDteQdN+phh^_
zEQ}`0hPrPF0`-)@kG|UnajBG*XffNM-VPj*M7VrChQ2(mdZM!94PnHX5k8&R&;Vs;
z?j?(DSD<;3t=ITWza^Snoer5vrladBul*>aO}^~|zcoSmst6Aq&K_`23dI?V`L`Nl
zDjRR;|72A8MfL>~YjEs<_NC~OIP?LIJKZIHU1ZQ;(Cu5?<ne$z<LX4YW5n%@_eyh*
z)T6+92l?ohW6`_ESS^8I48Gil7#mpKu1qT~c8DB840!7|0*jE#7r~)^e*jDOHyCK9
zis$7HPR{-KN-q6>J%F#91z5Ij6;@8XiuZDzS$d4M@%#%0=P$b6G{T>Z(k~&4p+};%
zG{Z}e-h)SpsM(jqquWQ(P5DT{Cx9gd<y1+m>{B#>;`R({{P*_J526>hIo&U%{{Rky
z?=dW85Hsg~C-+hT(pYjNM($>pH}M6O%8_>WIRPO*q*)A9WkU)wCZpLXD-nh#mWvHs
zBMGaQS2vmM4)$K@O=xb7J<^1y_+%Kd*giIp-9lEh)Q>?tFMfomJz%sXvM2np-nC8v
z<3WkeYO-e4>;@u`v)kz^l}6ykQqy`(lzv8IWy6AD{9#qf!Bm%zD9l3vw{F&eby6WW
zZ(3)7+lCX6ut(Xy2W`g*_dUTV*Ak0#PThGuJ6%{rSK&GWQEylO(ZI#4X&do%z`M>x
zO{PfBMGUf_Ilw=+>a=Z>D*9-v>*E1Nj762BochpaW@>AaL&C9wK5SpKBJ1lj#hck%
zqq-ljkfNMbvL}3)32WR32BLfE^5(*#nB~1zJcBCM{8PQadtStNS0-&9RfX_+Q)7X1
zRrindjQQSSt>)KUyICRy+<OsKN~e9hog7@R%k&DHjhrJGNJ`hr`^z}+V$$=MOi`(Z
z&rZ%PEz*nH>t3;&)J|N;rnlOWL>v;Xxde7pcrHH0EN>H>uM%_Z;qEXogOfI-9SxLM
z%10W=9ng~P?g!UTzk@x^bCc4xtEMi<3ZCOc>X&AE_zWoy6ADQ2fO+fW$>&$T4bXSE
z-KgboQ;(vxu0hzvweGz;W$T<X3)wV8K#@7;!s6y_L*Zk=EdDhE+r~0=!CxiH#o5P?
zS8F6F#r!NeYGw?!w1&r*EX9%%VrcNb+wzN%)G(7QDVHwF^!gM60NjiKY{H>}b8V%!
zS5svMGHfxiaV17nbgdE+?AsKRai)&D{b1h86<V3<&QJdLRlz$GH}qKq@-qu^`VMdJ
zjmo6yh&(T<XQ;_dJ5A5FHCu*7Sj&cm9Q7}p3o_#u;EvJ6uY@Nw3TiQJG(Iee<79CG
zI$smggg$CHU|Bt=)_K1W7a*r96XU=-monFc8aN7gCRGvDhg>V~WWC7ncE`grTI|{+
zr{6pr@+>$H#XSTtwE&{f!<1K7IK(T7;f_LO>S8#4Xr(|%KO(?rJ%s;+*s$~ncYw#c
zp`jZeRKa)MY!v!SbSYF?iU_~}z;*4qy*1mzi#yFsKdi|ZyXZ`%acN3XDJ;R^QW`D1
z>q2-Uf~~DMk5T8!<xGQVgm*slQaG|-D?Y_oSdL%cv-QEME9<vbCQ`*DbUBHbp#gp;
zx2y2j`r-^TQ_F0I9ImI5pN<S>u2-g9np_JfF);x&5ldent6j_dQUGu*7&XmZo`9f&
z+Z`o08zI|IJCoeO7Ws`;0d8jEA}&w6>zLoUyrgk^LKxslPpcekJpxbU7fwn_`2M<F
zldTB{tq{86$+Nt~YaFi(Zy;+1HJy%6c{?G7FMlDg3T-I+8z3y4sKP)`R*iP}ks0eu
zfek7Dwn^`YzNp+!05PGM%py+rXQ2k)*(sU@NVOk@6<9tk<ZhWNcvqxVq**?tssy8)
zEaDnNmx@Z2JW{_#icJ8?F6gH(gaY*1Vq=WPP>pbtZY2n_>vONNmmM8utD3~?e)m>(
z^_7JIGtM8mjU&GK>*3PP?7|qt;~PFSyyX1s^F%L6kDC_b0n3bWK$d@iegI7!^ckh5
z<7TSrc(SHJ6}bgnI}vxAJ)P2|`=w|?>CQ~}CTHm|WeojJ%wkgSkaMe>NaHBCM+$@T
zipm;kv&;0)6KzKayQ<q7Tlk7Q%PA;`G>h@(V(Nal+Ga)pNaYc<@fOhnTn-R(b+{P~
zGf>or+H;5Vu#dZf!h~U(WI1NUiBk$Yp`t$e-`N@DJjfS7-x!#v&y`HEk^|Y(W;(EG
z6?wH9cky_LE5$pR9sx@paT}Z!xlDwWmW_2grLf1X8rdpi%b!&TYDvA?jeq`>*HBbn
zv;ama1&lNE_ye$OF3L)7F!USUh@u<_S^BbnGdH}`)%={EB(_b^c_zn;g+2MUj#%*|
zfq;v;!ISAR`@P1*_TlC;Zv*xXk~D%Dh7c$CNs|X#6?IPH4{~94j;HDxZ##k$jk#Xe
zHe?eFqtzkSxT^RXaP_-)plm>`C!H$2tT%-cF9v}9{XK0#<wX)GnW82dl+0(JW@I+7
zvnt7?IWbRGFrj_*cqW!KWJu_9SN_2*_`^DXZyOesdRC^C(&^;mFq=tz>Q1QU2oytq
zN}Rb2u)zce++FRn^I#>+5=g}TNb`>PF<<E;MV#-ppt$;Lrp|lMs#FASWDD$7gGra`
zH;;{ERzXr6ro2igO1Qw1?s0aLLrr9bsJF{Uk(2POpPepJxIUgMF0uqIAx7_7`~yHw
zyh#j14Uy@7mDx@q56yj6+rujU7TIRL{6|m&{JUuPF9*m-qOQbN+*iLSgWqSb_vY{i
z=uj9D9IZ&$zvcv|R6m<n@R?^}M2~B^VY(Bv65bwSw76jJ|GTfoctyP7Qr+9)b9%6X
zC>MbQ42iUIB=6EPydZQ{ZMOD)DGn9ez+^-lBI87swOlFXmwy$bf<Fx!k@^FGeUqK7
z+&fxzaUEy)lADQ3XY~w+e~i`_{6%zSno#QMVVM{qkDb8k^5h?Y=#9XpLW!i;S`q)2
zRPuJWNOquwDo=%7wZ1+EKy>p7gN`yhbs(`BQyM2&_EG79k``)!3(h4H2)m(68Xmia
zMR9P)y2NVX-D;WGz!&UuL{=<(U2{Yx!fNHV;W^_{-5$UJGUhr%q4=O6OYP-^O%{XI
zstrr(Cxlj<RMW@nRIIw>hHeTm){zF2ZR-WX)~F|$?#7>aOZ$tmkkAr>3Ku_+lXp_*
zSaONT<*S{TL~OZ6!E~VGM9-@-zT(XWDlL9BzLvuVpRp01b)ehg<;(&GC_zz%YPD64
zGW%6eM`GF->4amV$@5mIO~n=3J-n!u47as;678OjqtcE8s%olApPogE<TW}vp1aMk
zH5n4#u{~TKJ)DBRL1v$&@151ApI0U5^_S$HdALI@=o_GWdJe_wR8t;@F38Mvipfza
z=M|8=5+$kT{1nTOaf*k4fC;|nuBj^<lBdSg#$?X1r|Wss#t{|FQoGWKhjy5Ae*8Ad
zb(#zCflIz=r&I68jVO0PPp|5mIdAT(77ht1Q$u%kTUiNoZEe)L<!zAqgsI3t;jiP#
zs|pKmcQ=rurYT5Dbf^N}?K!$4-Ci3SShy&yhJ8}^0BeT$3C4+FX>{lVNkkOd&>BE5
z<-Xjy7a-6XKpekT%C_{1r?4QP6V__Kvj(A*ix)4Q3V)rV7BfuTtjT6K)8fSsTSBXW
z9V+k5tQGv1^zL64{9p6Fe>p(GBY+X>WkqDfnH=rwx58x<MOh*MAW67u2~Fr>YQe?`
z0zB3M5LSQvB1V8Vl$;lLBgEL#A^&%e!oPeskqiRP|MdwqR{6?HEL-GjW=0kl4WC3C
z|J&2m%a+qzmYh7jExotOyWWGp&x&5EvHdF$e)v9;<P?$clm16&)am>Kaa0T8ogX#^
zsXwxxy0+O{<b(I;qB~EgZFt%`HPSeLzvuZSt!>S-L&=@bR9`v{OxoiQ6q|e2ZSqz6
zstc}Ldifq~sg`o)86M(cBwlpU&TiG6<C>k9N|PpfjkwJFN^!k$(tc|8G^;gu$%J2>
zUFkArTwt#GLDTv}3Ru0Dt~*&@QiYT%qSZE<US52^@<HC)60EV^5Y@K8sm&qr;hmh`
z`TDuqqC?zO&t;FJ*J;HQ_Hp0O*$>+n^qyAd1hOU}bqMPKzB*JY07d}T!rvXR(by9=
zaXds+pkRDgZ!WP1%<+R{^&bSA|8Z#kzbzm92X>Vc=t?~Hrp7;xxVZn4N1Z7n+e7c~
z5v;SjVpHjTRM0ls5$1azdHUGi9P5FPqHkE#=@Nsv9vb5x%w@v6QfE1J{&rIH$2>cu
zQ9;SPL?3hwzBsw=(Ftr(*3{X{l>Zjo(ax;RaU$Kt)SjLC1JDV{FooxY=cu0-f)gS;
z%L1W6dS0D5d#N{jALbzyQ?sM`tbeJ1{2#@&{|D0k?`|;W^!AHa-!t~<w=T0bF_!sU
zKa~<K`yTB?>(HK~pN6)8tS`Cfk73G}0LT-9FMbI#N|VczyWi}+lE7UyE4mH89Xg}|
z55zCfB=0XiKc$Wd%2g3kMX<w5myzAo9ljaOL0bz{k!`@jwb=aLoXEQK!bHMWwZr^3
z@s^9dHvb#dQ$sMK`Q<xrLtVv$myBe&R{OZ2ZiMod^ej)+Vqj`Bd1{&D2x!t$C1P#a
zg!9zaGyO+fdy#Obzb}}k;Mn-Prh^jM?nD8wXp;lBk@DRzXu9wM*q*%nZL{@8d9|j@
zh{ETid;dfkO%V>B5YQh(B+c3w^3s|ES?v0*_<}Q<xir!QfT0Wgg+(Tg11zf+aTltJ
zoVg<hvse)7c+_YWaT5n^&4Ilf#mGgH8c9Kg@NTu8>H%(3mPcW;{3aqL%8rH+Rk(9S
z^2+PAE%yRZI%@U3l@Ekd2!;2xl8p|Jh2A^45?=Dxuc$0GP6vjnJFZBI*H__S8Yn-0
zt;^R=t$*`{&ECU&9I35296beKDPlB3D9Ua>h)U&N-`)FNzq_l{T(@@LtF)(%Qy@qZ
z4lx0%v$4)(m?JOQ>)|qbDK4rK0<b^^7XP{rv%EHpJ5|<m;HQx_F;H8XGPNv<r994x
zp6B6idn0Y}3W#i<Rixl5EjdZ1h3MeNXK&KHp~)0^ERx6IW~oTeq}L}V!^1S5l>6;^
z32Qo>o`;ezUhM=?RK4%j^CoR9H;64WI9xF-fGG#?pp^=O5+(OTxI1#4kdEWV!pr>G
z^p%D8{h>9q?1}HdL!7UldDYG<tT(YyC&X6NA8aSV_lDRoH#8n+4`8O8wMe#u;WT(V
z7>ox10Ih=0N}B8XW^;zmg8o&}<7Bu@TT9SGsW|Q)Eg{{0F1363lj%LByGe)c8qu1-
z&w=O$jT$CP3##P+Ya|#zE4l*z^?`*8WvYh91sMsUyVP_!@v5%}5jm11XCNOQ;fsy7
zd*4-ve0QApL8SenXJ(cT#AK@Vh11i_kiK`dJCa7G-ylzRZXR*4AL()y!j(lfx7yE4
z$QUF6ENGiefI{#uaMv;e4%k3^kQKJP&pQk8J8HN)=4#*?cO=*xLwV;+ZT<l;7Xh|t
z4y0R9-HiaO-kIAoXp-MD3SrAy<>Dg4nvv|Y5VNqJ?YaYM*yfq$OcZ1wdUAWFDf6`*
z#C|lIvpS{^A-X}!;fz}$j)L}N#`EHG6z?h^Cf8c!Dq#ub;Tqv9ZIoM>A47HM?Vg3<
zGSVt6y|-F{$-$^h)RM{!t5i7$1Q)ZGit1CoyG*~y@tP}P`>a%;<>|hlc|6LRp`G&$
zHs#9CYU{{Sg#UPLq<@U8*9}fs(!|KJl(E{Opv}%OATD}83L3CV2XZdz?xxx<$p%g+
zOgb~x7`a$POeYMW-p<D38=0QZF%)<+0Bc>y>!__&*Bq>lPYd`yQ2lHKn==*ctz~YT
zW-`H}dV2KIbx`$<mNb@Hugmm}{oTq;8w}&P^jyX+0>E;HO&KQY5PZs$UPaSyv)|jE
z26uT(#o0!a=~ttT7sFO6h-urf(Xh(oIa8J&*ZwC@*4*6Ps+P4$a4R8Rt~&{8#a+7c
z#QfX2<RZtXAOd+4;lA4Ct?btQ;J9AHn&u+*Wx{w>16?RB6Q{neK7&WeA<R$vXF^;}
zp{o&1RE1Vd5{y@bYnWU02$hj?yBSn<v9MH>`s8(`awtkkREvIacJzaba$!1dU1+>&
zeGH5liVGP}!271kFQY@+7K|E!ku7cOYgtjB(l_iEH#k+xIMBVFGL>hD0YYcE=A3ZU
zUBw>DQhbm!HWQI`^fe`8%FyDQ-*QVcqo)#|C@yHVw1ubKHY5C&FFnS2PR+X*Y0>&e
z007}@t6zz)!RNBSylqOTMzyj;sD^<FJ%;IBz-(2N%n=`IJK26l%EY97(;n7ZW!o;6
ze&b>iD>jiR6c+9)G)N0)xL*+a0zp%bRzk{pVLOT~Ty11bG%W*aJZML`%H*+qS%IOp
z1g#vO$;Re44VuT}?@jz=*!GDDnx8yDGz?%huf2F5IujGG_(N;o(ML^!r*W~f%J!>2
zWNX-Tx8oF)LrBwlx_*F-NI)FbqUS|@aCS#NSh_c6=Fr@8ZC|4&Tou_M+PAXXCGOSX
zAO4wE;d4ZE57!UY&kP;eU}FSud3RlVhUvF$TdHA}V*~$Y_xURKYfI=AAdqPl;l4Q-
zJVB%vZy-FST6qGtn^WEnQ|8MkOa|L}hx~%Sm<;xoj1Eii41-`0OC;9fRJ0sm12$NG
zt|Yz%AqwR9y;%z=+u^!4N!=8e?noB_cZX77r#)vG+Iq{Rbjs|yxiaal<LHCQlm(?2
zz9z+2HPzo5--#evG{n|+>z)=zS&!8ClHF-&z0GP}+9J3#j&1zQBF+3G_6}r>)b_h#
zCXSkZ7zlcAE-49@sks?2c;XKWrt7vF1x>1oIgi0gBwqelWDlUUHKI#|J0;M^a~b9r
zF1@)Z*Na;x%UFiicOHCB7@1E$oVkgr_{1-_i}k33u(cBO1ZC$bGf3T?!vlJE0%J*Q
zhm4V7j2Mcxk#I$~m346F_qcTZGTkgG{sYKlQO0;z##efq-I!-E>2eH=v&J8KReS+8
zoH??tnC)@uYH6->%#N67aCBN~iJqw8Hqum5dsf1wZd<(6{06z6{Ox<k(?Si>l9cyb
z0XEMc%>nRKE%b(~+%15IeR;^`?iM?J|GTFiZlvW(!=g^DarzmoY2-o|o#Yj!VoFMU
zuh!}wS$GR-&ZJB_6lYe$T0pWsr`_H^Bs228usJ2Y?pLB!!EfA$jH((N+Om{oQnI&c
zCs%xIYRRx(V<5mCvIOk_KmW`JzH_gktQ~Y^RkvKuUlBYDCZ4w0ep@~r+smEt**Aj2
zvSUHxQipYuLb+&^lk$?D)_Ek&*LD1xeMgHMF(Pe-MS{aI#lm<6a&0?x*2<&YW~6DK
z94I4kqm8sWq!7twlmZ~0ne~NOgIpMC0s^mHdrCMcZedOfRWQ4w{_erb!XnAfi-O1=
zL`&7eLXcF#y1`%5Mo#>v{}f;+!#?|GKLca>sQV+LV)kIJbN>hN_>Zzvu11!#HniuR
zKQNb#<e&88zcnPWRNKW}XpXmgXO^VUqAd4&SmUl5j}OU1r};mV&!+fvxNF6+qok_I
z&-h@0opogCqhF3kcBpu|v<Q?SkN7BNGaN)WrKnUcerv|L@q5cx#)Jk}Cw!2k9xYSJ
zo#4++{kAYQ95lBqI?-^b_{|2mc%)C{E{*T9)oy1^MiJl7g5OXtIT~k0rmvx(@bZlO
zKl6NIhV0+YmdZJkB{dA;i`uztf2TG7=i$FMkpB4)|63UF9|}(4mO%__d%{oG2~f9T
zaw6jXLE+M88+UnAT|>tEE6Uy(ewfF%L|c?&Z4Yq|Vvezhw_9Y}N@A%m7iuJ2g9Ivd
za{qRD+bE4DGM?PB8#CbLSA@>4OiNmR7qomc7GQ8^{Ub-hN_LiG@<C2j$#3+E5N&dO
z@klmxUj;!&6ac*DyeZxdcrhbzK7(I-WK>d>UaImJB=diucl&>Hcd^UHg5Q4@o0S<o
zk{ZV9FkNBMVdywsnIOrP*u&6I+5T}8*2vU{-wpWnYmHf2S~`&Cm<U+*Fi#Wibz5;f
zeCN7>9L#(VMH)5OWC8=7lU6s!a}g448Buo*FA2JOn7)YZ`-)*~qMacDKHF;GVbatd
z)kP_|`QoxcYGFH`nyyal;bv0%`b6M~QK0`ga~3;NjOq`7c8}!z>y3*k#X{(y5PL*s
zs|`yMG$|upxxC_C1M|Bu6QyQVg%|^=whAfZeX^ZgrJ<}rXshC8ver+pnV?5`wo2h{
z-nMcb+iE9etnEAF{w5Xnc3I1QeAhk{A?%49{-;9`@6mvv&9{n4&!o|tkEy`Tm^Z}1
zSn_$gd2uIp)1w((Z5Pkj#_F;rvZeOfcm5{l<CXUKuX!vG{rnz18knjY86L7h_#{0m
z+_~?@1`r9MX-C&eTe`wu%WE>2H~Pt;zLCawwADo(4V9+L-D$th-VUlyeOQevQ!VGt
z6t(736FjtMU({{$gnxAa(_z&&)Ea#Z9#m_*R+(&SJUeF=u~I9p{^=FYZGCLqp;10g
zL~T&q4#wjrbCmo9EYac@jaO-v`^<-?P${zp%O}h2&+_ZGiO_fu+hj924P!-Rnm_&-
zfpIA4qYLw+F2fB9ttLC&;=hG<I0^OwQ$%zI_8&?%zaZ7N9muBd*j-)aE&RFL(|D1;
zsCt<S_a?-*@Le9pqsEMQVdzIa!%JAQ6K9%U5j%M!hMzcjRjL<P*3&j<FwtVGhOXYW
zAlGTlYVc57Dx)RVi$2oo;yh|AvG6tPAF37Xy5B(!E(>jJm`|dL-`Qe9i@4-n^+Iu|
zQE7pEl278efF){~z=b&G0&nMx`u4y}$mkYAdan39L(7Sty_rSLx-P9!(F6{x@^50T
zT;4G`kP{qzrNqrG!S)5!k3|N+qaBQja({AseA9k)uiedF!|?-Fe-<oF$PA<Q%17Jz
znT$B14!2KE@0E^y)(72JTYeMi<@^R26b0KPA=@ran!5LD6Gjo5*Di>imEW|1AL57U
zqtZC$3jYAU@Z^`PbU`sQ(3=FIei7$1oX9N#mKHbk{5Ad8pHlugB^anuiIVSyHTBEx
znw8a;?n^g|EIf^q>#0-MXEf}@gtVgc^0laL0Ks5X!Wzp+WT=#*E>zE(ciq8RQ@raC
z=&tKEN1idtd*#9}$EA3%&DP&Ib82aLD{Gq#k*@Pf2=U?{SG;Ew<<H^+d3U|)H*5?b
z^ZeGsM&0>CJ>Ppcf0z(q@e0AN+8GslB65@47;88F&_Hxw+?nsYNaOE8qK#jZ9dHkV
zO4__Pf@Ej1^#*-2H5EdWlx=C!v|ft5bdDp$QTa1_+%siO9NgY^rE)?=1Z4|nd;(S8
zUP?mZArmtJj?crH%hlV`HI-)73?U|iZ9XpFzF%p8CLc{QjyC=d>eMU<645L+dhFK4
zlObGe*=dzYhAn!DJb$_o7}POzsu)lZwetu+s};KUaZDi`D>3IRyWxcV+vB`~Ciz!u
z*K(T-%4_?2WtEHCZ>p<*3X*ND<E6>a+##dO5Wmg~{03yVLC+UcK6UO^Tf_+$!_>UX
zdFknOVtu{64b>KwL5>rSd;5;FM-UMXQ@2U>?8eUyN#kFAmmhvllnC=vMwy5KJ$`<C
zp>L)(u8zs-&B4CWkJaMSa;XU2ryMw)*`^EG)H82Yo+3YsXx`;tt}yWIef>*#_1(n0
ziF%N<EDm9wQaG9nWnLl*E0T}`rT4ymcJ+1<CPJN}ItGC_UbKUkce~E3<G0icrF-2r
z(31C&=;^lL<-1$)*%!L$;L-lyhbGm8sM9n1u^@bx6FtLm_EZm4XLZKmPnQGQi}ls=
z1y6z8kLB-%7A)dgGk*6bbfVH3d{P5yQlI?+ywH{i;7-w+Q$?@kT)Z;091^GgUjkaa
zvFrQFhVKd<GXnq`H=<uQM8phb-M{sjk=a{w$)_j*m5ecTsz?Lw`oeN~eI;WG4~lev
z)ccKi=fJj|PFEs~{Kyv&M>fe8ITfevvS?yYr4+zp34+0Z<Fe#K)UBsm(k0P;1Z=wr
zO7z($z8C7CiA!n+NxZ0ew|=l%iBqc}h6tuX!6W?(M#L9K#AxgXMFtLMO+ab+ehbB$
z?fm$B7cV3$K0PBReDg6JgOpC5ES&S<niVPK%Ul4)pgSI%YhvEBFE~O_bGfqJRMpjQ
z(+1wTKg_)dWiu>t)Mp~w<S<neRCNV6C)JM~UQ>!>h+K!)Nq;_Q2Rl@b!pzd1#10r|
zg}L}OnK4u<e7y@Z6Kh`mh#gvJ?%-GK_>o|nSO?doMtAaNy}--*1091_vDj0?<39kS
zxV@?HRO+P0{HfK3fGvd9+>WUe6igMG)A~!e!hu$wHt&Up;#~Z?Ere6Cj6G{I7IxK?
z%=_Hgx;h^8G11d@!<_StQOS~^p+JhM5vRsr)H#K%x248Jyc5<+mBP=>LaFG14bm7k
zb0CgaHPc&^%dBEkp&;3_HX+9&BDIWyXg8mFNZBJX+YG<M`hbLok{ssV16tKmG|Eal
zCM8Yb`g=M9TKr?gPR%`_jCKede|}V&poo{W_^QdePVqhIgWbbnO*8J?*oyR3A9g`#
zfP{Gd%M-=L2*Ehp*=JFQ0BJb=OumS#{M!#r5d{*44U5_AL9MN_*Zo?%tcV760e+Q3
ze<J-xim>=!#*R(yPXoN&J$)m&)VrYpZ=k>~cXuNb`+l1=vRoSU$hnU*jgi%klo?Q!
zHEF~{RiLyAFCP6$Jfkohd^MaM;fFJ8K7PcqhxM^m_EqH{0O(opMRQT=Gb!(X$sBnt
zEU!6x=!YL%QTggTo&0Bl{6Aw8OMpW>4|ss(zTUOEEgyxdC0zfBfe4oN)p;T~sf(3U
zIqL4<s%z}@N^BNxRi&XEFc0w*SLL<qq5mRh_fxpQ#5ids71mJ?ZyRBKZxi-V(RR~&
zI*cq;^9w8~*Cr*Yi6yh%$I8md@}ANpN3Hqf$8%TMh*57Pb0ro>Wk$ZwM?0<FA&BM|
z{^DtgjAlt)4(Epoij&_+t#L`}{xfO*pU$ZT2lY&&?TafONo|e^4x51Og~0h~3oQwR
z>^`XA-N|Uyk8n8`A7qRo37rmU^@T}3D?mK@g7Xj?)+@v4+by=!Cjx&v-)htRXx)}I
zxfY42)2dIZ==;V&(V<3h#(UQYoV4?`W>rYa76LpFoOY7~#A8gjm8gzM4V$F@_|l5?
za1USO^HbKk)Swva)0tL##rh%JckzB=>|+Z_wMVh%=^lJcEH<`dJ?_R3C7b@CDV05F
z8RaAFPJXY-cD0}F2GOZmYRDMKvqE0XM^c}YcoaV93CFm!N0jnk2iNGfsY|%Xb2828
zgrfZ>L=UoIJP(a{tSb#t&eqFDh1y3hF)yn5+C77664MvpO_OoX;lpmk^%U$XUFL&B
zol!B+k8h#-xJlgap91j6Uo+cd>f=d;5+Ym&T_67Dr@0C;#ccc+*5`jD`ouwJA+e=D
zEaSxBhuFB&tGgFZPN$MjBDHSWtZRbkX2Bcyg9k6ttR;8Q{6n6;$I971Z}zfzomT8R
zHapL{>g3*`mni0rm8Fui{K4nff!s0GSfCJ{-H>PfL9r4jqvJ%mz|HVm;~W)|;O3m0
z_jijk9e1oZmQ^gyUNR{4je@<5q60MVt9wqLzqivOyV)_1=|9&99%jUbEYl_T^CQcG
zDzYoL-Qb?yy?{-R@!;OiFMitedoI?ki@v+=#lBor2F|&Xo&5o%&)rfnNE~mXQjvIz
z{4`(Rj%SqKXKEU~xj!wuyXaojsIru``r8RA#1jMNuBp2UW9LHxb*>#PI<#{cloN*)
zi*l@-h?ad|Ra(G~z$tUGF4W6~6?S^2osdL<#^)-Tow=8q=F~6n*I!tDDu<k0eUM*|
zM_c|ZmEj(m<Co6J<^RVsOIQ+g7lE{Z+jSmUgeC-k(EhBM$51rKa61XZ;Dr9mo%l<z
z`rD>LaKLq45PgVRc1JL&gKTK(U+^VGiNp<|&5H4*SjQ13;jvR-{LA3e!4L(t&~3Ey
z9=tZxJ6!w_Z7G8QxYONhGYGg;O~<ATYqD<BtYl;yb1icTx7fSLYM|!|=0x=|Bos0L
zA7A#ort9->#|5{z&-c){A>i@m!_VPVw%)LgnjC?u>NjL1by@}}<dl|vazKewfThxJ
zj)ZC`;RlpF(m%4Z!6%!Ea)lyWJyrM&*11ujM3S;m@$Q{kp#rk}GxC{PG`j~LV{UF4
z#bG2#=_mQ2%;J+*#ljI8gAP?V+RcN+h6YYOG*W6F_kG(BXH}InRZT9nS<ar$8{HKK
zX6%i#N!ASwa!L8~s5)*XL_CyKCgtA8Pbg9yP-{l=%9wiX9gLqrGgrA(#k6Q7s8)Y;
zPuM04e-iWDI@mKLh9E~Cf`X}kyuR*8{OIo3`f9C)q20#YH0w5Db!k{GQ!0k%5jkTu
z=9^0k<!gaS>JrIxOAW)~I)gXuN>obcj75tGXkm$Py1SFDrG)Gz=ln_PN#upoUX6_i
z0bN;JEZTPAhc9*2`q>9nePd_m+I?bb-2&>yx6|$(mE}JDWT}oZaT~=GJ22sxRAZI2
z&`03|1TB;r(!K6a2wA!iI5PV(!-CMYb<Q63R#*1)tIpwFKj|Ay;B1A-z;h@J3@xDS
z5rx&O9{I7RVtIX`CFm2QXF7-6%M7O+)C9P1pFrc9VJH_02lJ4`hc~)9oRp9iw_iSy
zWqW{-&DAn46BAx^t+A&+Rdv`1EE0s4Uy=^aFI;h75+$No34y~C8G4nS(_M~&{s3fT
zCvS9AozG;JB=PB>LB!9LYgGRLLX9;47KHSH3npER2+~IyGI?Ko_Q`NuXcCq#2!n|k
zx_JxhM@3NR!4l`Y+`H<m+?lp9ByTzaqR>0l#G0H{M^PvmYG6P{0uv%DBBdOBhK`ko
zGz6w7@^bxnYD&knp}C}Rw?=())6-<#I0uxPAT%@6aBOnXWu(pV9K<EYzsUoaM$y<q
zCOX2+RlUOOdG7^332#n#J1N`TeR3KYt?N3+yw)d(s_W=g;>Q-JbP5}qf*(rlj;yiK
zE{bp@sr0w%DsIlddz^_8ls8nvoc_K{{nh@QjoruJPCv*6=T%ol@G|-)X6bkoM`ahH
z(H<W$RS`WvHNC!pXL+V^WIp?H-dwt6GslbaH0B09J1Rbl8P6B{7x`gaAP;cAgw%rF
z+LdA<YOUvf2Y)yI8w5ETvh3>x7uodG>dww=Wjis*Mcu_!l&~9HP`M|e2PORrU6Nkv
zFEXa%GNx-zstO$*8FoWy>WGZIwI7u)3eYKI=R54Az3qP&&&NTo{kXPk%gdFHoU$q%
zIFC@bbu*z{EG~B<ceZpX&)k=|7(@8FF1Bf%c3t|USb(<fue^YZZAEp$ucCPAtmTRb
zK09-sE*!>~q<9{(^39ViHo|QS*{CGElB1PTQROvo9RqAJay9dT?iZcVbiHH(sS(5a
z-QUV3K*@q)Yi%k#<4B-Oie&RtKd^9Ho`6G6gv|coZUt)W6q@`g#fwjdMGGeGfV*7D
zgj>eLR-c?Od(mL1E$a-6W@PUHNS$nVeQk-j3s-m`hWNS{@3)+hp3tud=j3oWjLUEp
z8MqjcC6dZ)(nlDjtI8}nU!ETBEL|Rn;Q~X`UOvESd=NC=i_ilZwFgYLZ3T&i7|J`0
z@JR72{Bq)#u~2X-hmm0?*LPBRzPOL13qXjxv=~M=?Eg}K!)J+nAhplyx{z`xcySjO
zhFCh6(Ot}wSe(89A=-Q{I`1y(_X!ORJCjFOd<>Vbg8lKwm@!%oncx^pqN|8S*(a-4
z+#n)*yT$@;kEM15Yjrg;`=A9>*aW02^O<j~$vTB})27b#EOd;1Fwzvt9Dbw%bY|aA
z3nzdtX|M}^2?fw)gq{*a!d_j(JZg*ADWDl?9xa^$+a<Z&&Y&{7lYKo=ZoYAf#;XoR
z_J&oNU>8w`iiO47D%ip6V7p!XUf;t_5Y>9|sbuK)0VJ7V3ges371pnVpjM)kXiCFL
z-HUwO%&#l>+T5eUUy8ZHAK<uT6?%_jp)<~e-q}gtiWWIzdn$Q{!(?#rWFT*K*HfGc
zuYvBJJ8Casa|wq`kc;3YZ2@PQRN<yibphY4XxpgT1#f+efT=&6%gm9Kg|GNPYxkx2
z>US~51)gQAusA$od^nTXBb>^FRpJL(9FA&zbj;_`+vH2Vy}*gZsmf#n;Q{_^94sC+
zaS7l;R(kX3y6&Z>>8-Yd>o9`Dq#)OO3bZ#!TH=)@{V*!~`+EMSl5R!q%qEqZh^>RF
z@G?SEx~i3GB)oAb-0PCWI8!$<58OcY4CmBN)j_@7O80bFK+J2H|H9u~FH<V_arpap
zAGgFj_YRrNyA}6lqkkN`5`BoNTy!Po`z(rb`Z@&eH(d~OUN!>D-sd%G8+lP%jxNd)
z*HBp)e`b@ASR6&EauvBeRTt|YzB~dCTFv9J)P_c^4-PQ(4q#pRT^s7JCB<zVRB^J^
zH>|fUU8>K|*=0Itvax1PI`Y{z4Eh)uuDS$!<YVgm0f@OjL5xK*9<1N^T*NCXW!V+3
z26)2fI=8P^0vxqc!vh?aXAOgMlv96+m4jj{gmcg)UpTLZyZ(+1CjLikko*4yG9csz
zzf1Zgq+8nIPFoosFw6C=aSyWOhiulxlzxjxuJB>XniC&>7+spi{n>v*5Q2d<no#0e
ztg<A^&oh<|-KClsc}(>HJq6|xKJeqzw?<6#L!e>2N7E(rJ>~Gc&spQX=qpaX8EBMh
zIT1NCN+sWV21}$7yJTOn>a166T?@+FS_P8HeB7uu8(y^@S$+FtZ!|yst?CQ!#HuaV
zjFG<2&#Ce)R0`7_qrYuprR4)24E!KAnw&p166J@UYC$G13VJaGLTiJKMu6X%>zpgb
z<WL`L4;I?fMLFdzn)%;uxgVw`ylQA*d=D}IJ{;rp1St=A%FddeH|-N!t1-qmR`u++
zStodeeWTMO>;U1j8t7Z{NlQPAmFz(!Sytt;)q0aj<@`94LE_8Oa;={_;@B98{z@Zl
znYG^Autu5mhE{J~4Df5??~~)4LyJBJMF9(>MEp{3i1C?^Z4>zrTGNYmvOx}49x23J
zY}P+sxtl9UmBnE%@49@ay2W|x%4MVZTw|%$B$&@PoNoc7HX7UQ!XEBZ@lHzGphy7Q
zy*|}T?n$I-+sE<8d2JuKt!L+74!Xp3{C3WPy7IWJ@8V8B%ySiwrQ<lNB`j#Xjul2h
z5Z@#}#$a@<^QD<d#nucN&&H%NKfaci31u8*#{Xzu*|&-5oMKF(p_TV7jjTO7;myZR
z*Bcq@M18S?m4<i>5~`Ug2%TUS{THNH>Vvbg>R-<a^T;sItIav00MQS8e=#uFV(_2+
zcNr_%UbTNnO{p&*0<3?$ZVG@gd7Mx3>0;5o#G{+g`v}+~#Zc1~U6aDap~fU?BRy#r
z9}J>MiC+(}Fx+ehFL-=54Bk#jPJ2DkC@k;P{Bln!uzchrP2p=2vUlzrX4{%VQJI^Z
zVK>htO4?M<LD5ZL#fJh&HB8++eI+Kn+j*l4st<ZS%qgbtquD;Qr^#@!QkiO@oDu?^
zx7c5$sR)LU^JDu0FDF9No{U`8bO|)SO5zQL#761aq^x?%#m3?q+CksR`!W;u{h*t8
zA5Nb<1Z>(DH4wQf&eLG@Tbc6`S&0(~$^Sl*<gKP@x?YiLP}<Zlsh?^4My_S>1kMKw
zBH_Lrd>_XIcG&+utMP1Tpt@Ktz*mS>zZV{<Vng!w7$75(`Y}Yundagby09`3TJ<lO
z|Nrj`o&Q#<DPz81SRs=&O|2o_!I@Y;`Q%_fZ`ymARpg`xtzi(JQxs^|5rqszM)@<W
zkDwk;atOSYnKDvJ?714Bw{gt6>tfQmS`R^9a?44j%I%5#!#oA6Y$b#VZdA2U6EOGx
z#Aw8R;BzlCPB@J%iOi?Lp_h3_b3i|z$oK~^9GaVnRqLCuToXIE7jy&QSpV|C&z;aL
zke-<E$LdQb!WX~((ewOYMsIi`UUQOlxhgF1><!vmzoXUj;PVFtu+J8y2Ta|v$oEG$
z_A97gGkh7Y6B~a3vNE>LXyv=XlCXkS0ICZ;jRClqW~47t9flafQhT(}B~3$%SU0{l
zVYZ-hUDx*kv)bu%XTRALGF)fqTEMlTxc-@UwA_feA~p{GU7Pgbp`)hOtMjBdCi43H
zX7S#i@UPoJD_H~8r3V1?KYyQndxW0Ut1O^`49O<dfw-6W4_Z?Nbus2e=^`6_2t`1C
zrd!B%elN7fl<@MltK4fWSBAF-7~KiwBtD}nr`LPm-7wCTkACNrNXBJ=V5sr0L2P9D
zAg9FbsV{Y(<tIq74ryHdmoVTM#6+B8FC$~!kFUy+4%6XeWqPOvmtmd^-kax~TgmXb
z?^i21{4NQxTUcNGTz@XT(DD{x&o2T__?cUJ28vhRjV@&!UEkfa&zCY{N}+v>Ru55d
zd)Fq_s*(oU4cKDWPA6(5eL2I{?GQG(xU)YVad(#`+DsK9w5~yJ{oN+82GYCW6GzIn
z{7rqNjT;vlpDa|Ois^1Jyp4H~#QAZURs$HOpn%!VUhEj$;53`jyF;KiAzmex&^Ka7
zR{IgUN!fT&K9e%m&#gb{Q3k9Cx2A;qOlgtx=;Ub`xQmceyQ{XU7InYt0$)?y!JqUr
zrj$yWu*Xe4b_o*l3L>{AUz+^jPLnReJRK8C-tp!oS5w`AkptOUMG=u06Eo&#mG>mY
z>_)R^4vyN^<l?gf@`_H~Jm=3`o2-258h0EClRwz31A|5>r)ww&jzbFi3=1TbWH;k2
z=35jGt3<f^CDY6)3TF|z!Ke~@FZe;t9alvY4=F=<trE}F==(?Ug4kKawx-WCCY>iG
zxd0l&Swf*yBXbWi%uk#82=m9xX!mHh1tYDV9c>_e=D=^~?xkX;Oc6zBp3~D9SaJ>f
zx?IwNI+4=T`64v|FDvrE%*-M`S!?{rf_Ej0ZfEOwkj|v0yPEhL*9~=d$Zbw|0E7@A
zONiC-H&K8#k?B31-qHTyIJO2S9*fzf%gQ;doka-PCI^3WGf0MF32g-yuIx?-isZY#
z*S*Os4<H62R5|JkQiZm*&O+XO*khf9RUhq~9IgMB?)T4>h@6e&fAMR5Paih9eB@-A
zQ?uBSiRavWB-WHQa~VV{QO}J%uMqgczl?11Fl%M{(yhMvu*(CyysYRtjuxWk<izK&
z^%vR-xh0gmY<3Lo{72;my|Bv7dHf(`;OeJlmO{c|@Ri8e#6e$pxn^SxdE<3dkCsuF
zs?2Y#pt~2Moa|5NLa|(N)3ClSoI9pw)zlsZ9vmih-PQDLy0YJ<p?K`3)~~xl!gQbE
z2wt?_!VEXPZM~;lPj09T+NHZ+<n%*HLkW?cc`a`DvLzJ;ZA%-;KQl(pm=R@2=YI3W
zqeS@W591{*R0FB)HvbayLb-(9hU7?#1W=QiLI~fp)bl{D7^9D1mQaZp09ZFY6gLg(
zCS_Af0%>^$2<moLBPUH3c#pC{F>Chfk2DPjw1l}#QPWAf?%RH+h{UOmfrIu*;#fZX
zPk|0mxz%)vb}V@DA8YGkJzgj#uSEf{N8M=y6T{!=U`QT*Volm*w;&+2!km+v^xqFF
zlX%^zz)~#e&_wOUX8VZ4u6haNS6t;aNB>ZIQo3O0+ng@}0e10mbc;`%bdqjThnWo`
z)~C(MR>a9<aWm7YMDz7tlOn5#9`w<N<M+qJ+L%^iTJH=_v0pwmgjcHs>uX$FaeWY3
zP+mL;V<df2^FX=MG9vr^tmcb`yUW<V*R?c`7C+P91LI5`So04KGWDFKP<9C<k~kpm
z{5JmpwKvZsxjpp!+1(4*IR}gq190pLd`tMf4!+;?rhE3LkzjXr#~c`|8>EA%Y!?~1
zhh&UjVkip~W>Og?q#Dat=Rx&V*agE2(b^mlaWg2=d@wIP2~0AYXId8oE=0Q@iy~a@
z=A;gzX85r3s-)=?^YCuDB==qvg3VE2w{~zRjO=&)ITdpPT3l<Xw>B^kC)IwpTa52>
zg%-Dyf7|E_menh|RR6}?MmR^x9{_hyu%508_8w}U#_d-~Z|@B@U#IM;m-Y9mbj~ET
z2{j%mwlAem01$%9b$Hp0f1{DQEu>=w)?QREQ!NEDa}62ay7-n$23ccdT3eG9+`b(A
zI$XwCT$!ZJ;hl0A^xLv9L-m(scE-I@ThdZX*h@~r_pqSHCq$L)1<T6QI83ht-<$ky
zetnMuzx3Jr7p!gE(S4#DE(3k^OXydwK+!)Fod20ZC^gX+H5ZKL)s`a!oZx+LE#dEC
zWaGmJ+nUB8<5Qr!WN-%xmx_m1csxmj!`J*NR*D+)01mwd3Hh`XuIfYl=1>0XZLrli
z`)_#QWaEHOH|?QMx{G#Vopcskdf|j#ll)>|OZS**SZ%>P8-ZNGW6GAQa&M??WBCVJ
zzdWG94}9n${^PU~yPTVyZ?nHP4cXgXj#K7AhAa2vfdVcN`k=Qp{QqL_t%KrFw{-8u
z2?-V;xVyVc2pSw3*M#8KxJyWIcL+{!cW6AgOK@pif;R59Z|@^}pP5rLcTU|~U(Hu{
z{%fk}?zg*NS?^lU`aNg>0F?+oHYD^q3J&jy9SqVCi>Q0X=oPXVTMBHu!uje<M)!q{
z%0DF4#A@7#4|f|;DrmJn|449aGxGTzKnSYtQ;hF;Og-%1TS!zhcg?#u_a&m+ms}`a
zR_$8EFeFSz_r|vf1dg@Ujsr3(nihNXgRQr-gB$uqxk$rnIS$#5<)TLl#l<>}l*1c?
zYOtGG388F<Z$?MvN1WFdtAJi916mEGgnIWb&6sp)n8x;yijg;AA}&pk<Vkh4dJQi0
zCDMlmVmw#zVb`m-83S+zg323{Y%1>V-DdQSUj<}%kEEWU4!SI6Nv6?KLDJJYw>OsU
zpHfYA`q=VJ2*Q(a-qQ(}h<|+MtEu}{-tyjIu9BvH(#Ps8*2h<ZaYrdA!=qaiwb43F
z_IItEhYa91ZLiO<kDwlWbBDW3B90Dnm3f^pOiEaciJao81|?xfuU3pFwiLhH^S6{o
ze9x)L{Nl26s6pO9{uH+?D`<X3fxYZ5-E9s>>c5IbvC1x^XEpU!Q6^-1x`QpUzqf;@
zq$0r(DLOGb{)O6?W|t0ZiK>M7THk?ok)+rIcd+B(P3Mi#4~<M!QIwzmkVyZhho!=c
zi-r)|@mu!ypdoOx`QY5luDoB`Wp)zjCz6b5DpptSppZ<R>#@pVJ?^%`NlkNq*@y|*
zj1k=8j57Mw^%vQnzC^=Q5prt^GS0Pwe*T;mdW$J-ysK4#N0h__6$EDFEPETG)i|~^
zT%Y(M)6R`+H#lq5&(w>F)$?u0Bw1;+0b^|SwY)W{jw?GbiI=;4pi|A0o+Yh>wl<k(
zS!#oG+##d+DPVJ!F6+^gybKy>LYx-aQ?&a$Xag~c0l}Z)n?DL;#2nmi3dQ{cz2&-<
zu(SLR@T+LB*kGl~{>{DitAc)Ec^CxfK&SUXSojjj1OmLPIjW#k(Z3E>0M9nqx4K3p
zFm7v(fSR1r`!=)dKG2qi=`-!n%&F=HxJEkx!t;8O9k($PUx?KE@;_?+26!;ndyZ=f
zVm`7WdJV>7_)*|CJMWy>bDy+F*Qp&<6U*@{>G%--r0!Kog;l76mIE?QE0>X=MD3>&
zhL$KM-|Y?a9mS*ZG^-5pAcikebubS;5}01?bnmm@kKX{++V|22{0~p$C$hf*bOw$0
zCHG3Wm&d3CLMYsizdxj;>t}OcKTT7+6C_v5-5Ha!{RSv?{Y1#~k@Nulv#R))vxjdV
z_7D=upGi@+)vMgMZ;GGeyrQ0Af}s7F=Njwk=XxQWQ_}TJn6LNctg9|K!ofgq8J&f8
zJDZz3`Xy;xkrQsNqNXZ8Md>7u@{6!gWcY6qC2ZoJ5)pEH?Gb0`r75q@d%*Tpd*k&F
zR1HLVHY+pg^4Ckz*7MKvF*_ECy;kJ;3h;+w8?AOe;;Xmv^25O~nJL7LYgVYsqcr4c
zqGXr4FH$)TyR;d;?SBjNr{C?|6b^D}ciBGC<)y|9?6oowWz8_umPsoTit3kVQ?Y4O
z%Wa)ms<WqeiM}ZzDLA-po$vcO*;ESB(UUK$Gn`7OtyRz3HN7#P%#!BU_an9&iGB!~
zL2ongah(JvCB)ZRLk)#}(Vs!40%P(@tRv35yN~xCJEehre{R=k#Id&_%ir<WWN%xd
zkZyat{nk~Zl*zJ<F+~O6GPs=7Y6;y;u9&ZixB2zl1#?$^`6^7+Cf@6Lj9FQ-+24L`
zGp@g;aEQzBdm5H481K@&S8OO5rMtLvofmwCk#g}$d!a6C_A|jH{kV=wL)hgP`a`SV
z0L5>|D)cA1RSNE>_y#0bXFBA?N?MF2t`weJC60t=_YciWU*rCng7|;J3llD96R!ID
z91e<p3(GKh_*RkusOD+O3*K_9y1R%GtH0QnC4k)E_L8=Hm?EZakEZ)nN5$!u3D=6P
z8k4#&)>NxkOtNG+%elu*L(`f;asqSqXf2Fz(|Hwa-<COQ-CrIx6h(dLYJ8mr9Ehve
z`}A@a;s@51&~thNtwFLap^^ZhM>r~Rd@_@2Kdq6Q<Ar0^Do-rm0i(0>1CD3E#l_ej
z%P;$F3T`dPmeHxR6YXp(fG5YbEFHNl3=;+z7#sJ?a~!dR->kz<On|Vz-4FDESmdXG
zj;<>Pv@ia$A4#w8Guzt&Juuc`YNMa%J;{%sRi<Z;a)?6$1){qKzMbuCEx4Wj1aM$d
z9<6otKpvja;O5W+-PegjPkQ{F$%GxQJXNtN|02yTI(8|lLVsR#$r8T*4z>j0B7(01
z{3;)PBo`0P*JjrId<4+p^!^A~8VE=GE=Y&Envmys$Enu=s1^aeo+#+e&`}1Ai{Fc?
zD3+1~ATsnN8WCT^Cg?<{hb6|}5kh}iDnFe8wj@G5ogL&FzT$viA^-u9h(rm^vT1ID
zo2TgF4y+;u&vq02V7R)b`4|*Av>N)2pc@3|wM;?mw9N7{-wkd-ya)UdL<KMMVAt;)
zxIs8ze%PVnTHc(j{~MsUEPrJs?Q<4+`V;NxGC&;G{NqR(l0rt}@8RKb3WZqb7xB$}
zM;(t!%eZbWpt)htZPpit9Mou4;HPU18Jc;+P_E}TLjm`F^F}mNuW*p-LcD#=4%>nK
zCM?uu{J_tYYwFQxpqqDz#1v|7*zZsEB-~VWBsp!>@YyHTq|ge@wzK0ht#*WQfE{E}
zO2nl?Vy}Dmqip)slSs%=UPxM3vO?#exMg!iD&{<=m!;!M!)*0d)5<C_eOB@UNHuS#
zG!q<(lvxFbhKK0w%4B^ZV6N7|?nlk<j}zt0zYu_#G~rx6!>pS)O$qO@#|Iw>wgS;U
zI^xmB66Bjm8qIH-%(!gyoR2j`8DQI!U7}5j6&lQwX<xmK&&11b7TZP(=o_!l4w?4J
z|HP>9qKGGugc>bEwq(AFiNH&}h*qM2VPiu%uR<L~=885fn^Zw&lXKQMa$vUJu0aK{
zwT09Aq_tMte#3C6Ra$8~w=gczTjx6JTaEQf9$hHiVx4(O9HM?s7$A)5t6XA99BwE8
zNC!;JhyNlEfUEX=Q%H|*bah5h+H|DkDPKvA|Di{_1gH!wQ!6f;NpMdruRQVIc0(<r
zsi-skA=A2GuN8w^Uc2w3vMdpb@oST^^OdmU5%z*0>NNm{7@w(%&lH3<D?glMi>4$L
z#ms3e10a4rH(U2kRk_MAZyj+w+BB7nzl1Arrbiuw30yIxLBT?#orvN96Z1y6ja^v~
z1wwTwh_Vyezx^WxWs@V?-<2MA-}(>$n4%%}Flq)F!aX`rnNhet8*%pJ?tV~Ew5fXt
z`hic~VL>(p4uGi+5(x-BBPWQS1!2L8^~cRwx^2znbq0?fAX;WKC42A8@{_U2@fIc0
zj~t=V%L1<c?tq<YRDd&SGvL9%V@M;>o+wVW73O-?Ub`}o{u6jnm}o{Uy*j1BpJf1f
z?r0BDA?B3s0W0J#=4mKz^d!wPF;aXr!(k{bWk6D*8C1pQ9RKu198pS~pD4(xMKwux
zm}gH~P`=DcSD){=7OY}8;#%sK2CUFG&qSqXoJowY*RtESq3vpGK|LRSqS861+R!Gp
zI+$;65KeiOQ*+)BzLYGn5aLk+C??w*RMxaGmCT6`q4%X}@0Ja{QvQ~M7NY7U!*As9
zI^$FbT~gj8!6A8W#st5aS76O1Dl>j~AN+%OUkvDA*yts!%`iQ+aCqIg14ez<TEH-j
zzhqaKpgcFm?@Yp(m2yR28lruaT+G#5$z82IIISSR{)(697k2A_HAfi!<c-}0R=v+?
znVL9ZJ_}a7FA*IIJax?tKcRxwRgBUM^xlRvtkc-^`1JCCalov63y(qCcDLh>CtY{9
z-}p*fui>i=PC4v)AJwDjt=981tppDxK~A6m7h+`ycD13Ae?!{&Ed)*<o%NO6N5~r<
zVkazI31r3qy!<zcIl#6=mjI`x1i!*xToak)bp?r><E2G{qtvS|MuC$pRGIaD7?B6O
zR-2~r70klB%)0Gk1-0jwgg8>x<Dow<XE=0-xQe&#6`jB8gz?NdCEC@mBtgyMhZfop
zVN;7^QyApH*?bNFp!-WnxowJ75$zZlcvmzlHLNz(SI(pK&l@R5TgfExE-TF@xYc<M
z1SK?Tt#pBg7M^&iKK-F?6T;BE<Q^13{M_4`9g7;hDj~L2?S{g6ib4dF%OQ&7Ex|Q8
zG=97}b&RwXxx<9pstyGllXO~Qto++*FJ4L{a`S0$Psg2<Ke1crqPzdy8YT?0#J?bL
zp2J>@@hJJH9lD?@DexZsXr$O1ekJwc><}t0%P&zhTKF?&>M-VMU*11a{*UA0F9IEO
zx{U$f#<jkIo<(R3JIMblM#`m&uk)%7uxnI@S<Ne8!Wql`<pWvw)YFKQF8^pS4^C@L
zYx>F!i{efdS|e|wLv2m`_0z0KhDg~!C`B1QQ*o*khO?Y55kJpCHv0P!7kBF_r3m9l
zb{CUJd++VLQR_nPdb=OgkOoh*_X?5}Dy<?<gJX2%auis@gsLB7+gv0P^Td@|;qt}Z
zvEg<{QIa1KeD6X4N6;J|G?T_oDukv`$BXYB5P$robz<ahD#`=@*BO$Zq9GWN&=~!X
zWWFDlrF<nF$Ru!)ymEz)g@YjgCji#Q+_E6*#R9^|D?}N8R05@ru95MR#&rtHCE^&Y
zWpBx9OZ3j2fy?_uMA?7ERS`a5(Axw3%~j9HDA4ezwnK%@2kfPSyCU!fX0;x8=Y|E>
z|LxE>prW(<!xx2`kdJUsPInypfmpi}jDfx3!o10tmeO}DZoEg#G(VK+ufX+l2Kx@Y
zXNxbH4;!hNU&ya+^|%|(J`$~YyL{N+r9!d$aSkztqPM>*ze20&F+IbvA8CDTi=j8O
z+s#Zyi(XwZpruQ1&$3~&@f5i9#42hrGX$&P0fmL3DMmLFedG!rR<uM!{214u0-MT^
zs|&O3WhQXd^PTp7m;(DVq2%Es61$<r@lxsH>e-+!%Wr^m2D1roFDE=%^~U-i88xr#
zl*cDV%yxdHWE4(UxRMfv6dp}E{&)ozeP~)6NtXTo`Pl1ahvf`KC@M6H)d9Ve4>BUO
z2?qnMms3$t5uagg!x`xvr36Gr{<MRgFSqxlQ^RWqS08Fuh@o)@tNkRx!3k62E|la~
zLhf>Fv|1wWOAoaT^PC4~T%j)lVW1K8fSHZ;=Z<mSt%`~&_B@E~+MKs%{ZYL9q8W7W
zfZy^jB!Rz)CH_YX_}@kgNTk%UnsZ9lb;Y;lf7`evE^}P%N}tZ5AkwlbN&40z3`F9{
zO&aBjV1XxBz$yJeY2Io&tj_}Z6uv=hEwd6qOT%y35)&2m@vGfxy_<8Y7d#bDR0y}`
z+PBO0M2BeBF)fV%;?37PGr`|0GsTCu4dhbK4pd@AH~Uw>A7qN`TAbbJ(kGZVY+e)0
zo#9ejAjWBAE_8aOVXOk+7N0&BMf*oaxtf>3h`Fqym>*kHh3&b`2Lt5Rv|Ch&z#m?H
zY%pw&YJqu#tb0dzCo4q=HhI6W2vW;+*R#cF6~y%)oq<O_rIQ>|z8B+3&xbg3uG#Kf
zu}@)h`-2|SY*LpSE~kka!+fdv0<0GCvPC{!8+%;)uS}UM7UOhu#alVP{z6aS)JylZ
zMa34K>r-pXWAgNKA<v2HYEkvcNVn77d1kz=Tw_>NkNwzk6^+HDj5(2zV$3(e1gAmL
z_o-UNU@9vbaTs$_VO4feYaFTfHl5H2hv0mB79+m~Ba3uR6$O+yR%c^s^@FmK{*284
z><}Rbtap&)baBzZ#bd}oGtS}ETkP+doSjlDFnAkW<rBgj*kBb|t3iABahrGv^kh9i
zt!oWY$HL#n4eH&w95rdk{n@ND%tiBM6Q8HlLN3CYB1Ypy_wXGdiHh+u{`qy#gYS3y
zT6~L!n#}U^t9IH!lAi&MdA(Jp8R>RixQf)(C%poyShUpT-;N+thq;92EpD?Qo|h_$
zM3dHTgrAWO8WHPHCk*t=1+^NfcX09Omij$kQ}UD<Gn$OvbKw03V5Wr=G)_wgocwZ3
zmNkY)(dtPc9kA<#H85Z~Y{J91WE3_RHun4W$<IG`7*NvnKP&ivuR|EcMV27<`a3`B
z1?Ay>&8<zXu|9ntR>kCNd``ZzhhhNzJsZH6;E~UkPKgL(6+a3(^dBS>b66Wcjzs03
zxk-F_gQ0A)=H}bE){$>pf?STpbq0z<$`Ta#g7ppq?5ZgZke3>!<A7Zz5WtYsq*f19
zQ(4j-S80JpL_F-8{F(QgG9|c*Y5r82sCHncAOHfc<VQ4wwEue}W&E`Ofqf|7T)CeG
zFlNVP3}63h@|_LzwQ@Rmr)PRp<9nTg3F_yu4m~*ZQFuO*EC_NZWyd;+JBE*dR}wzK
zu%j{%=qb&-3nM?hjpB=+@H#hd=X<<(<4$Q7JYMA^a(OKyYn`Dq%XhjjR=3t*xWH_-
z%<7%!B+KExl^^7w>oSdXtdwAA;yE+Q>p7LAl_!tKEH8S=D51$@mL4C;SGYxb?j1k2
zUB1I~fYq{<Y<*+H(zN=}Jc=`;Ew9<<vVxDCc8M2J897qIN2SH@5*gG?R}c~ejHO6w
z8#{Glrf~;NvKtP&-2*wJQ{(`PQy*)jIx1$!2F8eU>$PwaL3)?7=!P@fTO=^QvZhi^
zm+@~EdP#b>^2|s^9b^tYGm#7}4UB5Na}Fgy4i{WwpurpZ_d`&b40^#8nKONu|5{>J
zx7(QBe7N%b(guXby<kKVPF{3WH|-_&0gk4-)vNa1G<y~gXjf{J)2AkPY~$`5Dw(A>
zcO(GpGIP|I$2Uq;LUFFMf{6zPl};=9SN&P!8oYeTC|aJvlRW#z4g{nxBnXJU%?C#^
zFz(Bhi;~E2&IZvkPM4@{%oKqI=Bw}wS3B3sm(Ms|na7|l3FB%&cX@P$=7yWfec^)I
z%*5DWOKZ65t;j}^kkU6a$z2l;aB!PsZ~*k4D9>L71+;|24jDu#dnZ;6N%^;ZXDaEm
z6T{bFCHEI{hN~w6zrglJ%05qYIb%uz{w9%)l%oj4Bv}ubWL@N#&jzJRU3ybvH^Xyk
z_s~6~<>Deju+575yw;lUTGrR^Y3U)q0qE|WLr>087=%4a;}bvC+3$Ubxr-;Wvukzi
z(8v4@KmjY`qwC9!-QnL|Ii7m~{+QZK$Ls&~wTG17z}4z@r_T63DkR|z^K~Pa@6qab
ztl@>R6)Bsf`vs67cR~9&)BI_=VkLp9kgoGx4%=F~K4{cF`5Gp?$J#*%v9P@UeO<$5
zzdiIdFo-SrIwk+l#f@)pxGyaNA@<VWsj8O~V!5%HOjY}H?O^_~Y+_jNT>J$~ilH3>
zBL;6DVBX<71Lef_16hwEy!bHXc$WpE>5_`Gg7->Sc?)vycKTN(*$(${{>|j;i?`IH
z`?!6z$7?#CY6&t!3@zgZI_IcV@p7>mss{|NXh5t2hS2E8(s62R<v>!sB&+VPDvk2$
zkxcq&9Cjj88<T#RJH2x~#``S`;g|c<MQef@ffh-t@=APA?}pEG=C2Gh8QoZpBKu+r
z4YU(@i4$`*g)pHItrrl1_nH8&Oq<|s<Z~qxIO>ptKwSjlQZ>@s;1y;$n+`(y;MoJ1
zOnC14b(MTM1HCuj0mFoqte-w3B)leur;4H8@pW?atO2jBU#xp!LD+X757nHyZ;eE0
zd_=2OqP<OFJWG#(O7WP&TjkGs+>6nxb&53x4byY4>S!wtHKYm`qN-kY9@Bz^BaygM
zX~!^G?ZK+u#<)VcK$IR?@qMVM7B9~;RpA@-l2QStbB!g?0oPZDa3#GPxt%l%97*+t
zY~{IVdfl4Y)5P@1Rv83BvH9Ai<dGsm8AgE}+S-fu>G3J_NRc6q-j_Be1pCkTb-DV5
zFK!ro%x|MJTwl>~X1~?0qNb|cq~i`5&nP$kReTC*Td#kEDcvQD6x7pJelX0aWo6&C
z(;(>FUSfZL?iTTr2uEs4qgudP-wMX?N!iz{UDE@pyW2S1ITI7qvGB(@#!J~MCZE!9
zDQ<sJAU7Wr<d&%HCKbE%Lgcuq+kOa5HSfRZrJTkFAOtxPvuQ<|vDVi{xCQtUS6KM2
zSTLgL^8?cz(_56986eEG-t5vT(h;?U->b;)z#{>TkI20VWPXK@!h#+TOiWQa&B7H`
zE6S6P5VsFuZ@w#E!>l-mAvfkpYY7yv$#{ApFrt6`In>E-!3Wk0Ttu;ypOKp$U)NsI
zMZb4`!QWIGXze~1?zv*`vkbZSiZZgZdtyCbTOuiVpHvY)#uEF-s{B7b8imYdM$y+6
z@XN3U7Uhat8!R9QA;}sE=Aq$d$_Yp$O&nj;#J_&z8crXN<KT>fWpkdi%#Cg5el|cn
zvvzoc2v7szv>HX?p>xNISc=MD13BOazxCqa?qDHciFd+8%P3a=_r?m_rT+Fc{JAat
z;}@X2oS6jaXySa`^=Z2>OLkkL?O3IJ7pmn3TuPBNwj+^Df7aN0I$I;{qF@RJecgqF
zkA-~kkmThD0514POu!g#NODl>VeWinfMvIxcDN7!o3P@4Hy#vTJ}lC+4t)#+SSPZR
zAEt(c|G`a%r7fS`j0$%~Ju#$q9ho<T4hem09u5aJ27rDWLt6YwX%9#SmSzhWs}8<*
z8R)4AFhM?J#ONmdN)jlH?4S5&zWA>RfV!C1!H|<CjCEnV9>1S(Wcq-Uj-JLDM4Zde
zY+!=k<y3n`?)laszG{i_!@*AGh@de(@ET`SBJggD#xdbgGqR$Up6d=ev&pmz!n&bR
zxc*Lrf#JHCvw<W&!pbRmS;#dUIXhb@J^3^i(*4?)zvi3C8&ezkJD$5(MT%ugZYOh_
zC5E`6O0>vulrNfQ2?y|O`UHsGh)I7L-tV&af9*;RHy%XWd?}XK_g0yHWcO`#%=bc}
zP0{Rg{FUrk7@Azwd%k8>Stv@bX@JFzALbsoG%?iknb@pt9&Bnp>bluD67RRwEoOU-
zUnfRNJm^nmvPJGzSD`4y*ILXj<uoBI`t(_!1D#i`A6lk&cecCsqo@#yTOzMfK+xhS
z2WhFd^L;Wh7%akFo!#1rn!T~Np2xFIUU|D(^0e<q6#&XU(RjWL(uCO`TXWReq(OhM
zc2vx7s>!4+Ic7jDyus=FkEcI4_beW=Vr1+j?0$T&sJarV6WF+NyHDC#9ZCh~xzO0g
z>)gk%y*-!(Kk<zO%a(Z)BxWT{J<X))FzIhY>dG*C@)i#+TaWF@A4SK?`tJ-cc0L<J
zM|9a@Y>av|HU6PW{6GA@6R+O@)Y>y~-k2xh<g5zwVV4MdXLs-G>yB8BAI7H|N`|##
z(rj{5R*H#*w|ja;G0%3cgRO8<>dE%Y@?<xBERMK%m7hOg75g~;IDSZ?BpvzgN>u!}
zR_%Wc6!WgfH{hFnQ}8A+bYor2JqGI(D(>00kB~Gg;Z;9eQ&HfW)xRnlW%}7O-zZ}@
zO=gohfU*JsBeNeOG(ff+O4DXl#r?ycYE>;PH7J)oN!o%fbX9clwP2Ww8g;}miAk~}
zFO)u@lCYO$zvBI;_rX>}n~`w>^%@Rq%1cXqUQ=SrZENk!g#jKsau;#Z$kNADGGQ+|
z<7z_zsHY23hyuv#)Mtep{?U&f4$#w|1l+|PQNEZ5;|~_`4d_LR?uqP;9Q*oXl{UHP
zW&2X!f9lGj_Qy?yks4i79TeB7AG&jW+w^Veri2xKF!#tWkfy33-@Yp<MO@ii9msR5
zHa*bk70Zh?X0w}8JJeL$c~@+O>MS}*^Tf1*_)cJ={!=dQT*6xA8-CSSchlyHu4;}4
zeZCep!5=VbW9?WpE-*il*a!SN)~_6IFgB+(**RwZ4jpmRdqb0ux)}0mIbVy$|HMrx
zKSi4V5Rd9cm3J4N(mF(oNKT!6mn1BTwOvtXF=Z^`<%ip6A?dz!B+gIECp$F^ijAIx
z7`Fow0+WM=Q~`MY$MHI2qo@Ar-!Q-dmqBH1QDq7!S}li79Cf;pc+tl;2ro^1@VH$J
z=5FHo6CQl~x?(}Phm~VBnRx=NcE_j2SWA=_T7!x|;D>F+nK~5-#Q{0QZcb!=WFohr
zXGHKc+zS8~n3cy7fc&2EuqPJf{x5zWe3$@V@f>0lf3X}@^ngxsRR-j)b<AQIQ0Ww9
z1`V#-1K|J(upk0=1@CW52Dtwsu2=pG?w4Rf6UyLUAup{<on%@$0w~W#(MOoyVW1c2
zf8|yF^0s-)w+OZhVDz4u-KLWbz>V9fhTE2$AkQkyU&G!0>jz~6`)$^l5L>yv#s2X}
zmfCw?{?U?%PQ<ori1Eha@vUEl+L7FV9lv>+JhZQ&J^#jGOee$60^c|3N6>kr(HcE|
z+)S1J>g0gdXv2oDff`>$4dSHs)U>(^_Q$StQ+1r;+-Q8&Anb$0ZXm9rvyT<3aizYb
zB`bfqo;G@}3mF1-{$(&<kxjR8bt0`%<#faH)|*BX(u?9{#_6aXK^dlPdSEhHMyb4-
zMtOScGFbNv_)8^5OzYw*$+^RAUwqi(hIl`!5AyXgJZVi7U$~TBNh`Ru))T7{gt)We
zNb+uUNwXez+uA-g?Jyolsm`T(SpRVkPqFCQEn)^Le&jMcUy+dq>^!t!)QKc}`QB1V
zd$Q?>O&IArt<ZF$mDMoM>B9Ay%Yj;vo~la`k*g(dNJj2PXo_AIGk(4!Y1NAc1>_=z
z@$&tqaZk3hq5ett#nsAe*UU*CJ@ctX{3&x0i)fD7oMCttJdwcB*!=jq6sfIPh9Q;Z
z*Pl`i-;cZ%ly_${fOR3ln{vWP&vzyt?iA<7HC$GF1<P+2G`_0k<yCt~o!+t_zs{g0
z67V9>VUP%qq{@_8KT!4tme4c4->qJd*RhmVi-@W$EH&R=1t~wQDJw9Wnih7gqwR&!
zl-J76ssBFHU{)*mFgyjPUNJEWYm)~9z#o$X?(8S-KhC^=o~kby|Bcwx8#apz;PN+m
zKC*`_T?g3zWQxuWg4nU~t==4u6glFfjuo+yeHepv9t}>ZV%=_@EWs0>K5XK)q#cuj
zZw?lpoga4$Lk)EyTf%hbD+7}$`KZTmHR7_Ja2+r!z~p5gZ_Q#EhXkNfU+Y@dqt<20
zU1Et#Yx*e|{n3lv7jnzr-|7!SW<Y-A2W;G<l&o)P>(zR{X+N<NS-IKqK3VDVLeyAN
zw&%4?Fc5MczpH>ektnu}O1-4#Qg?E!!7iV|GZahk2zX;j+t{z>&NeHDm;fM;R^4HH
zKn+Fi(PI9@@UyV=)8S+uQuaQR44J->if>i&gGCQ}HePhkdPz%Dxe-|q?GLbKkf6w_
z+_>C_!=W8hSuv@7tf0hn=V+Jg=;J6w)7p$Zre908?<39ZN+rW(MMr!`8D_sHC_<Ph
zK@Hg&>Wn|s5nRQ+Wa8(&(ClA)cW5ftVs9(b<FQc3$e$i-8fejObZ>w$W(K^}6N$Jo
zs`Xj}`Zmw%j;W1!tTlV=8WN68(pEBI7Nz^Uv$~c)gstT?=y|(R=3>Dl#oAwrj^1C(
zlau7cDWla}IV+S#dbf5-p@|L+k-w{xR!{7w99LWIVBxE@f(o*&mER7EFk5S!Ht{pz
zX%MNHTXWZ&mRBt|-Y%Bk9?=!skX4VY=s{w>cFPs1ya-4<z4A6l_O`}*9hj8#5XSHB
zEqYEJbtWd<%hyB9Po#&HY}Y`P?QDgOK(Z=WOA9d}E?%MO-BtBDFB1Eu3ezfjdv`?q
zyyurzU+CW0Qqrv`-B)%>mOpmINp`dM8f&F(8cd^>uzW&NX|P=8p(GCys(~h%xtc$i
zwzjq!4HdXIQ|-Ws^R1HzyP+Q@J?^wlnxwB*-n2hw?nnVV{V}71SdzEnOivd8Eh8{<
zsYj?m*0nfr<Xee1A7C4R%P{3V<x9(l;f?%ETCDNc&Gw(C{C~JR?+L0Rj80ITV#c;+
z!<1n;F~t}%op;D}?Y7<COH2OH*8lnz|IhFGk}#!Hq;3O)r1lHngJK)Q%iVuI82<TL
z|1j1Zq0DQz9lnEga_TzKEYz#(9O}IPl$MRMp6IvAB>0$Kqe?Wu?ea9?Bqu%l=IEzz
zP-{FR?m>Bxv*L<WJY%9N1UCo=74QXB@P^#Q<1*V|p^Rn72IHyUU5mtmW@#a`&RH=T
zuKDA(*~2<{TUz}wSEo|omK3Q-7=HI?uTolBqmpD{4uSYu@0;L=ORF@yh}0kNF}~L&
zLHSmo+4i5x@>I3?{KDg19lt8ZIUG-dfmybF5LZ+La!x+Wxwu`f@YEqxIu1C2Kn{{X
zLHC6n#sG%}Hx5AX#)2>=))5hc=HIkp@YEM;V-b57_ZnAQr|EK+h)sW7L;e(M{i)gd
zE(h|(EIw144m-b_@Fp^;+EY1PHH<N$oVqx&fN&f6#*>N8UMRAc!IO`T`j{Ngxz+yS
zpf&jZ(rhvXK9Tl$95d!F92*zD6kM4!N8Y}5maV(z{*a|2V<+iNq__5BzKYdN>AB*k
zm?0(bHvr5)yyoDzul%wxv*YstkNaairuMs0pU*RRTXm3^9VvOm6)*lLxKAqE9sRum
zh-ZQQ-zm5sunY0^=f~azz>^4u-sg>gz2M2}mV?{t+r7t^n?`_M)<caLQmtmD6&bo^
zc+^M2$xA>l>7GldqNmG}$`NvHoSHTJwhNG7vSiVj^Bon-js<`TmAEGEiQ{gT?+o9!
zrx2ZPu~Hg|Y~{8+BU*l#jd~fkkgKsfMP%$OCjG_K@?Z%kU_Ec1;cgu+ht_Q-*cAkE
zh*hEcu&hEwomT5A$e^_opRCp*E=}H{l*YOgH<Fjqk`eV&jsu&6P9j#Q9tCEN<d4x!
za(B`M67f7k9{^{~Ae0(59{AhB!&Tc=!(svSK!CSf4ig1DF{;C<Uv}Z6=TF>&l&qC>
zxAP~l=dDpov@3-r;!{Wr>FvpYpHHi&Djoph@6}NyCR+cWUGw3rowIX(DP6@ldyk*{
zpS%GN&Y+x#xL-;Y|AT_<eqwBHS*aQjW1$L={75E2C2S8C(?7#n;70=s$ouw`=H$#=
zSM-@zrYkTDx0bmi2Ij8zUAwoZI^L7xw;c5uY9G?0S^nxi-%kc%<n8k#hS!$sXlNvN
zoTCr#^+^ap>g<AFw4`Pw`9P)83nQKJ&EIPv;i@zzb`w%37%A|kBtCIf3t_&hZbEzA
zBg$Uh)YS03-tSErxP{Xr+^|+P633NV)=aC6V5A-@!<FG@NFDt>Z@)y#uDP&eD&qJ+
z<ov7shI4t+#d$xppa3f65@i_cnQqX<7;(8v0u{71$$MXPAWLNT!-WZ2X|VY$3Y1gq
zfO$hRd%yEUq#{U+MwTErXrheRYv&z)lj|mGT;o+WxZ<(@<OeEzsqB;~@7Qa3pc5)z
zsP{Takrnm26qI3FD%-fRvfp3eV<`1B(LLR1;t~+ejZgmyH+k6sU2L)L$7Pk(V%zz!
zE?=V7om2~Sff=|r-2k@h>DNEyxY2+RG+An)n^5JYevbjUq}P2KN_(HPi2E8ojs{DK
zCqdC4i2(oziRBcsk+ZS|ag!F*2vpD()FhPX<K>@e9)7YgYA`I(F4L=|Dv)m|(zZTU
zFaEW%I^AKIoNXBh10HgO%h?v-=9HJ0C&e2Ku{41?q`#rT92`9Ws2I;x6kF@NJp;o{
zq3T}~b4uz<<z_Roy%|?;R&N@YRC0TXAo1gK@>3gT&Gx%%Ehk?5rFWeFD6P>IW}Zyp
z{td9;INx5UfpnC^L+;yS3|RB~NVahD1Fgu}rGT`k%h}Uzb~09pKEoh=#kS(ve6ih#
z7I3=`*r;}fZjx*FHEGiF63Vc>POwjVoU1*z_$+6xnpoJFyEBFhHS6PlCgD0iz2L6C
z-u7-zy?cLaS;$bjf0>cqc=WB_HcV=`655kc7>^no#5rCvsxb(lG6ukv5y_GVP2Rfe
z<A>)37Qq^2#`5oSv)`=5k9<xH7AF7Nsp{(JF)SeDAlI>c1U8gzn_ZQ2v=5;EdbI!A
z-go@H$qJE))r$P8(!6Fw>o`_;efRZ9Jb`4(=(a0r5SRjZg;|H%T;tW0V9LKRJ6R*N
zcb4Gc`!bJHCsNWIt50SsH@!`xJPm|)pl!#KWkq9RNIy(drr{xKG^x|(>M18d(BjRN
zlX<Oit&ztX_)yl&%(A3q@sjZSXmc^43>ehR%~G(7`x0+j5harB{laY}BQK!zwV0KM
z$-pw$l8@sGpXKN%9R-;p5l7e&el}99*ou`^^OI{C=_^f<0umbaz>NX3AwiirH+{yl
z4o~toFV*TBYi6SP4LDD$UKNlKMFg&2fY{U$OKvJ47FP5Ln_-02j_pAa>-tN#=ab;0
zJ!CT`OKJTYB`RG2mjAb6)6~K^O^rSVcUd&JBn@%J++e<6WG8tp<`I*LFq<cN4;_i&
zYki;raf0%RFx`%8%XLjWk5SNM8e07K+-kKRnIV45q2a|6Wi4R*1RbQIyjFf?;bkx4
z%O>2nd;X}Q&`$J@t@TZ^9p8g;T45%;v6JHXMFZ|FiveblyK$qR#U~7UMJH=!^y5ht
zer4M>2j^I9-PG&k>RFqV&`=b1BrUZl#WFdbIyqDDiZJi?L{5R+tgdWJ$Bk{pCCu3@
zMXepEp9FSb0yAHr;jv^9!CIz9$VRm3+hcKI)ArAB<o|Q>0y+w5K=M{Kv{SZ|*a41o
zz;B$o`j+7p?^q_e7w3#fbEQaKk%5(?K`Ei6P%E)`1GsWvn8BDU>;3Zey0r3p)NisC
zYZLQ_s3>B%D4)9Y7^b4r-*6w%esOFF<I9P0Y0=Tg9%sjUPl1Sw-$sMw>Qow<_@&O3
zRxuPrhuBkTjKx*?huV(nt?1)IN6gy-6{EK=>a)cag8r2M|DHwuci|iE@0i;dJW_#d
z_KcgFLZRot=k}e2t4%z1v3}n=-rYsYt(6Zv-S^d$kIMu7Vo(bcc&!u4yLDh-%GZ#*
zB?c)~v~u^q1#Ja$UZl^v36trntNqE~Zr8&R!-UUeE-2%8ekVFt?w{rl?L>d^_${H7
z<@~ZIze4gjy6!$Hcodbk{AWh-|DenM8=%XGFi5~_A4v!7)vG;E$x+Y;K$J@;iFT$c
zmSh(B**dW+)N`W00sJKO8`-cM>yKwEz;23=xnA1EaMrul)pf9DpxaQHO&oE9HmP<I
zceQP8c<f+k2!M+?m%RW1J{{3&S6(}@QCrMZ>!gjduTmkaCkx{~*gFw4wPjC7pBbjE
zM02#O^`I_f(5?|Do@sZV*F+Y1?=RwU(y?)X$=cHkPeF3M*62T@y1;jKZ8s`qiT3la
zV(S0Q30N;<cTX_E*JOG1qAf?bui~5+?qP!T37ZKa>o3a#7aLOd8Iui9p{&4Xb?5T=
z)$cv+^W+nE(baZMwqmQRn3ys5rO|SH$BiayqSr7{fuzcmrby-aWyM4cCi_oIJiyC2
zQhxxVl=}`?^wt69$*BQSb__Qhtp#q?ujE$v@oSwwF)S(1-#bw~XgyJmp0zJTXIu_h
zu@@`puAq-5@`l-0%z{~HGyy6jMhZ>(xu>xlqe#F~R-iTXa28FgR|%sznLhuHZ5suz
z(8WyDV~3oaRBn3<qXuuc@UntLioUQYIL1~_SXKtt!s?S*jf6nh(o}IKlk1w(ZJVF4
zv5<~DTL(KW3JTfAaT_GrTV#0AzE+@u!Nqe>Upvh!eU5e8@ZCX+8TN-Ecc`@ROPrj&
z+>Z@4Q7t>}ncL%>I1!X7E;|;rC`jb)bP~<P)UcoIL&=tlrjbfBLuZA;_QT5;s%4V0
z^Ccuk*=iZMWmZI`xeGUvJ-{-p@EE&{xmoM!z)Xh4PRk)>1-t^&Gl8ZC2Bt9EsoD`^
zebFS=q%PKxh$M+ZVq6${(;omZY<3j~2spt0fN7DNkeA`x#z<4$7LSg`+cWiBhYy7L
z>?3r4-RvgS3WDj~3EIDv(c`r&)+b#&-oK<y=zJN+@g8}j{m=mw3cg2hL4Gbi>3Ed%
z57iu2npSe~dE<kZ<)em#m^r28d%nBBdzms)){7DXI7Pw6Sb1O~ojqw-em=p-N{2l1
z<7|KkR~*bsWB<O7qM`ablY)zzb@Ok}Htr4>R+`&V+?pi`B<%tmAifV`RZK~nb%}`{
z7i+LQYgF3U(H-mOiCcd@SO;&D<Nc5M0kE}0JB-+Ov;0o3S)?`Tw5?4Dq^r*%&dp|a
z`tp^F@G;5Tt~x&qZ)nfytnE$x{6^QkmYF@>n!}We_X^1MiuYO2D0+{?12Yqj3}z&Q
zehq+PpW64-Th78-%P*v|^lu+vy?kOH-pwD|l&;oOWsDnKIL2bXic_-yzMjA~oqSFA
zPH@y!8;c=Ko)lCJGKPHe51k)~8ZhSZeKFs!6@)4yEblx#5@B$d&4w}%JtY8ByOxAb
z5tMoeIYz{ns4eO`%9-Ennl7z2&)BaRQcR`NX+8zzMyhZY6BpnG1+-obAL=!^EeNH@
zei$;8HKDo}*JBd%jX-sv`o&PFU6QymBgAXx(rd*y@v^LmC`~3Gl*N{yq;7SzLU&S^
z2HIy?)g_p#Vn}jn&}(_C>CnJaVY*Dl7NF8F#FMb-w6a3KLUUnbzPkawB#y1qzfMVR
zu6q@%t3!f)A;M`MH&zr<aYk098xidLM9u763P}5yca_zPt;!JMfh%%4fKp1(&ZrHx
zG2$u|V^enMoRgn{(lQjz0Ohl}_FG2pPW1u`d6L{C=h~^Y-%lqbZsnE9VDL;^t0}9=
zvCY-Wv#zns?b7hMez7pwGk3d607*iy%CVGMb@trkxIt-hxTvW@CAf%Bz~FA@uxOAu
zHtEfYfGDZ|<j;l94J%_4(;rghOZtsj*Y}3a@m$K0KhxNy-buoJ%?qj3UeNk1Kh}C|
z*Xkf_;j~R00=sH{#6Cs-4REKuyz;P9f4<hrPM>l<uH}~PQ#sxCf#7sx@~8EIU6cPA
znE^sb$vW0`lYMvYl#N;bR=)_*1Uvm(rMqESJNKz2UEL3^O&{20afR~09BV24A?yGc
zD6mW_UQ3R46pS06uzb{1{@5<^s3Z=6Yg`un;1<nzqP?|qFJDkwfnT>{DE76%uumG!
z^mP7COXpgzS@g(1hu!So1Q>r6{)^Qf$9R&fk_3G@N4L0N47h1%fP)nOV<-am>HIgq
zkPOzfDt+fjw?7Oy{zo(;lzDSb?CQ0WcGK<!FL;k9X2uujmtyewL|y|i_ur+Y?6M!%
z2(B?Ui5&qujgS0fW6hW!$^2NYqD(FW&Dv!<8m_99C^jD{)6d^vG}QQsG^D=Y_H;I}
z$ugSt^)bRHX&CMdt8HGEC}T$=Sz0`JL;K+TD*l&BWe&c!G9ji$i}O5QTwdg4iGsH_
z-<wbU{0&bwMriskAgS#5@;Z!o+Gp;iN0l~lku%c65y58u-m5>+;x0Tj`s%*M7Us|`
z{qdRpCqn!GJ6tyV?}BoBAJq;8FtGL4G0aiEMI^)y%gRzRdcmj*)7CicVrvl$ZqjNH
zls>*XR2FOxD^jv`RMYWdw%ToFwqN^cyPKdZLlpF#glu)zFgXSJVDSm5V;_5t&vDX^
zyoLJ>)70q!(aOLa-hR9jbBZ7ZXRmLKjn2<_?N_xh@wnZuyhSAHEp2Ws-@S!!2Gc<c
z#AQWNvgk2I5oSgUAn5FAcH*+I!jCM0M$4KF*wQiJy$!RGS-0PLj%9Ynv^*iPPSz(M
zRaz$WPgZ2(?{#ou6ijZe#k(EaS|Apz?=GK&Pm(gp$2C%=n!$~;c7^+L1OG*iZ_2&y
z@yEnE5aTp3g`ntnSP_{BMnj>U=XUGO+qS9kQl32}E(?@7_(l27i15LlMB9h^5etf8
zF4Z$_i8<HM9pYe8$Y#7e?oS)KFZ!Z_G<Zw^CXKk1mWUx;$u}ml+A^d)Di;m6o}u4D
zBao6hGb_V)5;XRiSy>&(m~20N-@GpXN>8bAq2<T5>^EvimQptLM2Xn!G8lTrE6C}U
zPJ0nKj`3sgrHv3kh3#ph^=j;G#O70B-IxwGE>&han1srb?^j}JwRqnn#j4R`U}&$K
zqDdP!UN_ohLVq~zjxXxR6gfI&2;1=kF&<b~`Nn9y!FMBEDngH*Zm^!zhqgwHRWinr
z=HRsy-m&b2p-ULt0h(DUCp+?N2im(wOI-Q517`bqee3qw5J!nF^SEkq>~3A91(K+D
zthGCC{ayWEU{)a~xB;BW$Z|qJPaO+RSNu8DFI6*X-TL_^xQS%CnsU!){7NY)b9T&Y
zk}9v4>k0L0=2;Z1isd{<%BU2V_znmAsL1%^T~^n=B_vTh3Q<h_P*`Sor8<SIy^lWU
zR>=L*_D9@N1&<Dq&<qP+l-I{LO>|=S<{^6`ojE!Mg7ommjA%kSj+4!*LeBW1E(NBr
zWr`ypRf#(-2I9>hyYK%|!qSp&nI9Vt{m$XO4dr7gFaK)(9b^@Jyt{Q7=NxH90M4jY
z5P6Q7AZP3yRqXBT|B;p+Z-$MHJL^6Lj8~2`?TX+l39lp-cH17JSZR5IJD_WpNQo*D
zHu@2OU6qFr)NgX;!sCYdLLD#P(Rrc!<LS)7d_{N(V|NCnJWt6uu!M0YkXji#aeKS*
z(@p+fex`hqumLq`Aw-@ty#4q%UYFm`qQ?;p|GC<EuKh$8`u5T%Vnh;8N?1fjYc8_a
zDz0&BXayrO5raTRiS5wcK&}uC*Ww$8(Tnh1ahz{V1v)Q|&{2l83Y_r2Q&v7U$!)rp
z);lafb++fq@w_}rJ-#WsaC+%gh$t~>6i*lLHAu92SWQE#%T(}mB{3X;0~%9P$vA*z
zE9qf8$I|^GBnu+<2)kDEd6&%csnJ+cYRkM=d%woqk)lmV<X7gYYmx_3fqPEntI~sD
z_*|2g)K)f6Jkn8lrqW$q4ON#rQ{C_ZY6nW0>bHy{<u>GuEy}$0N!ocXVi-<()LIU_
zw2#wy#b&4?qA%=ZG_8R}NZQzS$`#TCeMkM+k)EWafzi=L49dzN-QoPo9Y*^JXM-X0
zwl<@W00(&gW6EcD0M^mQ!RMY3(K9E9{ESu&ilx>f-1rZVW5orZrB@zP#L{BGatxx0
zbxX>3W?er{5BX7rL(kUkpFUu&?`TE<<*oFs49zB<Kd3p%n$fx)CEI)dcs*c7E~SWl
zIQ4lQZ(5V-!#{NH5n)illK>>^-IgW$eqaLpGMI7;e8&owi~254A3KnMD$Khb%$Zda
z^J7k5KVmO?IzqA7F;QXN5xZatNH$S@&T<uV^J&C#)fLQL;&T$N)e6gX=Jlrzd=>tk
z<)4~>ZZ!m4k1!A7;D-=cM&ErwvTIZTb=Ks?y9S`r+W_?aO7ERZ`>jv$`mbYby<aCa
zns}Fdpt(>GBO9$9ps5lDb`24>?6e&VFzc<EkVcQ6n{Omq;1Ddo#1}F~G7{6oMa}sl
z^<JIiRbe+0#=8$}?4$u*z)U5|@ZdL`K&$5b3f!gT<#g-0a2Q6BdS^;V1unN(=w1cq
z6%REEuc!1XXM_#iv{QUT{MhOc-zVChDo789a$Ajya46k{;ukz&+0;)v=J&%r91`lO
z_!w=~DxR;;_n5ii5*h9I)L*dC0Hm}I9rqTZY<l;FV$e%3wPLjvGrgj^`VW1yMp`11
zl4qx%W(VGEHry!gm!5YWB!?H|GlJ__%yh>y@(5Xq2#$ss`wG_N<#t@kK@u*Xmvz6Y
z73@?zg1Iwj;x+kMYNUh*ss$uSHAb1}5UhB3Y+hzcxCT)RHGR@D=w(3t)@gE8x#6mg
z6INV9<SqBfLMX;sN9>mi+nhtk@bFEGnqzIP+sfyc#U4+zx+c7N3R@(*tj(4S!#F)R
za{MUtsEhP2GbVP!Y;*OodX089rjj*a+Xt^V3*b#iQobz-q6xvbrt6{Lp`5V<8dcB=
zRS1^cYiy2XaR5S!OV5Ub9#?$9m#(Qir6r}Q=>BOf?PT3nUBlKzHbz2xxLh8i@Z93P
zBV=;aWKaQv?YMfaveQIMg8aO@oBWM1R;(Y-iylYpqpbxI1V55+3M$IRXSDleV_n8(
zc`n_G$TXkv{i2p-fy<1H*Wt&d@_8E#qqm=fceZQpt)-;@US%EHs5&pNIcjk7UWEJi
zd}9}qvOSAfU$YgmbFm!3mk8TL6IXR8CpNH_nm@MQ-ved%KA80;4DEN6qw*ys2+sF!
zU4Os-kE`&>Hs8TjEANTdC8F~slh3dn>JuAG{kfW0{6gyA<pO`^fDD^rW&s!!!l5Bp
z5D+|xB_@m4x~%k_y9S|1lkj`5rLnv2#YE+oyrzbjru%k6oymJKiWP=ZM*RM3O_eJh
zp!chrd5jbi%C6haz1r_4;~c&Ws&d)n#yy809gTQxwSG_*3YzYy_+{rpTZl8;+dC&a
zk`fi$c_KaxN&N~0of|ty$OyhbXb%o<TaC1A!pBIZkR4)ZBKu;A_QGEP^$dFfVc_0*
zM_j0DAk+aH?kt4k42G8IIq+yBzU4nUy>`xFKxR0pJ|}I)ByEQFL_^LXzO9x3lDxsL
zp6rqztdGRk_Ki=8NbVRPibUI2{V<l`X`I(le;1nhUt6gdk+JdfCEVI?<j{8f9<4X|
zDs?=;g)GI+N2k<(XMF+s<@1G-)YU+7d8ki!&MeB$On6Ku*+QD18-w+gnVlh+n80*=
zgX>k@LjPI+TY~~8s(62Kb|ZqZ+d9>6&%_KHhuwVFwe<2$oZq64_X+7)@wnY5YaLHs
z+f!q5677X^Ck7{3jstlQ6u5mRQ5cHj#0Q?FE`#<Z75H6Tzoa-JPT}`PN;vP-v#{95
z*UpZRBi4nms-<NH_BCd%QQJ#ZZY1=^#--bKrS)6x;pnV%85I+vhj7bq%pi;S4>x^1
z4ewwPPHhqh%?=aF>{Ag^XIPxJc#(T9^bTy0?@3ItQg>|~<K(hJD*}ac5&ZcNCG@{O
z{12=A!2hVq&p``1C(lA+kn>>h63q>i29yqX?Hd|DtiB~OwD`Vu>?umvfPQ3U<3n%M
z*09Kb4;J^?&_2G3(RO3?Yn)y+k%i`XwP2XTu&S!s(G#|8_aAy@wEodk^Z#+6O@rZ5
zkVVH#sj!QL^wHZ62A>9YBl@2>c>^COk(2Lxs-xf3@Lg{Z9*MR(VvN7_YsecmNitHs
zguZ8%?vB*k%ab{DFPi?$Sl6MD$Z$XxqTV=Rn_1a`Q{pn)p8D-(nmdCNgK2MT&a5V$
zOEcAUnUG#yFR;ND)h2l(A|$@8lK!@3*LoOZ#r$rVc|=agsnn8cGVYDR0Z9R)RF5H1
zpqpi1tDdr2v?n7^=JOBwm%_(`QyH3atNh1rF7GS+m_L&)qjrxHCg5+2B3;_=+Uk3C
znU%ejP5lj^k}~cUF%XY_LZ9qfJ2&1=MU!8YyN>I(4;SoqRm~C~>h6+Ys41VV#v3W$
z)9j?gT=5*uE^_pvH+hDH`c!K>e#ny*M${{d*d5mL)F(USFPK-FUrh&vK4!-7>=Yfv
z&?nT27Jpw`bUdGLDeLZBG;NAyk8{F@?{ef#aeO^A67nkEd~X8y&U`pA1vk&MJiF-g
z?%N)!bk$w=bKR4bfs2$P{tJA$xwzBgBqyUPQzt6mZ94Jt)HTjc`Bnj;VB+AJ@?6w;
z3dxdg0yUBJaxR@|KoDAn)eHiO;aB5%B12EEwQNjt1CNvQ79+L;m?0B#?f9e9z58-E
zD4Mwk-&$YQsbzdr&%Y}rxzq{yt*^BSy4Y4*!>2Vgt3$LQd5N(>eZ~O@_mr8h%dl8T
zqeffd65fIMH7jCVqC-LuB8+CRfyXe!0R7;a<s;YxOEK!cubj)@QtJ~gW8=Sc=I;Zo
zjEUq@5MyjK_KYF~L!g}qe%{BFqt?COQN@m(^g%;n`4t^2Nb1f?#jEny9a@*CEx!RU
zoOKZq`QhMBL!HQceD&HYK)dc3(Qu3fD<{83*gjW2!*ul}|C%jm$j7Y_aP*q?49t<j
z?I-qCCHh-Sy<hdT=f~wER$Ce$<ILmldglWx*_u9TNDbD78rpQjYms^))Cx3$#_|^n
z2)@WCnAE9(52z5yEeB|<PE@=)BEa?fQ`7zpFD<NNMD^LvD@n(IHT)gMEj!zr6(ckQ
z{otq+S`#kX-XyS{gtR1Ya$_aHfki*HTUm>1m<gpk{0}-r=;O`Br+bm-BvFq?x+z~c
zw!bHSVF@f&W#>zhp^lnkd(Tx>(#^R$BLAn|=;zQRxC!8423o<~VcCTNS~1>VT;tu4
z0%wf!PoI;|Mhk(X^DcH}w$lDou`Nz<9~9)+)NeARgt<vde<XT+B8bwXZV3;?)W~aa
z_Z8p1(QB5QQ$((`ZhW2T^S{`8%cwZNZOyl^KnNNn5G=vng1cLAcXue<J;6OV!QEY|
za0u=MmqLPT;SM3U{(GO^XYcMl_ud}2yYKnXUux7l7}OeX)q2;O^I3EL9x3J`Gu<xg
zBBvz*Dmq!|HG(Pwt@j+XgQ`(3SxLaM!Bb5keMRyxQhxVLl}=DWE!E)YqGtdElz=^b
zKc(i+pf;Tr;0pAzsTG5+Q&HHNNPTVz9`)Yh#e*ArZJ$f+LVda3WZ_fZLDh^q$|pL8
zgzd0rpdMGRdHuqmc<H`0Rd{p|D_<MD7@%!Cgd;A07)uq}Yvx8>ZDyug%y+MVyB{&s
zTU7?b<Abt(Prpxb#wwLk`9Um^A2V~~VF%p2wq3Tx-KCwcYG}-n8-am(mK(I}tTR^q
z;~4$aY<ibkjuyh2n0Q`U*GGkgm_}2_s)~flm%J@d{jPyaC!%R!M%y;DdQ9(%akGPN
zykNjqwW5Yo+#F992@8&ixcTPU@JO^z)-Ous__2%SVi^nNV(HxuW};L|6<_PPCOb9N
zHO<qj#Lx}7H`pv+gHBE%p4-i9pE^-cFkmz;Xjd=4<4`&>3u)g}tWzVzfi*O7WK_vV
z%UhZnGB%vLr8QDc)p%}r`v}Y!i#oQD`#=OZh5raHhjm+@pAD_t&;xb2YH4=8yRx5O
zaxxQTb*Ilf8O$#)oo-WTow?CF4f_2zDxvclw`R?D5DqZoy4IGYY%9-sRD+-4@o(Us
z_DSJE<X1r-rDymyYx%#+pzxXDAp*otez<gQcO0Yh%(ZrvS4P5lWvc?5dNoe;X?iua
z6HY){WgOgIes@nO7TgUQfLI(HAKZ{nE2SEplmR|nWBJ?$UhGZ}jv-For3UeWeO!;b
zg*5qq(17Liqt%4A+V&jM1+b~|r&hv9?^n|&-}7LBgXE%9K8mUX6|M8C{0Bq$k4E^3
zGhS#&G+KBOQ01u3e5Q(;)t}KWzSW7KtJoPIHECn(wxuj}8!1#M9i6(z?<MM;%Q4#P
zW$Yg`H7Bt-r0pO#rto$1nL7)Lg$eKs%P-bB-NP)IhVh&t%f0sT1ilJSf@SS_L=PHw
zELO*vV>^$OS$=H;%^&nNOmwDfKeK5E>6*E+HfgIs8NLcI3YJ-wV!KVOz<%M45fVf-
zb-Z4C#kH^wESN^`zz`jiwc@c_lX2jcE9!8`Wj4(1(8w;3<J-BZ++YX>eMiVt!<DRh
zs7fkIvauOz#oLL-Uf;7FzM5&+VSiJ(HmkiyADT*}ww_RlCRMk^#)9q6;yPW+EvCR<
z*-V+yF1u6Hu(BT)yR%Ig(=Sa}mHup@3)Ub(**en5r(WB}Z#Q>nlpA48Y1DZOan$3w
zDaK#GUkZ*4inL6?1`=VC&Tu9o(gKiO942tpXwNaIXdON-aARYW-h}m-u;Ok@V}=(?
zWx1#pqYOHXP{LZcS9GAV8~h*(H0t;t2S@a=#Ded!Vv{YTGD6l;QdbLVY3$D!+Ch+s
z>@VyO8-iV1LZo%baKGR`Ab`o#J-4-rNm5sQogJW4yi|wVy;H|RnPN~W)eO(&<kNM#
zv4HG{t1GQYY2)qg4<ti<V?h9){7%_XoKr)6_Sh~kjFc>e`p!W$&}cq@EPDXepx-MU
z85|Mey1jM76A#whCQwmWTZU;Ri+egZ7QM@_#qIdJqIVyjPaU3`J-s<wo&U><`Cs{;
z|5Kg<vl#g4Qk$P6dXwhnJNy{5!-)CKPIzIQth2i8zs3OlPcowp_WvtqxNz)8aPa*D
z*cTCP1PWcpAB@rLBXKyw1zmyOPU_g+Pi)%m64Kl<-I=bpl5Qonoe^9bluzPw`MmDG
zo{LTgRftgU@uq_<Hlk7}h=1u%2-rY$zQ5B<O|5nBdcxFE^J?gAw`UX#t2-naMv!0$
ziOAk`GDx#~7*W%A(6o=SvtCd++0v7GgQrhI6?5GS`1~6V$AG2YCp&mX(`C}fZ61BT
z)M^O<!D75>vpL^UH}R6&ejwITNq?F#yS@Fx=_{Zao-65cdlRHyVPpnLl$(xVfDO=`
z5jNlD)8s~2Sg@^^U(QKa`PBH<B%R^Kfwz4+D5J}m#&Vw}agdXN8iTA}3Po-WfgG*9
zv4;(k6?yjwu07Z>R^jlIVJdaJ>tOzAw%YcZxaph3$06=@(eUPvXUBX`4Ot1YneJoJ
z?5~!-B!MYSmK3^75dbm4pHedHWOxzh@q$JSlYYzYbaL4g7lq*q==lw$Ud9RiAQ)72
zE{HGu;BZn%-9%~815IRT`ZSlXj~l1%@TN!-l?oxkQN$F3r_0@_V8ZEeR*7&?)l;aV
z-lN9;X#(Z@crdb!n8DI9lg@$U#78oet?@6QLPMP^R-`uEPp^4O8z%HwW5}o#mG$EQ
z*~pIX>l{2;vI^!4%n|4}N6QNm5S&&=>0Plx_s8Yow)4n|I3{})+y>qJ(FCg8(z$1H
z+WEV^1Vxt$=vUiRPW=~K%AAzi+%e~lfeS|j;{IVl>3{W%GN;~|Sw8{JhJO5_JsU<2
z+pfC+z@ZO?JvjkB>M85`&ok2<!ne4Czc}sj%&oO2Ix%I3oyIu&Y+Mj5`227;{k(c9
z&+KculO5Y)HXoDAv<hWNq#}@5<z=MQj`?bwj-+~=?f;?C03q(u<2O0Ic|TOfgN3vD
z+dHsv)ew?uKiJYpezk_BQg_}<V{%fKfT-onbX~|~RoM?(zgn`^Lj0EguMYru^KHzL
ziXJM@CzpKLVWVpO2GqxlWdw?k9O***19cKvP;YNCn$0en+u@wFJ#mSybrcC@zk|6Y
z`f=)<Cg6X3Xh)6~G;qzcj2DAXeGZ5zvLD2DQF)gmJn8K)Id0wl&4dp3id90sVErkf
znRsAGvQBQnn?Akr)pWaAN~Q9t>83Mjr@Bt{QU72#%w!?_#_2H`$(kAbZw`I`BtHC~
zwDLb2C-s5thUPO|Y^4GXpO?IX44r3<Qz5UUFoliLuzv<0c4ZMO$G$si`Zx#m!VTd<
zh=n6lCOG-JyhJg<Z#uRPgQV^k^dz7|p*S9RK#kfRouRQcVYXZnh1$mFIzm~PPbWv8
zO{pTWX2i4F9tU%EG#sfJcTm0|OY5|mkK&^dlM5q~md2`(V^7Wk(GL<1;(u;d4xRbT
zv3BNqUA;8B6kNu4H5^cRjCMVlT^!kOMnX4R%7+>^ZY4P>s}XYqET6aKDl6DZbm}}?
zcLe&aJ+Kj6yGY^B4vLEL95KDmhQ{Hvza}jxeoJdfrzq3SU{(fiT1M+h5s6%3!m5?s
zT{=a3Sr}q8zJhVo1nQPnVG&!}p7OYN5=3i9d=T==lBBA7KU|3F<m~Ik0Ri4^<b&B)
z(}Z+7qwNf|n6j}M9CdjZ6c#!VU;4AYfZ3w0dA=t{slmNwA|d!)Nr+8LA_Cv;PzNV(
zjNaC#MepI`5)ZLl2>+?+a7gdYy!d+^sIznO5b*~fDx|cor0t%d=jj<@qnB~rFvdVL
z=i*@{!}$jgx6*1Ae}Mt0C+y;>x~a~L4_mT5oP2_x{27vBniR@~eRYJZR2LZpaViT`
z5>i?G0|-8lPdin5iG~PlpmpN;e-+5F`~!eu)Sq?xhd53@k_`zwoZVA4Z@7PkC{`WX
zla&(M3GRF^$ysU89%-)x&i$;gm|#6cpZhu;ElcA1UHO|v`Pz3C_OEqWy&Nw&S%rb;
zu_$22QjV&{{xRxHy}PqDjU}p)eRPur&cP>b$%!A+VDyRZvMnIGjquRS4`EXq$Y+rJ
z0k=vkHI!JG#NjjbnmFz;*K1m<7(-7^zEIv-`T?2V?-Dz#$pxsLIawuAN$O2PUu|Dc
z8Vr0c9-_je7%QrJLgM93kwmJ}9bKC}Ee-~!cu{76-&h<_BS`4RcifPfuX*AflED#Q
zqV6+Nu<Jn$xf}|`C8Y)F7Ac0NlHb7>HUs1}P|~RpSzM3d;sQNx*)PY6AI#?~9`)o+
zoaWeY=>|@egH={sqSE}5eD7Az{{YM#soqm0K&^YN74TvPw+<Fn%Z^NUUn>mRNWHF*
zW1HY^c59(b^_)cP5#fRzYz3R3-$Hl04>xP&23^XMiwTP6XZ6+|e&_1!@3pteU7j{J
zWzVd&7p;~D|BsvtCZOM9k{uphEO|h0%TR<4TLaDYKp4p^zOdTp4CXNwHLT0@Sw;D+
z+2kwVHqsSOM2^_n)oj{iJ9ntB&GVpmtf7WPIt`6&6Z@oYkY}%mbq`-jt3yX+G;}c=
zlP|LOV0`slWs*JSnKV}!RDWndpMcxCqZKs%cJVbuevqj9d~~Sq$5k2lwTz0F5`ZEn
zTmvxV*_R7L4+SO2`xmR@7AY}6unoQ)=KE3~#gB3Zdh+5XQ|E<tTRtfRRuN1{^+DU<
zQyAG0dn3NQ&Wk&bR?uWAkKcLA5fJ_Xiv+yn9TefTgx~`o+zr^M_`r|y2avr<62uLl
zjX+jM^w^#Xn|pQP?Q3=f?Dz83?;yp%MU=7X&U`n$e_eh&w8@op@Y?C$x0ruzn>bx)
z1%dJqfJ4Jxr(`dvDXKG_vvPEXTF)+*0q~0o-k&CixJ=Odd7xd}Fkl~qlq|d<uNb9V
zj(6Xi)1&6OwfJBxKyR3ilPS$^;}UAs^_vL%{)OAY*9E%Saz#4Tfv~Kx9F>%_cMpR0
zh3y_{VR-M<T3NL{xH4{(Y#2hn-EZQ+%tBy+Hf9bakk1GM5|6c6hmN<kO5X6<_Dm48
z;rlLS*L}*OyJTCI@hUCJ<x+_IGC<$Qj;!eI$Sm8^w(r~3<jJ=5j#&l)_pc`uU|-&k
z)aPqIq9N56sft3=AQk<drsR(H5Dn2NyB6Dt!>A~^eFwQ#)Y+{3>3%!vjDF>L1*z$D
z?`@8ftXZ7mud()ABz<cf9O}-hrKiv8>ZecoRAS7WAL-+oNGPI)4*9&ir9z1o2h<be
z?Mku|T@)u33`7mwPCX`fLuWUnZ|;&CC^4$q#2jAns`0RLClc)AuxWUD1tXU14Ct$Z
zDag1*2|wqWeh5B}xkwmu*G-@lIZT(}pX_$ou3P~ZT!&M4gd98^y;QY1_u2TH4oamc
z@K&)G)A6Vm$Hu*lwI%ST@s?dEkEq5^j6=nCE49N6Dod=5%mr4f{%*)r=uJ6!N}=QD
z*Yl9QNI5ORCdXs{D)CW`4xvI~mbw3<I54OfKqtc2y$7o#^oU1)&Nq;2UvW*Ubf;5y
zP(961SW)5!XP<dyS}`cKM(ZVko{aqOmiZ0W#hp+;sF2)lcasso5cT)`^o^&hy0#Zv
z5BH}q{V%~o@%BJwt)hDL>7-^<NAPH?0tqRN&Ga(9fEwlIQ1~46@b69;uM*Jvd2hqZ
z$H4KJ#}(W;6FD~f_FXw-TWQ$7M)5{@D_jFuu+lwgU~4$W+ASs^?#HQy8TD-;4SH2F
zzSi>R@1>px;3P(<luD`BAWm}0td;^K*B|;bCF-Wtfh5xj_P<`3q@-k+mCEXcbhmR*
zjx5bt50D*9^26(CNnV>>j`N0^ztlM7_RkRW`~l2t_sf$Ld)bp@1(c%SJ2r!PrK%`&
zXSC!<YM4jXm-jv0JAczA0aq~HfK9WXJ-qoD+TDELNpxTOhejKd>OWwdeQDezC@84x
z&PuQy4nveNBPJ@U7|_|uLs}(jR?vtup_3713u?YNWTFU~j@262vKi!mlFQI=Q2^!D
za!({Xy4w+ZB+RUEOR9Rs7;<Bx;?5=&RnX|lI;wo_locYBdN7yfm;`^4%A;IpycP^A
za^N{Vu~c%s)t0pTgxml67G5aWxKf7!w67jYUxCfSAitCa>%f*k<9CbEY&j%h5O%RW
zMeSr>Uj2+6)i<dewO;H?Tiko;`12%{x!-Q^`vPjSQ(NJee&tx{hI;K2#x>jw&78Sp
zGlDfMk4Nuz^!#baJMSsBVV}WawfC7_Iy{Uqeu$UIaRc~OU&hwZ^j!L2W(@$s8skqi
zgQpAxstRUcSy9}vK<OifY7MiSa6By{L#<rfeBFlPH(fW?t>uhKO+fR@wsxU4qLJrH
zjW}Vw`jq;m5y7&)HxZSntXf@tU&{rH2iHcmY%IEDOg0OE_qWc<_2b(V+F*r7E_+DH
za=V)<EjCAjB>A_Hk1&@K05KOSK;UVC?2YsrMUJ|r^66qc^JI0aiA)>Wc1I=d>23<+
zb8UG>JYr5%x3XRa%jp=4RM}Q44`}Ba*zu#Cef|dfo+Hfn=(hVuqdvny!&Z{tt<Y1K
zhv7sJF91nBFAwiCv&A)>MuYEs&p#H@|4bRY=o9Z_Ca@>)&>8C&NntnoZ$p3xQgD)w
zMKeO;4Qm*8yW4K`iM~b|aax@Ha7==!esM-}gRw&^h|B)LC~@9tK|XO|8((S8WqS0`
z)1n7X4JGI&Nx;BRCwYvICWn^u(dpj6-AyrJ_>KAsy&k&WC+9lQ@xvS2N%^w1T9Q7g
zPxO}s-(}Gx<17>_m_^xxf=-bO=;7(x3`-mPL2-?#NIgC}V|)p1H}66kUdZjcxP9Z6
zq?cSgtHZe+`^VR8H#{eDY7_jQ$Rkq(=s#f$rxW<j^^&L{AR2D{HnOD=d-Rq~O==K&
zJr@w-Vn{xUPm+>O?ffXayT~PgK7q{t#$I*_oN_+@-RL8;ctO+VR<{N@5-*Bc*5Q{#
z9<HIeg{qrlQ@b|;HY<lSlLE7oA6psiT}^jnqg4%GIvgwKr&TjrR`V^C)>zp*R^2{5
zfEZ&w9m}-yX3#eI8XlC9Bhytp+f+<A9W02hq$`x*&ne>0NeJoof$+xM8k1lWaTqiA
zTTQbx9A`>DnZdLbgW+EqZ&fs@u*eS`bP@8lD~U8#W*0HCZZNam=G|@7@DK5)Iu;{?
zhh05lCf2@Pu5HEW(_?OC>S!TNy<hnm7R34Ud#<cpLz3I(v*Z1TV!ZJn9Gc{MA)bi^
zlD?RR`CR00j6~V_>8o7DgCle(-j;>^dV<z1o}9DTq)1%9R7h(ruMvp{4!Y%5Bn~5f
zK5$HZy*S0Ex+pfVM_<Vfg`Yu7Dui>f2^<cjjG8M(nj+h2O7pz|<32%wpyRk-KA5nc
zp?%gHZq{)4poj}BEWmaT!ghG-suyxtIe`+8lgc)G()S^kU9au!Wy65%Y_rZJd-Hn-
z|A`m)&dcr8egCHj?JghJ>rxZL#n@;TlfApQsRmO{x-NEg;FB92CJNw~K!l%%EtCI;
zdI;lYM!0ps#82ge>`(L=G&oq_`y^cmt98}hh~tI#Ti^G#=?OLzr2=W}ARgYF-QPKk
z6*!lXD>E=<7JD@N%xLH_W}QL|8e!HNjljDO81R&pLN%L0kc=-6rYf-D*li8F41Uy>
zjy=D*LXRUqMe(GeONndM?l+>RVuehme@*P0gNIdTcqrQ?`N#3?>@#e+>A^nCkrK=R
zq1?D%CU*0sb{8rM*q&kQ@}AY)=sh=z97r=Tz^!stB<#aq$DNCfkFV1dix|b1mrPA5
zbfi7H4)b9bXK-n?@I4b^za+1Gm|KT~*O)nGZf!eyU-_GubBOyi7SBC5yxeN1xl_;&
zD169J{s(Z{CjON02T=6u(_S+l^VN&Gb7qnKnvH2yITNs{>C|o!X%%$JlG6bMVHmZT
z;CKlJch0N2XKGLyA($KFmPHbmDPtX@sCaN6N5QHZ+%y&fNK5(lG#^=@WHWkd3jsSb
z7#q(Ck58%Q0a<N&KD<rk(H~j&aQk={<{M1jxm{N)z*+DGeq{PUsj6sca=&4dBjuoD
zis$_rZ*0w=2HqWg0Sh(ciR9)7x1Us0o7QN@N%sA2>S+MCbxb>ryJB%#%f-{J*1^nr
z_3w<ysn~l*i|hODHc2>E41p<AeoO$EF{kbv3$k;MP<`||*pWa(%dDnVL2M}-@6j^<
z%|g!aX9zUqEVtxT{fY8Mwk=LuKJdT{%)HC&*HC)B_SUM%Uu%2K%yidfk~5-h6Sz|&
ztEYW(_R2ZNqT^e*n4l;9S2oo~Cdu*<$_R32?=^YTGGvJ$B(@7K&{15%TqlDdW>KYb
zrC~Ow@Qr6PO1WYY<F3aos9birI7xBzVHYaDC`eedgt@OkDAh@?*7%#<rlVk1hsxp5
zVb4-!QH%fEKi2-g6>R?-QcU_Uq?q7|O<Z@raILq8pjYVUsrp>|3GZ7OuRE;?)~+g)
zKyJ2+_;#J8o|N|C059Bps$cmBw|}+xSCSkTyN927J=?hFga@|nTwBg@*B4ixXZu_(
znr|Gf!Ci$KwJ!)7EHytul)PifKwpjycr%B7iyHC^*0mo7Vyfu=w`MURM*d`5M;!0A
z(V<UY#5j3;{6hq;cb@ZSyGFE+H>BM&z2PsOF!u;1+g_oZL|q_Y@3#oGl}5k6Tdw7G
z^k=D3&im^=+X%F#GqIkiwaHKmDuWg_=J<@p*^EpleKnrVe&+I;2rbaeAm&z=zQAmZ
zxSAb>Ukjxj%RopleN`W5A4?8>h38pr=HJwvX_Z!Il>N6YZ81-a2gi~^#C%3|b&oj@
z`i&cPdrJ3&Z2GrVUK4p;PCxu_6Xu#B1A@YP%`bz}lYZX57I$Kx=&*C$PkTqgRyZ0<
z-F}C{g0;6v3|dp^e--#J{F>>m<VZ_56FO6Vy4E@tB-+0h?ogvrdizMKz1~a?6IbM(
z-92nTPc-hMr!Y$=Qlb}T#@`{g?<|()%Rys*05<Atm-ia;X;P9SG=nsc9`8GnBw3s{
z*$wfHe$&*@oMnC9Ua_${_};2z>m}<J;e~31FDRCW1Sjr7hV_(e`0i~BBV$zy^PYs&
z%g6<ETI<pxfo}~bh}1#{2L?$cEj@QZ1syJ-CC)KgBpUR4`nlPPH<k*iaQI)rJWPFI
zFE;Y2<d>LOzA0x>hNxtCq_hlL|6{l?*?tioSCad9re_2&)<&HtdRS1bP@(~y?I*R{
z5=_tHtjuWETch4qf6(XPVM0DgZrisoQGO58jbs}HvRCyguV1$uS~;o8je$LuJ`|Zp
ztVRldvZ8~VV}0Q|42>N2z!{${%O+UU?hNgFTO%IX=<J{v*|85y)=(PpShx-e$=@RQ
z%x{mdKB>=*Z(U`KLo<gS^x7bg;$2Y^{;$YrW){XHcr(rLM*DAeSbBPLu7u}lM0bsE
z;n&*lKzlhw>FaOH!75vpF1x18KawOQJwHgO^7f!%AFK~dXB^DL88uvXsZJ~^B7$yM
zoGOD-g`7tSwv`t;V_(0)PX;!pItciC=;}}l$u^cFAC=o}IDWOq(ZJi&t8J(FhB-AI
z_!SE3{MfeCeSRu3dFkWSb0BRqx&&^}%>SgR_cdcICN0K2(Lu9p=lJwQViMSz?i)3W
z4V;&3O=gn_`c=#;GTD_*?3}|s7|FswqsYXvjF!#hu1m7l_udeOtv~U{iXNh1oDN-k
zLxuH-`%a{vJZ|K}Qu(P`wg`N(2#mqFkouz~S5=D|IjeuAezv5T#@Q~jfPHi>wd4~<
zUUlZ_aAX?z`znqm@xWn#!7Y1(a>J#Qj0_dLkbJY0jEoue(j=-e^)ZF{YGC-ua#d&w
zi=Gvk_U*Q)x1h(`mCC|RJK7u%;doYmV^|d-IYM`?%O<-$5lX}<IU?Tb<H`)F`=R&G
zj=^_xHCUt0ngMC<bu)(vbuJe9?w`Q=N~=C59OR!AO)S<6NsH<vWWRqQA)(;E6kN(f
zK6CfUQy8|A7pz0sIVLh83AZc2^(lckQ}<jsu{y7!_j6(G;l8Tn7T`V1H2d8s{s+*d
zjQPLd3V)W}QeT51s)5noQoPZq+t*SbsM82^mSo=T17j=BI7HD>9*oi#RHT<DM<y$^
zwknehu1=(XAqv1lx-|HfI4sF3tDf9ifYM-#S0SlfLCM_H&!*6V@><*k4#W7PdWz68
zp=9Mir5x|t##c`YI{Rke)1x49KvLS3Q=fj|zR<w=t2+r+?@fp?%>5UGKMvyMrA|Vn
zm@^I};5Ui|Gp({KShag=O9P>txq>fUyt`>=wc|iCev_urf+!0Hl;RHrL)dL5eutm?
zC3;wRPymh~$O$n5tT;#Wo%FU_mrz{Xv={xO8jc}@6<l!1(U$q@R*LoyPt_QQT0Bo*
z9d7eu&0>1q)<Gtk^NfSdRF4fuh06#9(d+C{%hh0};;#6^F)oi|5e<vPW^O^2_dMZm
z*?4~-n}?=2ZPNM<-ms?cm|zm0D!;#@VJ59RqF7+5Zf5v4VK5hq8NT}_4NbiF;PF$e
z!n-o8tOJ|p0E#C1d%l+Y4kjHFr)J{KcXBR$bI>TZ>d1V>Po-MH)v^6s#f;Q(pw+ZP
zwh21r5JxM5J6YbUD>=F4qPwc7>5^=>%%TNVPt^w($f)<_9eX07IR6GXM1AedeXJAX
z;E*exr0vw>rfmsLQ$_RojaN%*LRns}*Vhv5{Xyc&OGq_w5s$SW$9~=of`n!z6I}Vd
zZekn}lDNa%lS}cbk5`pk2rZa*#;H_Ac5{xR!M{ZiCw%&F@}fGX91r5mWT=+vsj|`@
z)t&@eu-UW9vqIzJM=^iG$=RSh-v<3?IX{~WfEg22;g-+D;_fAH;%1{N)GRCLLf9MI
z6Fn!+N1-}dA>6WKU5OL>cBfrh4VMhm>z0L9#JqKw8U8T{!x9c&aRLar&vm{$m~g8i
zwmWK-=ov5-Asgm;yuU|2(+Twe2Y5Qu6ZT;M>tK%#tbO>|DtsR*KuQt}>5lNIvi1nC
zx0Cy9GjZxTmUFi=Hr7-9u#sTZmnl4@Q#mn(;bXb<2Vnp658y0?RbcgezXTY6JAwJ~
zAfoJALb3{A2gVLd1P*!`A0_i<I(vhD99?aMf6QZC5Ffj56MO7E(={g&9h0d9Igdvd
zIiM61HP|<&I5&xU%9R|*e6`~<z*W@{$NN;#V3X!bT#UuSl37ARs>3(9ipc#3K)Ct4
z`^E3h8ug)db4ngU2F)P*eHrBa&lh?7e|zLT&N`>idl2T{ed#`&iq*1IWKV1^fO!)?
z-#FCs{Fs3Cp9FiiUojX!hJoc-PQzGxT#EI4#UDe~><WmvXkYDNV|&+su5%y<wjhJ>
zEz8B;4$Ah@hbcC8AHdkD4Tqig)P9$>#Qe~d3b7O(BipL%^4Z4r6+cpiD+~Xp^D8#D
z{KHQuyLy=T209q)d}&SUA#IEDe$Q&{4lDK(W*?Rs9UxWp*!(w%PMJ=p{cOIGAp=-g
zHL=B<cbeChTs*{z-})9@6r1W*DpL4>9BXuj+-j2}1_o6t-U3!t`n9$>UuNc-6!bla
z@OXc^SD4b8emGQZ)KlMgw^Aw1=&VhZA36+3%xqLvWd<6M=z$;v<Q0Wh-#55q5+@sV
z$(`BdghAD<p31JCVzIEGG-_PUujdyfdpXG`cj?&L>ZJ)|hn$LD-?vSq!d76l7`3L1
zCRpd;8plb^f`=XKObE;ab?%!hEpMV13jyUdWvA$}*_=#uI?Gm_W~9`doMr4gucx^^
zP&jjs@YT%uNtHFoYA9?moxSPBBQ|qrmEZc1-5I;371NBdhD8gnX!&uYQ_Wb#lb*~~
z>*JTXEG`N&k}AByV&@4cRL$jUOik6QMbnB+`wsX~sa@>SS<%f<SdY!^dXKKUtU}L8
zuE2NIih8&C9WYbc|15$0!rKw4SGqAeqWfZ8-wrV%nhT94kLZ)_SBs@cF44w@4^ieA
zNDAyXa>PO0H8-Jso9EZrj2s`O@Ryb4=*RLUsXL<Mo*Josu{!04hB6}2d+9UrK$=dZ
zxZ)9$O%pvE+Y~y7DE2nP?tw`s4pT3*;Tr*P*TzoUVC+4FP6MY(B^QX2%Vz4a;X@6k
zYd70<#V4DlthXC9zJidPV}JBBFc#quh6BZ3wkdS^g+m<0_595w@$D9AElD})a^s^6
zk6UGc!1IneXuUUN)A2S@R<b!ex%S$1_qz>HzAsvL*S?mzOSCbwKGDX6dSDBTrlTu>
zpKyif#<cpTps3rKJu*d9e(#7-3=VYs>W1pDJ1uwA;Oy_aAMA+aB!euz@<nIZJG@-v
zU!LIqT^DK-vltN(uaxSMD*)sOZsGtKoWyZppQ&$<q6G=g`a;ThAfblcCU=KOFyEZr
z?>QLTS(qT)pIujokR$s8Js&OO*Dy)36!W%`^nO;{UMiBH_AUo6*IBNiCremV>z7CR
z|LYZ>rpyFEhB)a4nvS_0+w4$kO8;`BIWui>s)75KK~W-YQD(i0B(CznM-&kp0%=%3
zcaSn!!^lK>Y<8wC$TiVE>=*>j&uo#j%lOEds2@XAQXnmJ+H+VYo;>iDIPb&9y0Bj#
zMLcr3<^LB4H~;qm(?5@nZ%4cPY((X>xx2J+8{9;}E^8!HwGhDM^2u0F>e$|jSw1rf
z!A*2FY#22vQievqnD!=w_RT2aEQUP->@kRY_GFqyGsaYN@*d|Guk*r~L-?1S24-^d
z&l}JGr%Qlq4)IdpKHm@q*&&DU(=Ocnncrr(lI5BRA=e-ddOI@`pmp;Ohw$q&D#=5?
zgcoos0QVKK8(joZL7Lbr+MwhWx1nBmgccMuIWc&I6?kG1>(`S1nuZ7c44C4h825Vh
zjNm|8@&_<4B7yz~KsK{nIvyUdLC__QZ72OkpWD69d0^{vF-n$6Y88XYb78quYG<KG
zz2VQMy_qlZDm-2T%o0|XhIwDb4#&Wyd_xGWHlK^_-W>7s^33={On0C{wlnPTa)-%3
zzN^lCj}2(<#{EVr=Ca<V9%JRWh8=l>2j3Cu0wqaWQeO{BeuV!D5a??{J_^(-9}RLB
z|8aD&J=8ZCRI1*N0_>4?UX{*`hqai)%mLvHf`2^v<e;|;5PR%#AQ<mMHn)cy^zQvT
z4^Z7d4~4Tr(r@FQdCs?o!d9(O5~fW40AMV_Yf=Ls-EofuKcd*~jF&s;Ji?XKtWNpu
zriii^AU^0-&~runk(|@U&8PSn4<087xG{B8xh!WLubi@T7G=c9Ck2#f`&9M(eSdI+
znOc#fzJ4jK<A{G}*1yKvaUW(MfU_@H?zExO3L}~z>@BAtIH8A6Zn#1K5zb`Dq?5#z
z){mKr!Sld3DjXZV-j>N9hepzN-`6hk%$L`^2WkM#t9`3h_uHmMeD`xKl1?CvKgM!d
z(OUtxLG}m`gB;bOia8q{?zBSpGrJc@*D{_HPj>&~hRvy6n0x8V%Tw47F!rjK3zW=S
zg4d~JO_*l>KKayp;puGK_iDeztHpr`W8KqfRoVms>~khXXmKvc7ILIPhT?~vbfpZA
zu`tVElH6@~3*LtdK44<N13v!3+H6);E~XIx|EMj$XxMfvUahi`mBO?W?6&pS1H0)Y
zLm|$)U66Qek{$^^ilcZe?2Vk>w?*Yc57o^{{da~bKksf*E9R<ll2sf9_*5=A7Rkmg
zNyqt+xn3@%J30wj>Nfkr6F{ZGkZp(|VE1L?@SPl_bIfe(cIEc*Y$E%Ti)d*-@Cl*N
zqy1%?q|5hYcH?KmHIACL%r(lt&gcIlEB)t_l`5|PT~<=~4`ii4oi&$-9MYP<DN3+}
zu3G+Kv~-=d_`gU=Dbnj+N-1gTnEFZ^1i1;N^PgO5)X#Qj=<Z=Ml2BP0oQU#jc)!r@
zyRZT8s!kp3+$WaQ__}O|$SJX^$m1K~;BOjW!=6B_`rNldt>qY#xhvH1S5~*lBc?1L
zE|6Od?}~rrU#M(-ZFVA2e0o4{>vWj#UGIb={*9W;o+&2$xkmV|m>yo`Un)%jd0cZl
z@PawM{<lja;wDSHS7h%%xQMFeiYeCio&oC~Z!;(tJ7;|P4zrr)TdSTK?BrI`P`@?A
zQN(itWm`t<H4{RT@V8Exj>ebEb_<5f3eC}+=>R)yN^$~1>LXl=T;h@$#|?O$H8K!)
zqPE|Cn*CJY9p20S&_isVVqBycX^9KBrC1|vl@i>XkuPOaRx?J0S35u>nJ4M1dGdjR
z7iMZc_B{9uIXe&%m9Q{c*!bzN5V<~(pR0Mke+UyCrj;G^CSXL=r5{P`yw<pNL3(>5
zGMG(c^G5Zaf#jHoZp}q|_qMEAOPJ0ZRL`UpZ{kc9%BXEo@XKU-Uw&e{v_w?8CR=t@
zGRW9-Px8A!lyu8YKU&Da+Du4S97kqcz2M%XkSk-SoZ`zw8nJDu$K>X?*41uJM-uJC
z<;aIE(dg)2MfDRB9!Vs1YquD4Rczc*<aax@vWNjWBqQF+v&~cGVXgG)P@vgbBWQsu
zbwILZM<85QFLU$+a-W%RXI$R{bqfCG5R=|=L|I`@n~vuD<572hU2mbYxH{H`ZGd3<
zl(Zil#@tU}hq<PzgI$5#y$>X;Ogw14B+l-(B$ZT$|7kxFB8DHWibaX9eHKHD8wJNa
zbFRFI>zm%X)bdbPT7<hMHQMO-;m4>L89}?WY{l0i?5i~m6ysItG-Ir_wE7(0=0_7Z
z!1{6>^o{pxCWhmDt>IosJzJ!JO7U2+YBPV+a+5?2#u72hDq0zV>gnhWU#F`BV*i6C
zsDI7mOE@;f1UuGJ&;$Uf5-tmJ?|?0zsPJ7H(C#G^$NCr;OYtWr{q4pHBKOi@;E6SI
zim&4>Jkg1n@GS?zXeamy4*ixb_vt;{t|8=iP}mc})bCipWi9ZfN8CTg>SZt}4}!mh
zJp9c12XNn<?iKhrRW5(o;BXdR%1*DaMtR%X*`@KY_TBah*b1LYKB7p}D^bvfFF9KA
zhK6)5iZY+vAc9sLn>`&(4e(l?SVRPoXL^msenoNYF5?&2kv<pMg28ZoV0VA~6f^;L
z2yJ~OBr1`vgiY+sw!NG!PCS8)6B)be$rd8vi4A=^Pb}wCRZc3^v*OwH8hrvk^7+UW
zzh+!Hb+r@dWC%xJkBKWxIjf=zKJo5|PTsM9VxJ1KtQ>hA88%K-pgzm|djN6ZqsCbA
zIHkg?VXULF@=+x8mPN}ssM>;p|11xa;k*J~^z4cno>~(lHAYOOth%h6YuPq_`Ajj<
zWYNBPL3YiGWR@a*Dn?d?Mn(*I&1zMx$DQEWh?VCLtd4fHr<p09DquSTQ;AS~%{{+#
znus(N;df3F;;64$5xpfSl%cWa!XF5uT<CK=#6+2oTJ_lIWRP!C@l*3XSPzbxD;KAI
zj>o9Ek*Uz|cumc|%+o{^l$39gPU08KOOd+M&DpfMSBhm6BZrl$@?>QDS4kyjBva7y
z2sS-A=(%gmI4f}!EGV2RUUoFfS+cHYoX5e}<z3X!4{1)6+lX*T7Siq|RL`;fcninh
zUdn)V+~$&ptRtDq8)x=9wy%nFx7uTI!gd!&PIG2uvxF_f4o6ejDUQBrR~!>n2tJiG
zI;m7Do1mD0wz?=~myT<Ba=K3%T|uo+H-*!)A@%kS1oeRI2SlH?b!k+02EBSkEP@<>
zUDd<mC4&95+NlU<AZ+VD<#FlV<vuawxm!K?tfm9@0CRVG`veN;(&IOS03UbIxVB)O
z!e&CGi_T@E^a|~sN?Z>cY0r@YcU?K}9+@7lb-Of~Cdqyg1Q7oL{D`@agE&4;VgBfi
z<Y@3n)cFiMdIM;U4*YFebHb)|g{xhAX6COAq*MtDq6=_S>Ay{XJK<YSRq=nC{|K_x
zde#s}ILsc;&G+NnH^?`n>PQJAsV8rF-ymc2cr_#p><dVaqP<V9DA9Yn&#T5O_YHTK
zKIG%*D>wj~i1-u$9nSZ;QqQOk>kq(8=VFO#`!W7-Yg0t)p9cBohYSwnYYU+bLPQkC
zBK=4(X8cRAbIAW04%7#>Aj%of$Awa5C28th=&khzyT(d#rSNwqXRdV5KTMIY?Ut>o
z;Y2qetVY7zcKgL9TY68t3N>Rz`Tb=|W#w+(k(=|`Wk*G0W6SsCRhi<k;Y5mf#aQ?;
zlS!HVhS1LTN*z@s*>Vy^FFWQa+_80&TDV5e-p7)OOPJNamz}+PAM9Spkq9iOJp;R)
zS+sI37XI*`o(`;do_1O16-F{94yr**Y)x^F!)_^-c?r;*^!!kPZYV`tq42bs-=27e
zLtkXgw8Cqz%<u(H{e^j^Bgf7#bB5C`j7ZYdqOigSJzb8igG$Q18iKP;l6-ZzWmk$l
zVhFZlPU%0aV*WZXB{lWAMNH+Z+kQw6Rdv&qFGhyvfwzJ79a0gL&UB6KS3CdPWzeE?
zY=kvt&V!KUEc+yN5ss*=w9*FcG{#ZeBn}P%jwEh0s3+Z(%-A-dW|PxMV)NWfPNiUt
zpl$Ru4TN44SUtKnKfB>~Z|?h~!fAwD-qsyDt;?6}^wTXx?)q0%ySuJ46s-4jyym(-
z^@$380n8hNCiE;GNI`gc4)A0OLeA;yX4^H$V2xpE#3MRu*vNjIoL9`Usbuze$+g?Q
zcI&8wbk`|wm=Rv?5g|LDzhu#uO9zZP@(Y}1j=mVlx~P^tEw_|zP$ec_F8c?jC^tjb
zg@4h)@{0>Ip#ZW4f4Piu6+f=ahD~!VheFL1Sy@-%Nei(>(37!Ah@X%Y43cXmt60>K
z42l0Vc{WN#C&!OrFnL{7ea(PxI0-h^7$@|{>6467&@sDkNUyV{D`1e8k9_)$^zow)
z1lh>=3i9e%+$U0DVE>Ko3IK2k+Y;=A$tGcPqZa57!1!a=;9nz%2vkA>>#$&V@dr?!
zNBTc)Q}O<nL998;fth~*&f-DNq_Fiu#A68!VbzNO5g_6f4%qB|4?nT_<b&kNtJdBp
zSw&}|7mD~)_^K41GRvMIIRt_7gIKbn349t%TZmh=elztSZDjvC=KrD_JNPcO0#+Sd
zye$9GUsB-VK3ZU<D435SUW#$P!=|VnH1IY(@52ZFpubd*J`mHceWk3$N7H1L&kndl
zyxSQBgE-P$tB8_J{fM5}*sHzN*vGG~ksbeeKmOlx8~-|p_H(mCPob-cX(>8>0`bmF
z!49_RGV^JC@^RCz#Gu!&71K#L#;I6Q2xJg?L=XX8aCQ)102(6eUyeETxv-9lv;TLY
z#=nmG|NILrz~UK<az6<EvOT2M8pVHb?fa5noK26IV1QBClke4fP>)ckA_?va7GU@H
z>3_tUaKZ*a0*E~@uRvIv<2=sWr=f5dtq8IPA6N25JdgdRDsS8TXHB(2dwa(Q+bq87
z@>3bKoKqQ;B5vJ|&L+k!*Ci!|GMJ4@vex4>P-*Hks4DU&=o6!r&fG~$C0>Wqc20-a
zXV9AKP)9dsj@}6bS0+)eBDuk9iL*F&yoGx@=&mW5NaEVZCP~0hf93kuQTpGKg#Ql8
zi1;X;@{vH8FZ8G+&%^W6IpmIp<k?Rs?}SJLCmPBf|76{~<GBI7^wyM~h+J{2cX1gp
zv8>)zPU(u?@!Pv$x*Xj91F#&gJU{gT1(sti%`?5tb<2kCG|@Hv?&Q8c0vpZC+SPn_
z&WyKn_V|5z>MhWq^XT=!nC$_^&CXD`f;?lCB|59+y6rj;wZ3Gc!n|i5v6@*{AG`Zi
z$G-cca4JqtPlXw6(aRS5flFY_#~-~!df2D6kCJwkD<`urp1*(INuR3>Ui}cYux%Ae
zNj<eln?5K|WS@EKXlEJaIF@_w&7CvW_Nwm>K+Lmmd2hr&9K2&uI`r6Can8cBp!)|9
z8>KTCgPVKCX@r-X<TJdaW!AnjmqmF;rxvZU*Fk$q-B9=C;2#~+fB!O3Kf50IOGm|J
z!8-YUmu5)<#R3g4Pkb&=({jy(mNTwSzFnNwW|;VEW4$>1&04Ph3xnsk=W{x-&p*Vc
zp}6F<NSgeMa>tnK7xy#Af*5}Qt|JE%WsYW_^)~r#c56nu=i3nWQM)~>eA?^nZa_I#
z?}dQ~^FLj=;~&3vc=(!l&b*KoG*SLj?)Udx6Q%1>cwo+&WsnGR2OnbVbp3?leK_(U
zUh1D%zt|Ob%yo&heM>gZmhFayy^Oc@iek#i-(3lx^Q_U(W&P1eJ6{wwRp8uMYYVwP
z-pbbr;Yn)2Rd<)Ub*Gzl4P@O`-Vfyqx%B#1DHXjI)>HZ|`U13F4|wa(vXB7cBfTxY
zRJl=@xecr^I<9zUvR<pC$Eo~JH8y`Qt4JZdtDRnm^5b#qHZi#oyll1VHW|-7XE$wy
zYF}iE`fYxFf>M;*funo^C+vR5#An@K-j1Dy0^gzkYO<P`3z(#?tPgmY3Gg+7Qft=c
zvi>EuX#FS-)Hq~e9rRz%zdNX1$l#tj5A-UL%b~g!sPD9u`~%=fxj#CYd1-9hy%G@A
z*Liw<!tzqB`~50Ksl()RqEd{~^$DjpeSF7xE(3EJX-jSwn=pk^h2^e(`Nmk0+Bt3)
zqrZwWe9+(B=2_6Au5^4FD1>i~n8-|E6(KpHIyj3*fyf^!9Xf+vKAH(p`n#%<WD7Sb
zknK%D=@R=5MYk|rI?rIg{#M}tlI^em5%Qz8Fe^!L5K4S>xUu&p929`w!GQ>Li`duS
zk4c)fB%5uz=`=O3R<lea$l#{d|8`P?(&0*kZfZbC?%-ADqCh4oE88C4L?$lDm1tY2
zK}D(iB_w!W84$9@YIXYUyp6*u?TV}Wy^;14-SI0mMQ`pm(p&Zhn67j@Q`Fs!)%Tkm
zZl7)NvDBOubjDU$^eQs%EmeVO*56e%a0oE<)L$(}ksD)SL!4Ft5<IU~`+1&3dwi{t
z)gb@?F0TVzPad)7TPSFI>`M!BSkMfW&?*l~+;b04ke?@pfys8AEA~|nu~g9fJU1yt
zzj)kJL=bYYe^AFA0Mg{KEut)ELI!Z`dOE9b3Ht>TjZsieVP~{FPI=q_E&xVgDKjyT
zOCGuSf5ZeI4O(IY1K>~jF|DC1{4kju>)gZF-J!#Flupf;eI;%_0|w^nV$wVG9Nlei
zQ?ihpRO0Vudg#cS8cDfo{@yRbdaAhJovfD$Y>9~fHiamH;Lvlj-Glr?U9+gvicTt7
z{}}9F;Wm?;EmM>;2gRRr$k7q(pR=IcZS*dJ9b(x)0#hDX7EDh|-De{?LgS$3rExEJ
zW84DrX2f=(RjEfxI}AraDH-fWO;(kj%G88i<(;$E@64A`)}jEHhff5d#A72`<c|}h
zIdt1j9F+%9#U3$&Z0_pxQ4Km{j)2!Eyz|+zw=J5r1PZc0OI1~a6SJ*QwGuAO*wXzt
zG$$n0@EG$$VWty`3(Gb}mhZaV?jVdO;}QctAI38VJE5I2*9mVpm&H0AowtBwgE9{^
zv23GHch)1H9~w|eNv}J#3!Hf9uc^}qh1lj}JYLza>5jjj7{e&OG%8@r%5}jJ%htC7
zmZI~IYLX`vR&gGSrAX5AaTV0kDd*G9$E0Y_^hsrgtz#s`>X_2>Z@PIf$e0Sw7S}3B
z+cg{iI+JCn+I6?#SlHQFdjeHAC{R$NQ|NM;4m6-?H;PhMt#HUhfbG}d@hJ83B2m?6
zBypoMY`%p8=;UB7c<>$mT7+8fqN<Is4ZV(<Z5OMJ2n|)T9oBRm+$rcz$`#d;95g6b
zcNvf5ls+}eDHkK54CvUGRnk^xQl+#uT(6AY@$!{p|HCN)pN003+&JDa36|~EWWYcE
zwax?SiMcmYpJBZ;bLf5G`5XFpn@VYhvs8o8{p-{9jv@nYUP+)V!@gBY>)PdGKzM6^
zPEnD$dtq66HL1?oDh|3C_s_+e(tT}W#Tf&yo6Io?PgYz6`PVTSIq$c({t4<xZ{w|h
z7d8ZajmUg=jBcATQOvqRCOz>K7Gp(1rI{ngS+KQDNaePox-8pXZcqg_nXsshSR^sY
zuS|cR`tx3k$Hw+LqoEr4wG?}tb5f(S+ynoaUa=-MRS24dJ)@pEp_C-M9TD*a;&75j
z)pA~v<wPR6F+gHkl5$uCy#=>D;;Je^9QE*5*=BiPGatv{ta5Zi;@44CKPt*hk2a;y
znAWh~85m3K-kz8ORoh4X(4&kBk12w-d=#>q1Uv<HE;R0mv;*vg;IPlncQ+xRu8^uk
zKfJEs!$Zz_k-rw^1`n-5W~)Ddy6swD%d$7i|M<TD7L)vE;_F)T5V=HpK;c%Azd`#E
z0rK!fP7vescmNzc)1z3Mh@%4K_cs)%o7}vMEV<a`_E!wA_AL3<K~P5NXw#w{X1lKK
z#BWKlZWWfE#95zSVf}iM?QaSuzsm#TX~@3`jcab&Ivv&hJcN@fN{(JJAv|Wu5MWU_
zc`rt9yYMx(h1t$oeO|8(m5E@Y|9fU|k;9QEYEY(&6TF!9-%3;eBBA|vOsk5(Lg91h
zDcyQ%FI5}ks`(_>Lb{}lLVnVn7deh<b5@g9svC?Bfw%Uf-5C3o)){GAso~d;$rD<~
z`*A*=_k!N|itXx8`v*tTDu$?}@z!MtxbR;|F@)*Jba0_$SiU9YwV1&V8b`^#O}WFq
zIw$RUj=R)>Cu9Cki#Cfvt%1MuZ7`GKAzKO|9(-PrUmj?Ek|Qgt$bAt15p(_xb}W{-
z`beg-RalC%dzEv`F=X^Jcd?_Lv_q_Te`yM2+3`dv;9I$uZlsAQoSA+<v{rRuEVs5J
zEP$$<c&>cyD~S>XrxABE)7E#_5x~(lCEmSy>GR2!XlZ8QUa`~LuEFufA?TsEcI}w7
z=0x{5u(V?^9wKt%Mm(|ol2=QTvt)4H9sYk@vlgFZ|KXZ-Zeuj>^Ld;t*R#z3k>RE!
z_Uf5zoi4F7(jSx2Q8yuQf+~Qoouj+zwAF74Qwu+!*Y^#KH>FwPx2twWpt_T^jq{ry
z-B7OZOn_!|tO6NUN*kjzJbMHyq^0+~Jj8uQVu>bGR|n+LI*4zPxkUGwb}SfHMNIup
z8<SpA1WWAaYf9Wa=KdI_htwJiM`%w)lIhQdTPZ<tq#qie<jm{eee()lt@VY;-uGn}
zqPi^@rT))hYldLU&NIWHxPVZl?+W>3#fxN5GbwPi$^TB&hAlbD?=kq@y|0$IAkL({
z7putrtv;ETaCxl0`fs%+n(fUPwuWZ{@0Qye*xzy^X4evWcx!do4@Qd16r^mJCk-!i
zI=pToWn353=D?Pp+&1Vypnd&i)R+QC(X&>91X{K9c~BaE;;>I2%ru#_M68gD+vR-I
z^F%K6YJ4uE{$l)Q7{95w=R}vyA$d-)X<iF{zU`nSV^2c^a4r0g)eoC2=f&PgI(MeD
zmMmmNDUYi6FCri8&qSZOPFk0U!pYQfo7EjdXMp@xxk@~%0g`B{L1kueFj(p!&TF3_
z<*QzD0R)pRasfbBEqx!T<)oKhBeNj}1m?DA<xXRd^TTP5nymoo$?6z@sWKaE9K9i5
zx6{yu^%uEiBtpvBHItk4?6@X%Zum8UHR9Q#7O8_v%y@HpLGGLZ@(nyi>}Bk{t977(
zIp4Gj1Y@KqEh@;oe%d8Y*bi$c(P%uQuGss=3S2ftpLf!|ZicA~#6WtdawT*DBzuMT
z^*=O1rFllZ{3I?|VP2JG(@S^qKDXTR^eI#!({<VQ@-u3wV@oBOHm!1Owud!dD@V{%
z7Key7K_DZ6#3xImR?1JO<;(|ziVj)bKG^S&!)47;%0DpDno?`GYa)HiHJ6(}7^e51
zJbe0w*puy=iecE))h2pWQ#svtG(?k5Fb@Oaz9$VMlQw0$_nNQd^Qxv3;ACEl%9fQ?
zTD`0|D6OY&I?@O(d&zngu5Q%ktm-;2-?YnAN~N&u$*MI9Y0&zW<!GuXLxl1Uc>5P=
z5g6_SSn_t+0N#<bKn)$sQAxm<G^!P4`K!%z%qtC_K`p!7K#1b8ur&KzvP*MyXJyCz
zS%V|xUFYDht8EY@?ztb}rx(7C9`4i0(`Qn(?HRC3m3<cQ&hHQ4mtIt~hoYj+F4G9J
z-NmYddg|`aNYt{w=J8(>;?28ce{5FiT3HxAOiSS}{H5yAVjEZ(K%dtqXm`Xpi8QE?
zzC1oAoPOz+;lWJNG=r@%*Evo7eH0UI4F@4fTD<TNfWfcL7aNDL9G6$r^lYxVQDIE-
zSFtq8TvHtV+c5l`o967*lbq288}1>F1}llhPLrId>Bb)G2bz+m7^=_GYVBbA#`a7(
zJ=Bq+bx)h>@!yZ!v$pSn{b;~L8!Be`6gTGP&D1k11GUa}cEqD7nW<qy5n7@SwQoL$
zv?ZJS?^)?-ljW&!(4m??>!ve=G>)Y02#bn|6`9p+4Qd({nX|dpm7^8q7TSGtB-iBU
zzgr99QAO(Me*Vm}&2~d22v%&1iYcGX@$Vg3(}i|EE$F>^I(3Z2fUArDuq1-W1qnpC
z4RWd(zr`&`-UV%Yq|A&_pY=Xm97wy{CIe}C4H+y}4vi=ux^gA&iEdOYIdXaac0iA>
zIC^a{{s(YN?0#lH59j+&$@E`u|5G_Iy&?%gFd?~v{Ah83Bg3>tcIf++_tC1j51{7>
zY>G28t9W)V(*L%PV_wbQ-r=&%vFp6M5q(+3*iMu&lNLtV*-7s?(h|6HLh*mG_m*LC
zZ&}-C;e-SW8axo(0tDA2xI4jvyG!8^+zIXy+#L#cCqQrtcQ4#6Q#q&mobKo9XWs69
zXXbilroU5F|0>pAd+oK?{kwN>w?B{Su=^fkc=S@gNP`Ja$NkE6wmy2Z?M>?#xVNIx
z*p05I#?=hJ-h!NBE04<<+{MCm-?Xmt>TwV#E3=3dUD%gkE>1<*&uQHoYATv#&yd~W
z_9tx>Icm(CgE?nxPiZ!)%ZmC3bby8K*X}RRq55bgcUR-hBIytOack=K{q~gPy3e&$
zP9c*!UCN^hI+y~kp?l!XD9bk24b>o>+*e}Pt;pcsTUou_ROf*x9cShg5sUnW+RTRf
zG9kNgJ>+^CD%E<tKT64O5kTKkD^@s~@O}+3sXqGB9+wQ2k1<{!YZvIiLJ6{00l5OQ
zf;J%>m~(sUR7jiO_7$nhD?@hwJOKQbe*e?g`A=Wx|GB=7z$$=Dr!t7~7H}h$$=HC_
zU4MZ{yI(&$9kF3He{~gGtt!xq2X`MgxRN+>X+`hc=wBTd?xv!pfjjr+MV|HNI9!qU
z+^@t{2kLPa<8O*%Oz9A*^q)c*97q#buD&@@Cs%Kmp7*gdfVF2B$788xMaQa+aOvOd
zr3FMsH6Q8ofYobidmRIEqy2+kMy^f6A;0YXUsWik0t<obWc+^Mqo6&AvKDaIYp6Bh
zdi=)gjkXAG_N;fmZ@ufISGRbo&(TNQ)6*4e&6BH!Q`GvxY590=k=C-jG-&T$qTP47
z^!nV6zNTu4DrJ~5^^ARd?;>6CwAD)5)p|(#x~?13V&ScoswX`=*DdnywpBE3oQjH8
z12+~;T<+U1Lc%*37_?$mC2>FI(IpOWqq}xTFnxN>_-TV%Y^QABnW!q(mQIFihR&%^
zr9qJ2b4x0O8WlGLPlm-Cf0I$Kzb>#S*#cV4v@Su6KNxkU*U?V6It*@DQSzmBx^Y-;
zncm+k+skP8EGNHDF<}G#aFL}M%zHD>Di7bQ14+_(_Mws`bTF*L)@$criL_WN!<lcl
zl&ShGH88JiF<0Mq%1#4axmHK;xh}%DD%Nmnzuvv1$?iQAC)}60ISY6eW%#rH8$n71
zq|TEEmvkGo5Sj2dts}nvi*%}S)X$WCt=JJ<>gp3T%gfw>75orwIz8<-W)`nE(wNQI
zK-nRNOLE(kksp`^o#dK=oK4QJ8QLA_lhu>i?}m9ic11+cJV)&+c+Vn+B0*=#1*aPQ
zr49Q|bkhzl(r73#ovUx(Z5a`8NfYDu#p7Gj^w~>H6;8%WfGE;CR^K-9Wc1-;LLw*?
zl@b;UEP!(6M@QGHtqq3p=S)+#M80V5AN%YMF^}H?p#W_X7&H%ONU3d}q8U#J^@s&~
zGZo0lhJv}+oz=k#qO2BUy(dV%Hr||j$gatd9zU~VlqkmXS_N0@ueB~93mYM9NFX?g
zVm9|Dpo2ai>W#CEn%>d9<L8ZMyy$&iESaWFiQl%>Mnrtv?)8Ma5TgEpL$S?bf1~;I
z;?t^iSPbrjPjV9zH|~#<x0q3sd0Qk5$^=yrpTS~s#7sp&`czBUQ}Wv+l(0ttzMzfX
z4Vv9UMoU$4&37kpOR5XiDUmn5#*68P*>UOD)cxyy1}9CtsJg^qDFq*&%S_wbw1d?P
zMNGJD71XTi3<sJg;w#vT7uN*KwIVwwEj}<*9C|WoyevxM3#w^8ERoYasNtgV#lnjX
zJRMmmPY!Q-v4&Kt?>iXBGV>)TbVi##c+zQg2F-~=2?-_ItP2W8Fl(kHEK%pAb<^zC
zsL83~St-T2tRZ&U{VqVxUMoTn(w80PJ8A~zPcK<9(or4r-16}yx`k=<@$o?q=Ga+X
zu=4AzGve-a&XX{ls8x2M93QiUSU$xvI}a)s#<2zP7uF|^8#nROEHQ1gt##LQf{Nw@
zvL5e1MzZu_&Kp2r*VV-n0+vHlOts7kb*fzY`4c}?Q_2YSsMc@gC+r|tC%iH9sXMY2
zlJZ~OkFXJ9yLGqJ4Z!A`*VV8-t#N-TSby(U>~gF97$dtIZsOnc6tc%f8eq>nFh*aV
znL@JuyV*#DFsMkA68|GuC=-qu<@c`JU&^q3;L3p2^ntU*^5rh_&aU!p+&t!;w%t!a
zE3N#?kU#R&Z`(FZ);fR)xc+*;8S3uxMC{ikfWdd{Sf$OSOJrdp?g~e>3V!@7Nl2{g
z+~bwPNT^;1b`}=ljdmp~H}{RqaKbVLPHFaF)ug4EuLZn)o9O6VPQ-v*Ym4~7G>Z2#
z;XO~^!rV>{r=Xefrp8=D21SBB6=M%adi_YM=wA9J%Gr#uT+{Xh@SPGY!w*?XWjqBo
zzksh~*j?~H{%C{zgR3{3-(1BB>AmU;9@X2E@;>nHderh8d)!31Z}2S_6pp2h=CyrV
zgbEJ8@m|DZs^jd;y^QF4m$WGrvtvXT$0u%Q&}+1!>#^~v$m|wuev)wo=(Q3&ACZ@$
zKsD{W0NJ&Sk`5`-S;;ibwVB}8s~Rv+Xk|zmT4p8OUzbl-z}Q0-vmYi%sYyXSsmO<p
zr&w@j@|Uz&YM+GvGr{^lUl{%W-3iursWp0#73*=pC9e>#5cgdiyypISF5Oknt3Wns
z9-T?2mEu-$LC{1qwN=t9s)Xs}NXA(N;X|V{)C~sbua$Xm#|cJZpZ8T_`+aV!3o(U-
zg@tzSy!T#@1db-?q}KK4N<~S&f}=?v;wnG3aQdj*kbj?IjSSS3NMEpSP6#)0@@X`u
z>E%zLDCjtat+RH(?jV!Kfo@j6XE<Rm_uDE@5^u?j7B4I_0iVR*Gf@4vdbAFhZF0i1
zLv&?#{?Q-xOU08%+RbIfiM5|guP=_;5&MQ>6<k-`)@Q%=6005%J~Q3#BwS(E61F~*
z7-CV?j(Vvip#w(Qa<jF;u>{X7uyQA+B}6+Kj>G4Go7fEROY(w6M`pNLnW-LPEd=QZ
zy5$hEY%37_ziGt1E3ewX(jXPlD;~63XSv)U2z{oc6i0Jde7VF?B&ygfAoI(FoQ5Z&
zDaNenlgo~(y%Z&WiN<}Z^&oFcHHGnyNwgduv7-UY-g0HbDz6lSv^?tY3p*UL7K^Y5
zv<5CK+a2oY18NGYEwFnqwMm1=GHs2cf5lpy!cbR1_C-I9R;IJS?swVZv-gzwmshR3
zd!q^)OiO;<JaGjtRG$m~DDuw>vQsIZDx&RF`$k<uP<N{Gc|$6sYod1#NN^{K7D>!s
z?7|TfTXUo>n!7Zws+poz9lF9XvsGgaK8c<gsd$cO-SM$jBwYCo8}4{j^x;BWQPxr_
zu30v?`WZ%+B2|pxYauv&$XI`hn1mTIOJvU;KW$`>!9_XkXg)P*ScxX4YE9Y}V{uKH
zYVMBGyDeIoW^jq<xYmQa%A0SpEV&5^{x2oytraA%&PuDb6vk^@p4o28ZuBr{=47J5
zp`F0PBUyjuMTCwV!jj_9PWcr;$0K{L@)YR?3u1R_7Hgnh&CW&Cb*LpP)IigD$lwY`
z%>`2+aN!exu@K5LLVvyC0<iiC7&X1JB-ttHPg*YGR4m;w&Y_yunyuG5TFKB?uV2i~
zV-BF~J>6ZpB)SwX{A<Ad){D?lxDf6nX{COG(SC<;fw2hB_qq;+Z&GV7Gj%kMJBav!
z=wtBh_)1hz4@F<uRimh9eOgTZ{q9Ma<|bsM_qhJ?io8hnUHzM5;`}WeKcv)lRw==g
zYH~kSEc=wkk;;@;m4-z1Rcn2?r_<Vl*sGhzyj-|8l%(MZ=LXoj0xb(~mReG1=$^w9
zft3XvUN?0{y}M#LAW6z8xsecb_-xTv|Fx_w-JCT%*4Ki)X>ry}GbGq4hor7VR?kUH
zgi{GM$Bv~ekG>P4kp8OA&eoc-B}EdVKBT&Bn62r2$X;U8y`cKw?$Y2wb9Fkd08x~|
z{SK}de<|`NMj9pAEtl!Zk#kX482C2Akcb2iJ*RT&q(PiU&$L%G&A{Rp&x6vvnR9e$
z2^@FQfNVNKic0J&nL&syp_reYa)j%C2DGX_Zj))tuzX(M!50cEKtcg!DOf$W6RU>r
zE2ny3)mlT0+6BD$IRyqR*LJm!mm$5IH9H5O+$9<S+I`@HOHfUcv!_NQy+rNd;n9SL
z8u8kgnKr}n)@1s8Gic{aqsrp%K#b?qMsBRz-ca(?b-nq2EsErE<xsujjiQW7VwVR{
zV6Xlt4t^yTxU4PRk9>XyOuN_7moNjLNXcfr%aR1$HRXw${?Z%%Jq6)1Ge5-Xp&P+^
zylA0$zV~M#1oy8H7U%O{qZkyDDGw<<8>Ofcmq)9Lv?JwF#LmNHiRfsyX^_WQD?fZ+
z`Yb}$>{_vjrL}g@Tnd~tvr}_4n_M|LMx;~U^3Wbrs3|RlB}gz$Z%X3#YU>R?d0fL_
zfBk!I)_$615uxUJt=1+n6V@70f@JS(HJ92SvDA(kD7s{XWUd@B&j^S{x;9%23=rSp
z;vfpY=ot3ZPs+~z*dr7R1reLQ4MtogyaAE65Q)uay>sgK^cUhF29csI91=d*2ASf4
zGJSn2J_$KvygdV8GXDZC0#44+I-p>~z*V)f9-$XRNM3q991AuW_(Dk5d_tUA>;W+U
zz&BY@GxOs<;?PPN6xX9_vrQ`>c}L2KuAV7Cv3xVeS+vi#!&YRIz$tjoVWb=6FkLGn
zbt?7o&QsF3<np|qpZ9^JC5&3T*z^3tiK4Iq0bQ(CSv(ZxJvtI;TvTbOb5t<@r1ES~
z=R@(OB>%kJCV%Ob_7BdpSivi!w#S3ogAT3srysRJgZ+?GsxDq)KH-jsIDSdt=^0FZ
z*uQT_{s$v`;YAar+|mK_7Gsra#pk#ZiB$e$rm1Q<r8QV7Z=t|kXE@d9n~lBF4#W<0
zt+q2^{iPmb(Ro#esOO^|#!5!#8U|#JvoJQuog~uzUQkdI<~v1Y5C9W~xT)FZwAts3
zxb++69CyGo9IPY`;s%>4(AqTmoe*DB8}S1y%sx!xA1e);^Ytluqe7<JBe7{gcaYQ-
zE~(NAeS-TH(cn)292I@WbkwU_yit7Swh5kuW|xA3hne=df!nbtX3Mr+)p6V9=b|1<
zVu$PI&3MP@)H`)g6GrB@=f9N^9t3KaIx*)jOqTXZy+R+&a-cT^NY|l;MeDjBhY4qL
zb<ZVqBg;pa4|vOU*}A8DGb=N?eggKVi!!;D@B6SD-`w>41ju%)#T4F;Jv*!3|8fv=
zzkzl3?k?Y2WhnijNkkAvYQ29N2SD??wNUoSTm^e#2|08y*XV5g3af0Kdsiiz-|IK9
z&A}Y{IxgF~*ev^N@Z`Pl_><++L^6ZOCJEU!{qjW%wg4TUylVNsly>}lQa^80LTx1~
zgONbsc9xYOzgmE=G$e&dHj>|YM@GWXk};OmI+b0<bOZOenu8n^+d_RRvk_jpE5%4d
z=!;-tR~)mFX@kA4Oj7IR@a7QGbT~~?)qxg#tYUFdypJTQx}5&j*Z@@7mT_K|T1b?a
z&bpMA9zqa$b0w2o${%LNetZk89xc~dUwg(@=yDMwO%NyEPEtq`Xo^a%w_;s*4p@O!
zpQp}lISsaeWuF2q&aLh!(jqXZq(90phyZ{DvjFwl19)otJ8yy4*7*uiLh26}`N()r
zPn@0Qv9V4T0W$qoVLd>|4hTS^PxyntN2s&G4(&C>=THFc11$DGBDo^Y6~aJonLf`U
zb@868wUfbMUhSipX<8N5Pt-KE-jqC1VzpeUq(~k~*{@t2J)n|RkexSQ1wMFa-3F0%
zO2(Thrt>(_iM}jj45d?(&4qX--MVSe<GpI6k&!a-Y;D0=)*R@W9uxO)e%JJMvOcAZ
zy$+#7Gr6h@k91*Fs8UtR2z~iU-JZWIlvx4}V@dCT%GZj7*L5)sqljX$X=6`T>K4A&
zY!QdwbqkC1CqSQ@y_;sX0%{!1Q@d|wZk`=pWIAsim-pt%1W89m$Ld1>k#Mt#`I=pd
z%4FKfoaxnfW$lcSlpLoO<&koFdI4Zf{0$?GqubzzWpQ(N^;rkc_vTI7SPcxR3B{CA
zcH}ldma=doYhK%%SGA!+Yq%1L)@rU~DWcBEHLQ=l%mr~ex;oS!q+*XGPTH^7V&}2f
z0(%w5r%RN24up4Q?IXWVusRv8A3zFJ)2uY?6(Q641zcR8ZBd*u*zh06LuC%9Qe;`$
zk?9#GTT=(=Eqh;93lAyAp%BZ{0I;(F2((fi-#};pm=3raAGIW$xP3|)c2&h9RS&Jr
zV_M$krFME(&h+$Ka7$xMUHJk?Z2UW9#9~DuqydTp-Vb^}x;UMhB8%0e8{uoI2*JTW
zj#G*+^8}yYdghgkiU7L7T+TcCi(6|QF5N8~?U#!5dNkdQq3#I>URD`rprZ5w!{f^n
zui2aQ?O&EVhtQPLcXmlYsWN8PhHk(8@C1a9S#+3I%>J_~K!A5XT<d=#Rf|_>b~FLH
zYxEKVCFoc7AH4Z?t6YVE14$I0SKMdT=HQ~}5qB|GI@?L{Y)am=e^f2a7_*2`(7c+I
z<Ev;|mO37^N%c>siw$jAosi#=bH8xo6nyO=)b}#@(&gll`}8WPmP@5@%#2-u4!8tt
zWshC4{0ZO`Cc8@T97F0GOWhmJ?MXK<XU|AyVGD8D?oA%J_Pe?grLP_50$z4{$mSO-
zgk&dCRdv>y3p+5A6)AE{j`Hg}#j^Upb%Gd9#??~6D(mr=sxsYJ@+nD5=2>s?A6z#l
zrp`+>n#jz08fu`!&C|@z^=<4BQKHx#r<EK|hQ|&qorBGm2H(uw+Y!)Dn$LUn7B@6&
zy*M%+tY)%DG;E4ze#q81&y?40iV~O29!g+ZvS$2jfJ}GDU?8tlm^_6Ya<cFBr6k*y
z<gl-IYU0zbCRQB|2p_^N%A~qne$>R~Jhn4srBzUJ$FhEkoqaJQkaF#j831ld=NfZ9
zsK2p#Q<mVGAOvMT!<^VIaxD1REqINWZ^maO><+V|FV)BnGoh!~wDig-)@FKSJRUr>
z3TO|^PF=bKkOCjt&rpgb`HwW8+G)ToikVKI-|W;c3c+>u+pZ)ssaRv!rR6S7r4A1q
z8+lX7&4=d3RTwpvI0=ev_f>8Z*V+DlNcexzMJ9SDEy;uq_k*-AV9P&;+HNd0sqeMc
zJGU<j+(P}YL+yPF>At~>lYNH4r11%W*>-rzvpX`)nC<|cUlT*>TTGD{Yf79LeO;^C
z$dszCsrhVeanf<_2IALOv}n-~{ngHV!FFgss?pwHfaj<@_I(vDJTjuI=@`IK!eDV`
zXk&a%|6~MtVHQpl&TJX<;;_?d)+(Ce1T#FXq(1wLJl*H&Y@fmqx9s;Yn2XPRpwP^Z
zgIAg@A)l9Ur3fcZ`(=-~dMQ$uZ|+2LMGmZJzD;sTXvWwiNwOKeNx~3MIigs~SER{8
zp~3?Aka*@*d401aiG5_)i^V!vQNN{I39Gj*-6`5FO^=2UmM<GcQc!H_L3llTJd6cF
zGnQv-t+{2GDj(aH#7)q?ybE=}-hvh5(KwR%ppo)ol#NF{rY-y27sH4$=9-AgH!((T
zR1BsTlPJHM_|7V1(Y@$J5)J$T+7u$~h5go|^NxVjM&qN5V}Pu&Q&mm~b|q6|rh2$$
zTkx48!0@<)iexU3Ouz~4<Z%7_?xcI230Jkr=WhuJUtw+XcD_Mwu32l&qED(U2k3ZX
z;$4>f-3|S`r*WP4gujZXaU9Ut`=G5gKzfi}hk%!19*(`H@RrR(bIq`vk}3NPJ`1tY
zH9`Ygi%rV7kJp>-iNr_r*9gtQ^wUS_gR)ss@(VC*wXj8uW8W^}jBIA0&S)i^=h%f}
zp6CfdrDK_x#~voBQ<tyKlczw#gqe@2tQFmj`}0(JH+kP41@hQb?rt|S(x1?EL}JUQ
z1#a(nqNmh+gL3BS-iQFQg8ZU}#AZZz#d=8giY>ZVpH$rPNFdZ_Bs1<zyM(tAnfI(K
zUYGMut4z8-0j3%sH#e?sV)ck4Gsr)CNs{D|n;#%&eECbK=dT6$Z$ox2g?l}>9y_fO
zEVCEXm&Z)~6&OXXS8R~v3V}g%lKtC!yy-9K+uH&x*A~3^FF~&8W3X{=e(TD9Aoo!*
zXmu>Fd^{$NV5<3Xe9e~;J|B4XuC5KMLPz5;^-(iRz_#Stt-quhw_Vfhm?h`x2fsEF
zo6Ao?A$`9|J)S4?+CKe<4}X*JQ3pIe9?6Ft=R9bO`abBjw%gK(7NE;j24$pW#_<5b
z^u+GFM^J^Zj-cK{_p4c<PU2`CGJU>lq*>)t1D$*J9meCl%TVlJUldOeCF<c^pRt+M
z$nT{1jYY+Qz_iTsyfvDJitR%sx$8``7^y*bSC@1@l?_kstOJqdJEdpDnbg*y)(3;Y
zeqh|4V4O28C$V3#{7NDWI$HPs@hTd7NpfpSyk@oP@-=Yek^A1p!O}$y6zH#pVtcW~
z@QCY4b3R!S-aIj$69&^J9X#z7cU+*BuUv0e&e+p4zBDl8ww2PX%csbJeErIa?83c<
z%Vp;PYEKLEAr0cQK1e2yf;+WAe=Rk9dzOezSD0J0Szcb#yjpDoREv*YvUz!GWAL%S
zzBwRz;F;!&$y9fO=})_OAU%qZl~Hw8h3@HO;R-qwIzNX-$6{1z<z6FcoyJs!ox4nQ
zA@c%c+a1lbvwPh{2Xb`T`Q%v#l9&>ZI8RPW=XnBK3ld>kY3*rzJPRrd@{GpCfa)ZE
zQEH^<Zj7XnJ$p@Rlh$sRC|(n{!dsdzLVZ?@!<<&jV-$CTmQx!@)=alHAkKoE$TRc1
z2e=?MfYn$KI-Wl*?pAGvEX%y)*!+NsXNCUKL|hKxQT=4yBWWC<iwySln2-H}H<F-5
zEobo(_`Do%@rJq~Rn^LYzz2BAakz-Ic(u=xW;icn#dOD~G)#RFt*Hi7)gtBANLGNJ
zS^gus__stCW}GI(jZsr4E@Hv*nB76utt+06QRdeKBg^Qi^GUoUYjX?Cq}QF%ksk5C
z63KPx<-V~Ii{#^XkLJ8A<Xc4=)L2L3!D3yW_pnXd!<>O6Ln$~hwM?_)Lx$Q4;2Z0W
zY7AQg)@CcBXi9V}-Z%SPvb0PbVfw&3Io{~Fs8?7tO1adFLeY0{@J_kXBluk}53B2H
zBHw98Wh>BIltTt^>lu<QC>i!9LMpA*8A|6qC&8|ks19VK%d0!}7sr@DA)D|_E5vW^
z=9v6hRX^0(T(KrP^Ue6<B7!a@!cR)1Bj6HP2u|b-^H}-wBCzTVDYoBJ3yZ+<f*?Bz
zp@m6#1Y34TtMYGVbKH4bM`X`lXC4fFRJG9~WTQwWKb%Z&&u`xD<k{z7d9!&~S<8^x
ztICWeXeZLsBj)lYgZvroEH)5!_afTd7mDP+Lh|tpGWeEPV{9-~Px7p%Pzzx6+ThqD
z`i+ZAGWZ<=WoR?98luQ>k96qLG#iY2s1Lse9DbFK#|&<&Q%z89PDyx;`9ALSRNK2&
zrf|FH&!vPG#NWY*TfukpXOe@lA$99|8@YOU+4)Ag{ev`Unfa#Vm>pS6FO<H#(=Rsr
zD*bKX12*4j_-9&4LTkdLE&vS3G_o5kB<$>}jliSd#E-!KL#ZTB2Kc2ZG~d&ImMIQC
zi(d&H6E<?0ILI3C4!+^qFvu1Gi`dWZd~1~mMDh-<neu$<sii$I`9#eFk9Qj(e<a!d
zN9U)EpMYUdO9|JoaYx-&xw@DfQFIgCOa8_w>nUuOV4cIqRnX@}cp=%fE@7AheAs5u
zoc+MWS%~cqf}HHin37z_?O3P<p7LISs`9ts!l_eMg|MTy7kX-#iu>bvqV_kGY1$%R
zx>b867(*SmXsviDjhc?btSC)VE#4(>k4f`0&S&ohTiNN!yit#Kv(unG{AR!G6rT|J
z6==M%qj;43u|$92h`22Y==S}6*aw`A;QZ6dv!ifFLq)`_A`b4Mc73!wXEKFxx6`3W
zPU5oa39Rm66?Qo+9~;!Xi+o&ci*;Hvx;Q%d?8>h))A2mh2BM$LooHnfIC3IrFP1R#
z?V5UzLfr(y-`-<*R<{@}(bTWUspMXz4DIn<Y1pY5Ld>v4Tr|ZJ{N_Bo3E0%~Mt3RC
zI8+-GUWNO8Eg1a>RF}%$j#ijz2~ytqq`{_sR>(p(6kqg`d<c6Pb;~3^-EfOna;D!5
z2laVYWo+pMNAd2SgsB!9uDPO%IhR{&^+ak+_(8L35rM6&b~$D!#7z)E4E6noY&nee
zW{)k;rWzUgA<5agSczqhcDw}rF9QGo^=nBY7b19o2N6=HH8eE#RcQ)T<MhzB-`a=T
z<;H+R*AupEKJ2b~F35+OS#Bk`$2V!V%ke)HF42Vmyc%Q;o?M0D7SLeUHk5)aYan0e
zfbQJOr>ocGtbx-R&Qg5sf^xCWBJS0?{Bj4wH|8QzDrxc3wns8FH19F1jy!!{C}5Jl
zUbcj(JjL9U-zF!6>GL_dsvdOJ@Oe}be3T=8a`p0;n$F@rI!qi>x;uL!fLV&U-S^hf
z?H1zw%bS`aIZN#nJ-T|M0*(efE(I%?*RqJg%ozojTMvJRst?ex%`I~_@#F{EU*8Ju
zeVnv2hH~b`M+3YhA4tC!EKzr_vQ_SMEL3s@iY*8cz3#P%8K!m`vASZgmQ`RGa3OQs
zB(Bi(5gSWPj3dnA>Fz<&gJ588)ZZ(A-k0>OFrg~X0U9T$pEeDD;bC9Y>>gx-=vv=y
zmi?%3T-Y0Ec{#~qt5A_oX-FW}*H^E-F<U|OW~s!u5JOo7_`Fy@HyZf^exHI8b{_1J
z9$>yq-+a7Vk8Y+%=0p6ug(8MHBf>hIu?(rOr4gGjoQUq#x6`LwMWX3+=1L1T?h>_)
z<;zlG&YCj{BKZS}1QSzlB5SR$M$v;$hpOwX6Rd9GzlU+dCunm_f(<`4!j{H@jU#^8
zz-}oEZAC;!lT(wM6QM_3;6{~|y1BKb_AP60_|A1Hq%IUJ6w11@No=N1X)T@GP^Ma7
znIA<JmAegx%%oESvrT*TYP|^LtP`7Tr>XI$!i@;6fU!0f+?<9rwe?>HU`agrLEDH-
z5B;9^u19d~)~oEX>EhbGpxX4Br37`&$Rm%6p6|_^Z4-~=p$l3D*wt-Ege{p84M^hf
zGD}Kx-p<;sI=pxBe1{cXSHr`}IQ*p`nxl(j`oG43GTpx#2+Ds61YE^o1=ek?!6yIn
z!>Uzmo~=c<sv1vg!sBjh3Mft0X4I$E^Ph>Me>)^m29Zd1Wj_cKK~a*JJ3RoJ=d?lC
z;>1QiI9M0z4gDL#Jvp!7D$h50(YhBZZ)l#+{`EWm@5lZ>cxE)ecpRzkV`NB9auLZ6
zKCWly_-a`&V@Xg=qQ5Y#&!#1g;&YNQv|X95d4HJm&hHeO$Uy?+gn_HrNLEKh|H{Jq
z2Os`_wuTYHhllbx1u!w>eG!J>4=g_u`&bTl@O1Zo0nWj@0+O=$glV4d{O|uZ`-x*%
zYyBHTg0n(SQhUOEBTZNG8O}?|D`R-|{GQFG$ov1RIh=dfAFC8?s|o<(tL@kAj6Tkb
z+&23^F!u6fL$&!AP8-)vl<ssU6XXZL%&JXU!{3<}+VST=bshUXgSYu-Ev2GoSg~)9
zS9}l4LXu{Bje)Q)-oLd}q!PZ%LIkdl;F+N#X`8ON*hstJ;Vc)jcoQuj_TS_W)t0|8
z$Q2jPO`>2%K<N{TAjwZgK7}oR#`GZ$o7RWsMP?Q@;zS>dqWq?`DmNB)aENdoj<jNM
z{9BfuTsWeS#R}Y^LYOdbeGpgS)pvPiT~nS5S_rB(#ne4F-#moGWoUdlLv;=2?zbiI
z>_>~QXW18ZvJoHZttG5Rd%4n-Q6=+(Ph>wkErzCrya@Ywi}ZMzs*C7NYsMaBZlosY
z%)=k2Ao%kcV^VDD0v?ApnTZQ)gAr4`^QK|<)wcYg$am@fSL|3c+#=*r7;pP}dkEU5
z>sxG@O3!C9#UxzLrJdOyQ7z~3829v+Z<)!o8evRX8PJwDEHchP1`zT3=*TIB>dA=3
zCJ&csgp}5f9pPsI12@B@+s`>HNVa(CzyloYeLvL4G}QbVta`1#*O5?JQKQo>O}Y2@
z10Q9&P^c@<(1hSAN^(b7_T2NSKf+HY!1jFN=k1qFSz@2iQv0ll%VLcg(CFB>DHcl!
zqM!&PV`-BZxK8v0>RjQKH%BQo)6T&D(kA@pGL2fNr4Gz_2NLpFQ#-nhQgdeXs-Ijo
z5QAj7HK7@w{shRYMUGiA-RX8usrm=}F;()H`<QQdD`GcI5ePTk?p~|%5wawidg%Pz
zf;5iD)v7$#<4eZNLM>k1w}EWsUwnDVTZDRCkI>7D*85toEJig~qz<rYpiPV7vi-$f
z=&>HzwML%>jqDXi$Kpo^fOmv~Ru3oI2xyZCE-Ap4V1r2G2ai(SM3@Cku=QCD7{biE
zKxujG{eF&J)|_DE+un!<$#G{L@wIiWL-o!uu#Qx09Y0&!udL(ScW!iLco(~cgaum?
z;?}16vIT=Q+OE=6&x<RrtrN>@0q??j$gB6`1%y-iSGab5-Uym*UTPB`p0sLg%9I^z
z^`!Lm$C{G%jz2b>jv6`GOLRzy%>(P52aaPys}?>=2pTW5t~=>})WB27mBFv+2xFWX
zZcb)#OVpmP8ZRKIcpl_WZJf6dmsYW<&4(++Ys1`>1Vrv$!ns;+&YRz|I(D~UC~J0b
z);bcmdU#m&mVd<tOGvb7TC-un1AD6ZMnZFRqF+sdI<>d)j9@k$zL%D}uxk+3URv>h
zCbWz=QSZn@|66#i(L9ZYoUveD-9l51=8a8)<g(`1g>R2_&Nokyx=K<4{?R)l%WW)k
z7sHIH&x6=4Eln4sF?hUH_nD&WNH_JWiO{5n(N^k2<_u7yFqc6@O8!v9njR5Z1D(Hb
z$k%O#dsUt%IP~`WPfr449r-h)-c2m3x3ty|x1Q96wefU=`PrYniv7t0Xonm}<++7A
z5NQVxUw0sdNXx%PTP1^qlTWk`CsbZ72A1;zBP#B}^^dq4UTDS2bP{6qQ&vF96N^AC
z+@FAgUG`8P#D(bQkC7d4?HCv|0A#xz5a#JL?bV*>d4VjWwPZGMX@y~_okKHts0?{j
zc}p>WQUVy`JbKHYYWv`@(jaql3LUECy=fmoEq?1nlm`-hw}OP{B*-{(c7gr!$XwFZ
zI=+IW;G*1Hc1w%uSKsV+-CvHt<sHwsTo78~A%j3cn;OIQA$RN(qO=-+2??7ibJ|;7
z&s--)=TkYk+cNK}=<`ex9kAz*vh1}M-0<6`C~(iTGxG|M+?d5<?$Gr4_HTW;n|Bra
zt(4pNW6e_nk2rUeqPI`z$Jm2PJ#9e=oMqze+cE9L+9|p-K&>{Dppq~_J^lbFLbgOn
zz`p1@oYr3AG~_8gu!2(IIs<FVXc}%7>$;D6)9g}uu_T`N)-luzHnBIVm<}NS`z^1?
zT9u9kvntg8V|Xj)kXkd_;YSI%+oRm-CXUr?C20?XNl#SyFVl~!kemnbV;<|LumQgJ
zM{2EmE;Js>E?H$-NJ?!_AFV4iYjmzFgv10t6nxMB5jAlT^|&YL6DRriToF_x00wHe
zXG2g_;Ie>6tL#T1*S)=HlRQfiw~dka!xnf{rroNMYcqA0gAX*zOHDR>iI!g%7}5D7
zs8@G1wa2AJnm+sa6V$j3ibf2@PEaM~+cGeVFhnL@G8d`3($iTiupf}BG}OP|pF1br
zX*LG;B37<zCpHN*F-q$q!67#{!eECQ?W{CL<MvuEmIW^=aiyUvI>LJ$NU;flN6A^*
z-jB;#Oj~m|=|!hFZ{V++sJp%7uqw^**X)t)<czXaW%^1ldS*I6*VZfJ*i`r>80jtM
zc#gAcsyMWnDGwHP7rFg9N6%o6n%C*?m~5=c%p4vNR7OcoL{05fVpvAh60Tt%DL(!h
zghin*LR_JZAjyma2jhIvY_t`=H}T%_K)T7H?V=`d-EngRHHMl>Q*y2m?(I@Fr)RTm
zfuNufm+{Dy1HEUsMgEKBMz*@RBbg37Sh_bDg2vUWm(NREspr7<J(GFIQEXl*mtvON
z?qVpqN4zr|?Gz{J>zBflkzBpi+zRdZm6lg4Eb7I2@8>@wIyB%5?K|^1+e4+3OzG@|
zc6;4SX6>kEyo842PUivDPh(BTr6r!+^+~KoHt)sRO{?ptgz50OGo%3xGLx}=PP~iQ
zFVmjI4&9Uwj;LZkaST*N86DoICsij$>hbbh@;ELEdn-zsgJXkJ4Q38i!o!^EF&>qy
zd;nk6`4P>+<UE;DUi<fQX!j$$ayN6=uwi;*#9IETIo@S`v7xq4{EPL`Y3ok9C{BZn
z`1J>o@<GnlIp3Q|-X)M7to@)5i_L5Q7=e4V4QK5RJ>mu;w;CUXMP3frz@GrZA)m!_
z0Cb2zQP(OFxY3a}KVqX^o&h+QdK|yvnxa3%ih4^G{TA_4+x#XN6w}DMQ}yAjNwdlq
zghfj*Z$Cgwepw8k8uQhJAv8!j|7}Owvqrb+YUa(A-ZP2f#auIf;Fglynxo-~%QHP&
ze96nouLX3c`Nb1`+y?X$5c?pj1d`b7ocbL#8*_z%nu)qzGrBo^y-M361(hP>_57vW
zDUDiH^BEfp%J|aq;Fr5fljW@!Rpna7N9Pj)-X<Ze$|X|IsWC4>PC_4QLegw8A>|=r
zhbpGDm33OpXHE`<wzScv2KcKQb08BC-D4DAgFR!eZ3?~aTdWyWolbqnK;Hr};v5#X
zl8C(d%tjLl1EPuEi2^>>bgrR1PPKX^zar#<DRztJr5c3dKXUjeK51Oy@Wjl;*!fp0
zDuu4#nOG*MHjT7rzp_PSBz^292T*iBShT?T`1}N10HM~&iE7q_J|_$30na1@@Ym=h
zrXxp3rz*JLw3I{GdoPkB?PS30E}JX7cV{(c@h{-Q;E_ye8N0utf`Q&zyLT-}QyUA4
zN3@cuW_&3TlQEiQ<Gc0l16Q?WK=+-tho1nk9jCLx)fd{8RtV%2-L{0)$7suf_ZNIx
zd@x{@Fik}zdvo;?kmamo^#LfW`4vrx?U~J*)pk|~e$D5f0GL6~dhL~2ZbfoUzFvQp
z@1jft_R%ND6E0n&4LUUnQW?9p@b>ex69cIY%x{b7V6b0Hzfos@DcG0ft1$0##IcOn
zheq}~Q3F?ezYMoi<prL|OtSDkyZ8>qYpRZ1w94R1Nr3#yX8@`XVEdti#pIAEalvTe
z25LCrqdzvs&0IeWI^6>N7Jv58__Ns<g4u=%6a2i{w!pN~9|!jx4%e&m*E6<9(I6!u
zGCcal+*>w#lgOjLeW8AjM0o{ve&Wj_`HWw4g(d~5hCH|m@%`<`{IG-xgVRH%g|ZME
zmVhpS-QY@DNW(eIHlf3OsyRBldpXoeGWI-LI){?;#G$U8_n|$+gcT9Le;m|Cs6S69
zL@2#fW4VcJ5xHsWr1{!NOHNUST{)4bj3<(iVn(GZM!AP3`eRZcw+wYiI1$lHl@#j~
ziw5w}wX4R$&B-p4Y)-0wktlH^b#-nTaR%e^tgEgsNWyQvx|%WkAjygmeJ$p@7<bJ3
zh_%AzHmcnwqLh)|ewQ`uvZ+O$Wc$I^PM0^MGR=<Zobn@+mZ3c|LRtWpjN0|$1GMOG
zB?KH+N^v=1y=sb3voujVr`I4H)7<*F98d-%-U8oU%TDDoQ9^zi6^GV6Muj4*f8Bt3
zfp+GbB5OMSDBV5Bf?`;D-f~`oy`ZI;_cL()CVQ2&hE#XghqqS5#6*3YO{whWOI64z
zM(?qyLX(A1WPBv$X>95jOIr1y34PA|lO>;;13qLs!JXb}d0|0jD|OgeRD9fxDd9Q7
z$7lYnJj%*OZy>n)qAUxp3`dhzr{50)I=CvG0lqPp-hIO@ye^SXvWMgp-+AF(?Jz4E
zyI3MpfSw1f3ul}oBi(E<amM>s)6{0yv#zxji4!+J0U@Pt5g8%A(dhC_!_CLXJc8~%
z&dLcAOhz_iq`4Uro{$(VoQIYzkG~0k0I`1~X#R0*0yl<W$2F3ah?|Gh6&YPZ;Ml@V
z^uyjvHZmE07xE@$D#@tcOr*~PDZpPH2TUDly%)&B?;z9en0#V@auCtPolzUKfi@=S
z;8BV-(vm)(ivP==?4MsX1W0l>mGHu3n?BrWqKC~_7-+9SH;&v6b6mCn3@Dhiu=kT1
z(O^YP@D=%cp@>0;sxlPKgiW!=S`mMqkgp;wO~#+xhH1!a{1~c0033LzORILpKerJ7
z@o)OiKly*Z-W|V3__5?V3$|v~21ZLv(kreyE`8Lx{?MWFBh<0@11(pQ3JwRiB)^D~
zeBh>jFe=QQWea9_h6ZkP0Y(h9%1Ap}^B2lVXX+UhRb)E5BQu$3RzT7^zjE-0z(;u+
zY(N&0`Tu?S>7V;q5RwPyG)eljxB{BR69*|7qr(UuQ0=uwUzAX20HxZpUYR*0!>|Pg
zlQw~{6KF6bmJry8@?p;s0s13;4@lI*^@q(s=&AEybRM?tzfG0@+@t(mMh2|DP_KkS
z34YJkUxb`AHvsEr>WS(h&&Uy<yJ-oD9yI#{-iNLeAv$^$yoO}4(0>yLv|=}*)7Zl9
zO_Bl-*LeZLxiCFI%(Z!nRv4PvUsKS3mcs-u$Ox@r8jj>K*T!lV7QWieA^L<Zf7SW=
zd{ScjIpOoq=O)q}Tz=#`jV@}l7VfOznkrm|sAz8|9jusxJ_+Djr^{+!c#!z_cyF4=
z7}{o<%)-ySPrNf{iN<mhI`2~Z4@8QJTaf6J%7(u@i;J_sW3&|NlUu@V+(5|mpi`Bm
z(pRy(A;G}KGp5NU&sc}|`{UvMBjx%Z>v0*N3WWrXju;C#y>iiru~fkw%Q-UTv|Pl_
zdkeCkP{>Irk7e#7Qq;yCCH{f-72tY?&#^#s)*aTX$IUXUWsH^+$^5KD!vIqP!Zoj4
zsbYjz*S1%V=X#O+JVpzZ0Z5mU7}UpD%0&ICmJuadd%?CSBJb5QpHi5I%DMWc8zDzs
zox~D+-*9X`9KJL;X8m#>agSS;kUuK)x&(WRQ>~DUUl)CoRO(9htTAYUD}{B!-qCFR
z6w~)wMa-~7#(^U~M$Nqxk{&f17=Ea=8AO<E%bpOJx3I5QI9etqNq6L!L(7gtFFG=~
z)%JjE?N_F)<a+X=nY--)Y2d&?KRAjE{WV-E9|r(tqeryi8muskxH$DLel>l~Lq$4P
z5`1{7&CgA0eyF<XBqFMTi)LoRG!wsqWOz30b)j~D$u84Q1maMDnG-5y?4*J5*o7L{
z$~-D8cJZFV0M-<r;)$xfPjP+%&Y(Omu~=Kr(8Wk?s5<cz`&ORGrNrq*r;%m<_5Oa6
zSfFMK3~B@@`f4{gu0>{=rYktJtR04_0~VGL*=JfXPmn?PLB#u^7>a0E{q8MdEe4&%
z@txe(&9Rt(Fx-Ny$6Sal_S3v?wNjzOrciwKqVFTuy;BibZs=jJ3v+^_cG6;rol}={
z`B4Jdnf{Cock+i_T!z$zbKM<3;R@HU7YhMc59gdfWJg_#2iWied~gFSwEiuwK5-o9
z9b`DndtyTm7PLwev>daRI(tQFYG{`ZCu;5762IO~W-9!Gfumk2Xm?UlZLq$=BeuH9
zgB0sWeS$Uk9-8Iuv-hG^b=H&X3HcJ@K1EEZoaZn!N;eKn<3c*56@9$2Lk;Izo`1Kn
z(+WDE`a07lRVOBErf#)I<QyJhsni_5&l9UQZ^r2eH3_V6EwWa30*dW}Xq%S4y;H{F
zC(XHt{T5JwU)YD;EJ6@UM_XIE!w|bP5&osL(%=`Z;y?2o|JFQ*slWpVlu_w&wg!TM
z$^uRSXNOs2$y#9r53eT93pM>zsuPxC*<wzOX)N->?kV1=&NP;Na%Y-1h$$1Nk9m*v
zu}g}(g^)kic5oq|+Ht8lFWBe>84_IB^o-UdhtrI;j}`yB#d=wZser^nDaqr;ol`Tg
zY(Qqg?rre2y@N&efggf?O5a#tHVu2Ys?Y(yLQb}yEwi#T{LSBQEI8vk7exo%YaFq=
z&upRB7T^g9-PxBiGL23HN|4JAcA8PUw(og_$KY7Cy&4e$IXAgH0+<^8VEnQlw0>WG
zC47Q{g_;8-ri-W3H6k3i2TQLvS1=7Bj_Dv*9@M+q3HJ$4%HLj=i=ytp0CEKs`%di$
zzW<(jK(ce4z6G<)XcJwCzU*?m8zN71`DcF`0j55Gk(0Rb#?@Qx!Bf&kmRm=1*y-3K
z16t=l<7@)A8F_iZds{5<*G!f*hTFs7H>Ni}Gz<&P&2*E;izT8S+krvws1^93AMBHM
zY1zpKCXg`pIin~FU=feBKRT5eOq`5s(6x$X@SJ|qu&YVSKFavcW-qaGxqav)V?Tr9
z*8Z(nKgZ1MnFs?hzJ>}lUThuXxdfOaKK`7031e?U>iHb&b(^pPcldEi-iY^BSiDmD
z@T8Ok$Xa|@uOxnwyf>si8`B8iPvK>@m_)1kTiA+L51wz|@^aoP_UgO0IP>II(J4i2
zp{S(B1T4fg&*ENFE`zz2PBr{WYR0)xk{%44-UY^%@mWtDE%AIxq&?&>k~oSZtSbW&
z$89W`2z0KZ+e84_$Vx#*<I&oF8H*n8{-&Zqh^=mY<-l3kPi(dC?f37J5WQVV<%t5m
zoS~ZBf&H%Q>R=WMf7jdp9z!ttXFmah0vK2F)E&cJP@2M@55SXtc}D2uwv=++>ITbw
zha+Ib;qR5G<$Wxv44?f+1pfZ_rd&3a&JH93sEubZFq>rLM9vFtu%vfl?`i{cnRmtc
z1Pe{mh>M#m09$Ed#5io0%uYR<MCe})jPz_=Tm#JtQ|v4^HVq}-cG<44iPqRvZHj!G
zV!@`ty%!-6YdeX|vb|r@-*+A#Qa>kJot>MMb!i){UDm`MsfeV&$K^O~)NIniV(hn<
zWfAdHpPn)uF&?3ykebnEkrsN^^M@$%Kb)cex%_C9ez!es6Hcp#uN_a1zXVC5Rk}>5
z<&y0CBPILw>8<G%Pv0~>!;1zYJ<9i4T`TUTzOlMOk?t4I0xEhk(yjLRnSt*5ER4E-
zkQ$&Cc-sr?h_oL;y6q@%45P))aW6v+1QIPRyPk5pQUVjUA)DWo%WqdEhETN6+w;Ph
z4CTE``F|8oo@932<=rr6#VJA^HX&``BM-?5)cbGS*XI|wmg%(hvF<NNN?Nb>`K5RJ
zw?234)FgXvaFsncb9nVR5pKJ9mVFfV{JK&v<9@sGq=aJlXkzd@6na4UT{%@a*6I~m
ztF+dgcmEu{=s13u{~pcoC*Vsg&X{?B`26#o7kDdX#8TQVN|R3(x`&1yJN=3Ex{maw
z4K@fl0@gWKcI?_=*Q2rL^H7s;);f>d-|hn2Q0ea~BwPd8jxN#LMZ9Xq+r0hmds&rF
znZIpC94Ip1=2rBp87E$*&oU>k?XQo`$?17tnoo_T6?<-=XvKKwdl%@H3@Ur>_}{8K
z30?L)o`&2Kcq(zyZOJ}qlV|$mof_<i`d}mcHb1Q3VyTugJRSY&C!qS)^#0k*ZrTOi
zKt{XL4L5?O(;?F7^H--72Pt0<_a3uNp`P&Gf0~)Q{d;ES@4L$?-I{|>7XjQ5km;4P
zLly<WnH27UiE`7I2_1`$BzNj{em?Nr?Pg2fxy?60mx8bMd@qRCUihONqZP|5Je!|@
zqE`}3IieNT#6PqgtKbT+I)w~Bhjld9nKuib%uhLq|Dq4=?R9)UxLf^F9~0gV9?^}W
zSU;rbljBt3WX|#Us;V@oY9A^f9)iQhN?}l3H3j==_peFxA0bx%IFi<JkNt3<y6bOj
z-GORqpu%c%xa$nwJ91?t{y(IF&;zX3E9w!7S3F<6s;!b)t!olxRo`O1A<RyWQxSQg
z2OH=ATDXtJACLz_GD<2y)yhCgyM7=envUEk3t@<{YOcL@G(KBb@0tA;*sWF(lBV||
zji{KLmoYwdbfL^1v$V)e#@qsn(40i(Wz%E=Q+owHI)ZSEIBsMsQ=z-gt_^qWP)O5A
zI)@e6kVsj>i6l9r7Q4&bHW#M;h9}O4`i8xsc?+CpVq&H#qX8&vYLLp9@VGU~SV@U?
z3Nw@VlWm{w^|}L%60>v)N!xJ(l-iR99vqd8;5?cxyR<jyX<D&1P|aN@Cl{F_-op>H
z@<li=Eda;)irUnCwRvrB9^pJDI1;i5W+q=x*>HGAUn26Z-ab$>t9d#;L$&_#_LQ^O
zNaSye3d6$+QLV9`0KI;ls}Xs;>c8s&pui5URh*Sp9oE^qgJFbphW_|p{MxY`iAr`&
zzIqxhk(>!+aX*LKBpAD_K{~cDhXeiHh6dXU<-f&*zdp>8`;r*&3!(VO12{o_fjIUr
zo7s=uF5n$5=sbq3kRO@;J68w0O_K7b{LVFK!-8lRIeR99dYUn_D2=_$lfhuvn_G5{
zYiPU673=x;#gi-lG>D&_+{+`rtsFs~Cyv*7{lvVa_)yiM5yF(2cAZS)Xq72i-j_pS
zBXcqJqyyEvq%MpU{i;e;-ge7FgYdp<5cBZ*Ug8jAYGvb2xtWxz><5-`M+dGXUtuwZ
zRIz8}VkKYD&``+X`+h)wir>D{fBF*tbNdp%^dB{;^-toTy}j1G3n)I7B$9$fTYdt%
z=7C2z(48;{;;zuXuKnzKAHHjLT}k$-W|G7%<X%0qUK{2vN9;Boy}3i}xm?HiO-1R}
zR_jGul_Cud3i>Zav2xLk+?rSEV87!Xe|VW9$&IJr7b6$i2MMQ)d`E+`O1%p8@9n2)
zs!#5Of(ASh=X?S)-?EtoY!;|mAE2E<+5lFsnyP?a=%V<2TTnJxZWq&_Jz1N03<#RK
z9@@%y3}W@}7!2L1Gw8EII1hY&TpO8JzD&FK6Tn_^#VX*iF}%Rsgj1`<={ugusc_Gg
zQr>&NBXJAyu1(fx^XU4urgahHAJuYOx?f$p1+8SaYnZMGc0R5r)43%6IP1GC!h4ud
z+bF4#x*I)6{C;w2XL3kg*?w1NZZfJOa2miQA;2OySvkE~EO6fCQQDN{w(fP)^AI6W
zlp1ee;LQ{**P(b*btxi*0vos~@cNpjg@b;?Xn!Ca$~y9>_4Ge*ly$=~l+!JGAo4A1
zx`Mk22)~KG91(YRd>Vh}ohn$HFNNuf6wE(#-O7xB9vX#>Q{wZl;iv9xSGpmPg@rr!
ztdCx$?^eng^oo+x4IDjqdVx-zv1OkK#RNXL3{y~Az1NYL5C0^#fah7^+EbmPin19J
z9%6vzTwYdWsKcVLCfTe+qacm;CFBHM0<p9XA<%xiJ-f0TuY2CPNq!K2<JsClg3e=F
z^I5oi;6bO>Y~2d|b8}7;LW@RyaF6syO}n8mnzi+yXmK1`Wca;565&5c4!=!P{K_3L
z<_?)z+cK9?4(_98KF7GAfhnY8KH!CLAsUo4{Fn6No}7i={`P)t*jw5A{F06{{u;>l
z;CA~EfMrcvXX14oQDJxsvUwMpiX5aY4B)=9MeX+lt&))Nx9o$Y^d!I|V5-s^6N}SE
zs=P&YU2_RXYL69}mKN@TmYFNDMmB^?lU+@;A%-EyAT6!~n>GXd5Gm0*Rr$;Jr=!n8
zU}#f|COSsR3T&euG6cl?=u*GJ5gmAa&4{i*)#w;D|5PLy$y!U`1qIt^DvW3hO5|ra
zd&BqeoaPnpMNkG#LGWm0UFNI1%?PH_9|sp33)oY@gWN`pv(8}WLBEX!O>EvtG@fex
zg>H>{kHhd64DR-Ec_QJ)><uYd)Nip{rwx3CoMyP)_KWqn{iW^7k8>)O9hu;sf9Wob
z=jtkPl`PG%-gniwJyvZ6(ZAvpGyN_Q+_~!Lh0BYXC-R{*L9vV`kGK*1X7lI>LQ1Yq
zLD%7oiR#ncaNM2w^ns`y@$#D8IJY^eE@gCf=t(xQd#K#=t68JS&{=bBs#^WM##rEy
z=Ob?&<5uV2RvPNO^y7(8<^G)AY-^1&!r>ohqu)2UY8Q)#5(alht&A(7|GKjhBhlTx
z+9Xv6inT-)qJ8MNMXY(W?7W|!_km3Pd1!=2NR_MiW)8AD4c4mBnvkyKpBwPMVoCar
zNEq((LNL0Zg*7QO<M@!cxTzn@2o{v`Gi{)nm@L#_@97`*CbncJY~}IN!^RzbGU6z4
zG1Hvh)DjNen-3B4n_&!q!|5zH(pO^ryf{=S&m`b;x+APoS7_SEPq5vOH)BSqxGUn1
z0@_*<5V5M$$kGg175&~_W2k4nSVW}C)2r~Bq}u3hWCuLtuK|ty50=FLuaInny)t4=
zgG=<h7hYbGzN|3{5mYHh6~K1R+3#*dPdjnW-{%9+(b((!egg8H4u&3MnJq+3NpL4{
zyt=YIO-k?aR`<*z_=%4f-$B7fo`jwpXI=v3ZVSlA^<5Mo5`CCqP>nTz{6=%X)AV`M
zHt5Dlz{JL!5{i88lb!LnHM-r{tReov0R{G*tP?_61~<+p=p|jLQhe7ocUCi!(}E5M
zP>n-LT7eVOy)7wX_Z;v4!QNZP#kpnMq6LJIK(JuJgFA!-2~HqDaCb}4V1*ZM!QCOa
zyKCVP2yVfp3U?{oEw8e7_deb4oj%>K-@W&}`_BHme!u$Gs#@QiYmPbQ7?xCPYaQHV
zc(R2DZ9f52Yuilb8I7ZsGZ^hBV6^5;&=JB$?^oEMwY?`&Zvq4zlqj@u{-C!Sta3kE
zzNB|pkmFMNOmVHjL!sEJH)Ge!yB6*bcTW!_aSp_Avn^iWm>=C89e;BrZJK=O41H_6
zHISV<51cDo)IZr7DjB3p{D3{nedI}69F;y{wm!2p*nW2z9c!x&#ydY|Qb#Rt$lz_r
zQu;jsiCvC8u;#q;e6_gLfJ>zuSTHL(Dkncc%C{dM(R${j365eTuYC9vwwL)aqOSsr
zoy^U5uh_Y&!Xx=K;d1<@8}(Pz{mmhYr`dWv0Oj(4?#7NYq|x?1&SLbArNDDVV<0ui
zbEb~?f#yesXQm8`Ck@Bpo&fvSi;TLYTjL+(#T;HqKW<x)ZEEk($wO{N^nTGp@hHD|
zXa44<B$eU!K(ITM>Li~tJ1|9Y7&vi-ul^<Dd!pv9skji+W1{m^xH2PQxAj(4&BdF-
zr8iwLb0W(9fct8TrBU*eugY|Ld@po<y-UCQPKkT$ojGP_V34xSYIw-Imunu*+SA=V
z`&#a%#&7aN|Lw=N+Yh^1u+{X8Q@G`pA=Q>3e{1z^mbO@zGYjFVBY915@xFeWLq__4
z8<T?f+*o8HNTVN@K5Y_5Vl91yibevCy@}wuBl`g2!*;-0!^taKgyVfGa50|(fIbib
z@Crz~j|COv4sP(i^8E<${~Jk~f9NOh>sD7Z;^~{jJuNVN$Txu0qP0Y&K;Ck^H1<4`
zXZNr1+&6QUtT5nq9}$tC?`;r}glidLWox-uT=udGdgwG%=CPo!2clvqmTs;eck0*O
z=wdRS0LGGXiWW*+$jpMa`%k;|#3mkl4@}i3MGXur61YbUl?@s&G`%#Q_ISg%l7oAj
z<Qh!)QPnU%TIS<&U7)nW7_VSCO+$+sD{BB^TpJmM$g#9Qo`o(XJhpB^*Hr5~gcnU*
zngGf>JKXdFRVmRUNRrbi*@F7evVo5)$(Z9!0VI%2Ylh2kP<KFDf#*UDZIG(JXj1h<
zz!$M_Yt125hOqaPu<4OBLVVawP(0MPE7<khG#QRE2dSkX;f@Dmo&Z&ZVhN`rj2Ad)
zZ^qY}vamj}^HxA~YUk6Aw8mMo{dlq>^ZO}j$uUrf)br~2jcSNfvn+FS4#mkQP@gji
z_bh+24kaSl^H~B3(kr0e*w$9V(U$H^-9VkH;?@(c22J3awbZWLkPi*dWVo~wyDamx
zy64o{{M_Qk5t*lRv+XAFPk`QvYRW+W=Dga8^;YZsqcthk`w3aot_q)4CC%LHaYn|>
zKl0W8R%SYaQ0{f4cI;Vz+arlUHh?YZy%ZwGje3p$jukX$70T{!gD*UY-<EWWUqE;0
zer?T>5m<99PGd&BtHcE{{~Qc{O@P;J`zA8P{F|D{x0lg;m*(Kdn;PCu&k*9R+`I(0
z4dz0=%^J)&7MHzRaG|-MGM+oK;eL-kYOeJ|9lf)klZuLnwB^yDn@_08I-ZoOiut;C
z)H(gr#6K=c|J?y(RcoC$ny09Fggh!S8Fu3!Y5UD(@afU2cuDPAb$zTE*JTL=bicsQ
z6TNM5n{Epjn)icxU31no*spD*d4M5_C6LOatyUM`YT)Y!^B?FU3<4CYbWLzIs&iho
z{lm7c5<Y?r<N}V1Da|D)u64rUL>bhpBa!Uu6H-xKBId>wj_*7QcA?GVKORMOUWP)@
zUN(V4a!%zqe4AzmA-mt1R{*ktSNzS?L7Dk<8fgkx$8Fta_3?}@K&NI4duvwT`Q!Mf
zQgUyTgPZGP`c1&Ji8)-=Ctp{_B3%slO!?Pk_-NBxt0FQnKhp5Ojnvi(>EmfFt&5R#
z&a`!01IH4gP{8-W5u=(zaKO@UKf?9S)GC!)y!8UPBuiJf$ySmXmN8`!3ru__CILie
zD~Rw~%is^R@FJVL%*Rbm>2p!%TMb1c<MMJ|h)H%z?~ihMEKVta$h?t*^_`Z)hdxVL
zxHHVSY3lTnPT~8z;$lZB_Hf1+kb*C$wd@LVSHs{ebibN?^VWfG<e{2_m-6&c>vg)L
zy2&bZH0cdS^nQTabLaEvyiIFU!XC0~=hRG{GF@@w)10IqbyEj?wJxq=mB8cokYs8d
ztnz1Rx<q5@@o#BH+r2VN9Lf@Cb1eL0cHp9v$X*nJ9YFFeXV158&KCg>Gp3CB->0uc
z?w@;TVz&yx3Vs6o%HixFAc=2T`{Ze@x%un5w{4-l6KM|>a-wIlJ?8~(8*W+;{@2SY
z?<K5!gPqRZNnD)Waeb9^XY63f()R6}_n%e$a!e}dn@yHB7c(-UYi$slqr-ds0!H7a
zbPHqqaseJ~udBr8a%ckiFozRxo>!7qgUVm{^&)I-k;jfiXvYB;_Z?Y8sRDwXWUh9e
z3Oh?irH+R=O~Wld&i8?EK{Q>YM<6k0pP*AHxvp||xZD0rD!46inqC1OA4RGbo76Rj
zAzsw4gxYO|CZbDK2P5}$^t@KA_%2a<RMtf=S__wsS^aL$5kFZRGZc7f?nJ)*eqTBY
zVfDUO%()v0Tv%rEFIfJ3r#@|W^2-Oa2p1O_<riSjsB%Yl)UIZ<*5pCfqgI6JE|}3#
z-Gci--88Dq%YLo#YNm8k|7wKEOCUU>Doy-4o!mlUjKGK+7OVFF=e);tCJAZIcCNzf
z=-;sk#y5xhIc2z4Hani3UAt}@cB7tMNkYp;-S+cUYg}KhyO1yEC6s2YanF7Nl4ouS
z$$5@9V2KN;^Q^?*K98mp-lxi{8{D7f-d(iM%arL0e)?4rR`^Oil#J_r3cGC;Jsi_7
z<FL+HemIti=a=4gF?5{_eK2D>o|?<MH+3A`barj1xxks#zt^gJv&ER<+?hz7@gieq
z*8D`3++{^jO;<s{R><AJQ#?auc%V_aJ@G@qGK0IuO?KvAs0VR+#CV(4=SJdQ`)yi+
zosLa(0;tt=6_XDb@6;CD5bi{J+&|oG=JN4+eNGk{&X`xG32O2EPPXB;nD`~ITOtH?
zs^k9_?)o<g@b65}R*q66&mJ}W&By;7p)Z`*p4WX@fB=QNu>%JRg^<AX?7$63-3MQ_
z!qSz_;;%Te9N6@Q#c+t8H<v<K#xU618^Cw8re^D0_^EIp$5zq5SZ-%dgvs1xj%9EF
z=00XpC&ghl*<%1#k@e?MNrP4A$gwI}3BYJPxmS{&gAnj%>WiQo;C{wE8CRnW7lhCk
z4GaNqsErHb`Q}}(72iC;J~J+2STI=WenZ$E^Fw(3eo`j;Q~1%Oaaj|PNW9o<_yuZo
znQwuM1hCj%Rw@k^KQTy~V`kGvmQ`0V-&q?H>WU^V#aS7hFeM04wd#f`_e&OW%s0!5
zFTCE3MpZ&I#wf(inwsUBef#8b2Y?u^stK?pR+B11;UvIV<#Kza9wKR!FtE`6!{n_v
z$M!^s6#LwqVuTb^5|dGa3tMJ2A)ifO1NGax6dg*EM<75Yt2cTJN#o(J9YV=?36Tb<
zp;@2#&~jl$c3R(uF<wa_JGSNE5E#L=mXYJRdgas6vlR7s`6XY(Dp>&=;3uFb^W=-&
z$VH5WYx|1hM0abqE>BU<Xycp<UBZE7!eN)k@VtcGwyv0%%C8dWFHk*P;A$nwp6kBU
zc6zbmit9gBR{aV1|A@5e++af-2z%Dq8{~b80JjqiLO}-~J+Xy8k=UXA?mVM3AJ*+^
z*8m*yDM$^Es$^9&HZs@iZGwHMObke+4li@txlx*LX0Zs4@0i}G&D}bPzg<wXPNE?)
zEuDG?>Ti%i182#FU$L<1R$bFk?DD-d4`kJzeg<UeotQaomTWP3(^K`N)Q4q{5^>ze
zTec{Ja(O*DHlDwEra!s}*&!B~B>eQ#pSPgjZI~I0bn`~WZ^SO_H%GUJROpm31G?SP
zf$;nvo~j(r1viC-wv_m2C{+X(9E6yY=D&7Q0;OfS&GQ`&e>)mVEB_Lf6?qI&AAPNU
zzl8L;Hl`N@d3(O4pF<`w^1KmWRUG!LY5<Kj2?O;7D&@-z1a#lXFgJG2UbQ+lk==Wb
z&*QH<#*1>93Wp{4n-yZsS)^EZ=JFe?^se>i1p<@v%7~T<>O#_sw!3q8$2%14aw-=r
zq^d(~L)6Xp1X5wi;T2|ElU!gbSgNOAeM-3{O{^Uv0kx*?+)SIag|S4R)~fGHBEAz*
z>)9f&CTHhcBKnQkS}{%%M+nJ+TzT84!7VB^RhrV-kgDWEGx;XD-QkP*v5@dYfkTt{
za!XP>@<FDOx2jV-E2*lJ#Vb`Hm%Pd;B1hBOL-*zE?6i519M^s*glZ#ALty#=)Z(xZ
z%=S#80CGyOv-eb+M=|~?tZvOVXn=w<rW#G262j}x5sgX4UKNdv$^l%D2wUE*$4U2s
zHxZ(p7WeRZxtwZ?$F&vZn-mqlbJiKb>S~DR5Bg&gDM>23OxULiCzWDDZN9A~TF9>q
zPj+@{GhUj<q>k$jX;;6VK9~tG^zXbX1}#O7AAA@ZiHfH+>m{>7FKzym<?oR#KH8io
z;#d`h4=KE>nEmiH!iLcVZqH!uRq!B=lQ3Ons1>Wt9=2n}>N%TEWenqAsb17QPk(1e
zI`}B>ZL5yr+?=^x=MH)ZIDE-su8TL+ur;RWiSFDtS>00i@kirN_RwDK87dOO3HEks
z4?R&=`Ot4y1Gf=P8d;>ol%2BOb1u52vZSQ8I+UMti6u&>iQW#S2*gnM2<!0HxwoCX
zo|v7MF*0=o&M6TK-HlA{4P4uRJjR^{x*6?g2GIrL>ORec&bOO&8cgPxB)63O$h|FT
zwrr%%o~WK{sSl{AT>$3@_*`^CZW8Cqyb4An8umCUdQP0q9^z^q{I2!aimobdLju_l
zic;~MM#Qy2iz?OmCL5_vrIy-HYf}YXm%G_-Lsf88wp0}<o_$1&dylG&X>6BFm7pih
zalji8HrDiw_iA5dil6hvKu$c|y(i9Gl6%;3+V)01CPI~i7Nb6PoVS9XHvHSS=^g{7
z@uC+KdBltAHPsQ9>!W<eo&z7*G(_R9B~SJ#hPVN<ki&L?E=Xw=bnHuNibHN(Hq50E
z!nENv0_zcirEkX-buq@J4~sDsS{W}ko3;`|0&XK;!L?K=Dq(HkM!|_Mi=5a)liLO@
zxLcR(>K}xHRW_F_YC+>8({+iYb4w=;De)}pH)6jNCF3Y>;XGCX#mx2>*49u|?4quT
zeTuCDx($r93#nec8@^IVA3@}-*Q*}KAFm{g)Pr6`3Vb`C;@6mI8+I9V+wm~)|CNN8
ze)Sep7r-Z?NVk_b_dG=1It>p4oLdk06YVyo#&Yk4Eat<10-kTnw=2-%w?9vII#2(4
z8YVwKbYH!VIYlHiwC%Bo*r4V4II`Qrl<hb9wdi-k3NCKOmm+!conC0?CUSfQWBbPD
zQI}micW6xC<X5Fj&)?*b$NQqYju`(0gsp;4XXkDSC#{oKF6z*+L86{>oQGSH*D?&T
z=V@WAzmnyjW4Hg#*zNDPjIJ{IbyDMbCACRVT%{cKAhP?IDFJT1&VUmwS4f~nae1;8
zA=zx@A-F<~ms(O0kC6KVUxj)TP>~1;v&#B|Uf!E}@;=m6Sq9OeT??!&?yqc&hAuJ@
zvHZrCgW_V`aV`!ueWf|nM#eC^_&pV6#j<L%nAEL<qit_o8RW@vy*hgg=}FoT<8p<s
z!odNj3;vN*)e}N*N)|3eduB`o-I&lHyS%W7F?l?mA$uyRIgPHv9$UBO-L()WW*kfL
z%!Y4NlC`2-7TwHXXA(WjNZSQt*+0F0IR{qKwBux=UBV=|0qVA8uw@TQbtIcAia{!N
z$RYXq#4x3plK2S;Fzj@*%-%9??QYBH^TYEVt0yr>r%h&)9uuj#k-XI~G5Y>#rK|eh
z@KAG*iECzql<Jy~%@;`|MT$w)m((OEHW=S_y$|?o*7`&}z?e@kL%f4!ZulH}99*q?
ze*(m0ovvZ^08Hj&1u~EJ0wP3hQIFn;Wq#4fDM<H#_tbyf^m2GzF^moj>%#3%$RB)L
zAX9LO=q@<jMt$8J;Cvhyu`G~r^1i`pPT2nJSEC9^(l^kwUXYB8<@W6)4Ff|jQ`gEy
zqiHwp0Wh_})2fwN92=uB5C_xx)iQtYcLZ=_+nPGAA^$um)JnG&mVO3agxc5Zyf-Tl
zsJ-<|v2e3GR@)xktt!l^U!_=qcppxYi3ntPL5{j%QntgEg%CkEuZ;0`1Lm(CpU22I
zDykhQ4;=P29IuL-G`-%KbCTC|W$k30VO1IqRG1m==zjO8s-Db5BzdPnpA{-oqkj6b
znF++uuIxl6T@H4jz``c6FH1JW)+dxuJ5z#vO^(fB)(!(r+DsYa(bw06N}Dw@P*=+>
zjB9l1)-F<!2ImytkT$L{PUbM%hv~%GI2y&p#z-6Q=Wno-Zy0;t(Auv+)y+OVGy7~X
zNG3oJF&_yu#U_i9#OB0*6pO>-zWsri{z7cF{QhJUhtMfPU1Jl6p0<1^+d&o`&o|Gv
zt9&y|TMpJJ2{G&6Tv*&+)O68zow~;i{E;k}5Z)z5|08_;&_zxTB3tLlUEM5A-8h&|
zs(=kx6~u;kQnc5TF|Hf3l%}7z+Is@+X~G7`g1(Y)Vb7m8pm@4@1#t@>-wSjUp|M<y
zrnaS^c=NlVEFDescR9ACc9i`=$@vw<nB$WgOmzCF8c>`KTft<bRJ0dMQx{impM>>(
z+zZLu{n3>E)xyTb25cR8dub@(tD}2gPHSSH%{SbNW<*B`_^hr8>oxsH6#*|O97u{0
z-;Tb7J@`ZKo4pRyv*6}bPhf^~%lvnx-%wuNhg;P{-O1IbGWi_;eno2f<i|NrY_NX<
z(kM{`^;dP5Nz>8=b(|FlCV8Aa^c?=9Y08^--ZR*Y)CH8;=Yu;ep;X+s);_X#_$S;V
z8!|*<m$9|M;P+fua1Vn>%pzfzjh$wnvAEiDzNq0FjXvJm5qv$iJ_1;pvBqngs>e0Q
zVlj&%+ih%v$#Sk*6r~O%8r?iVu_VR^Sr_EwH`Zier@c6g(d<;bEEXYi37O{&p-&gN
zXk{mXX+V<5$OF1WRQp%5+2M~nl`d+b(W>J%&)KW2v+?kh%^ff>r6)DsKLb3vg;(6H
zHy=rMS?WU%*vBD5wY^L%u5&O*ksqxH5;c=`Va;)xl{H*i-awXw85-=PqIqic&LC$-
z#i!C6pD9O~VU2(=g>rRkjyo4$h^xNld7}PPi&5GlS3#D7^;|PqSB*Y#DA+&0s_$65
z>X1}I2lYd?Z!dlM7)Q4~7Fyo8X57iH3yL_eT-O}FJ;vZJgkDzxtS4WmrrNq-Is0NI
zt%S}o$DHS#+U~2=cw+}dOX|K2ycgxr;wTJ01yUqQ^#Fs3tq)vEKUD9NPslQD_sbt!
z6q2aAY|QmKalNmf@NcjRH94ry2TGJQI=votnrV>^>W+IY^2HL5&6dp(3Wy)7ink17
z`J_Y0L@C)CR8tmUaAG3n_w0UXr^GHO&{6LBTv%d?JH=@}`-m!8r{zn^N5tPP=;WK~
zc7n%HzcTKnHTE0rsB6<S^jMRurvNm=VEuXU?j@g(9bS&bvnaN8<%9O<kEgAC(R_yu
zPN4fqOc9m;R^qsD`#^`>aXX0agyf);;7M>D6Ycn1E1WNXIcIFo;*fYE>hy5(`XSkT
zeDtJ8txQ^>?&)}$T1rA{FYV4`DzV;1Yf7lP`p9T-lh?Xaj(7n*??tgvfg%ga7c;q3
zIJ;oc!i<k_lEn8r-t}a}8^YvEHQE7L8p!T?V<N~iXv?>gH^v$*&?*<=z`zA*g|s+T
z*Rg@tgcgs5F3-L8s1EqaZ_9}vRuk_L(_uYct*c&#yFUTb0=@9QxMP<WROu%mQ+c6b
z^s+_%{$Bs4x1rMO;H&j*i2fO>8<_CnGWaK;z2qlA;}Re9egYlda+ZGZrEzdN8Ci+c
zqxX8}p}AF6f)`vgNSiC}9C8{ufvX(0I~k`nmmtf-F%zAG4EnHAIeASylce1B!!N<_
z`@BM)+-uz-@Z0V;Q;)xGy|u>>EdSQ!g=byNGp;_`xR$|_V-gdWtmfoWnlWw_6!ERM
z)}y0HqE$a_lr7WGlc2|9Y{+^t_^(u{KReZbcB(!7<&$Ky$n$2%Z^z>i=;Qt)nf*$z
zzu|BQM7k83k)mZUXLn9DT)!wIa<x)v=-Hf<)zD1%7?Ib7C95_&WS^jyIB1U3!fQH%
zppqx^7UjoS2SBaTsg9^+QNcLJ)8Hn~v-lGJK*mjjm5E2doN%-4n};^T&c}Nru}&&N
zx~g>6W@@E+x|Npk;hziFA@OG+heS!8tOs;}g+>J=epiNyu%`)jChDWguC*QWk<$mp
zV9ow@3sa2sLBW%d&a3X6?bR+rG&$D!uQBW_DOLloqEDPq7cR`^zMieC#X8iq?G_Ts
z#-Hntou)ff(ZrrweY4cj`lwFZ&})C<f<X$kDxANlJ0L{!=%*Y1DqGEx^HutdbM!hj
zZ&m(>FI5rrPl&w3t?1i32vDS8W!z(_&r)}SnymVsnA@r2Gw)2NX6Tvg&aPj5PuPtf
zWQpv#qBeI@NM(tZ60UlXayrJ)qe1#1s8@Zk?;8o^lj<EHAbZicF-Q0$HQem`d~EFW
z_}%RVFYB3a1FS%bV9PG*P;pjBoAmqVCy%vdiN18VvT9LB_T6j^xtwow!{Pi2<TviK
zFDMt>OtWxo%~ENIL@6R|;|sqXvr>Iv=j*0T?kk@BlyXg$BX@Av?8EC5dP2v>?Kbn(
z9$d<DZHwMHJuBJ;{%kekU6BNOe|Fg_*rA~xGl?|ROzG;jnDLRBt=}ABQ#V>b;84C2
zpJ2TXaLU@3o_<*{i-vZh<?WJaBroqav5)M3%_7V{SWQ>Zx^scpd6$q$W;};8v3Pci
z@X1qJ0e_8ct$8Patq)_-B(z>J#Z7Qo6KY?`$AEvRork}@Tc--DU&T<8wL^$iRl%1O
z!Rj=i7xjJtfL&a?>0^2)T9|Iz5=D{oA{d(*ImBq3AvxC|KWVZa!#>I%i$TsXwFdz0
z!G?auF#LCxtzaAQ9%0tLb?-aMPe9;4HU@E|RYmU<0(t|Az(tN%PBN%Q?PO~>Q3B5(
z#*gl4(Go~f>u~RaNH)xlLeyrIpy1`~<E2Uv*-K)p4SkRAZ&9!TWC)76Q<5|^Q1N4n
z!EdLwYoqa@4isuBw)S$Yw=As3tmyn{*C)dXLW)j0`6*TC3yId2_pS-7^B=RzW~mS&
zn@@l$JB1p3^l|7GgzB%Aq@%Du&rudu7k`x!A^u2j!>cdZijZyV43%j%%2OTBc~p9+
zIlwXAc1c|1z<m07*)dRqVRtk5yCHkHN?gH#D+hr+k6isE#oKx??V&~fm5Bxm-j4RG
zx{!=X(5L<xe%8#IBF({tAd07-d6g!?-kk>+eI~CfG0}+@Uq$d%%u5hH4t)!p-ID3C
z%ux56EF)m_0BclDrq(71sOHwsI8>gbHfz_|Ni!_y^O;nC-g6DiHkQiMHR=5}DrttS
z76KWtc*-MP$CW;nox?TlWJWh+>XZ~A3Rp=yRM@k_WSV|_#|`l)^JoU3d^*}k&^<`B
z$s@Zl<C<SEO?H8>KpKFrnHfM$3)SdzeEs|fEs18@AxoYHIbn7$AFrgdDPjRIqajaR
zTfkU=x?9jHe``~S)PY(}g7rz$Qz_dt$pIetw%x2Po6tm~Y|js-FT28_CVWLFqX&(-
zg#3C$Lz3T>cIFb%V);$pv#cxF7(=p4Xr6;&{rXaT3zLmBf(s=P53Uo}-~9w^te&lX
zM;f^Qf=GM}v1VK<Fh(f(cTU|uepxVI?+5Q3x_Wq5w$l`%{A+I|f?1iRhdWzbbu4ZT
z%k<nDmm1HaB-K+B(xGUAPs}FkHec#b!W9lF@!pT)Z@)ASDf0iM@|__BdG^_-=w|>y
z;AH`DY4zMs5n>C02lP>(mzzI={=cz~e=9ot-G)&y*!wd1-_&h&2tRtfP#NgCNUQ~k
z#PGy)KvN85-#9w%kz$hR2la%?B=Sy;Mf=y_afIfstqkSHdfNj_pRhK{y&qa=R0N9T
zDq}wn4x>ff{}*G_->u?b{MrB2?edz4_sr}`BVO3;ap@jHD9m-xSi7D1D5&+aQ<ZCq
zr&P5epUYz`qVhlNQWl$I<>O#&yh8BCoHH?W_ioLxC=CF%XmP782C32(K`waxfl0HO
z2Hdh@W&U^yK~Yjka#<r;0}AXThA(7=L0+%#emg!Dv;X7c_&@)REG`JvTjAoQCPHmQ
zI&Y&<G$VGS_=W=1_SJ2lnoyP+Rxl=t+}}aV%GQ-_8eWFLf_$^fgYs8ILnw=IcmU1c
zLQh_LKQXnK`2@gPT&nDFK3+w#03z*TG9tG9Z9@6y3;Ac63xlmM1@rE9(P#Hszp}yW
ztW4O^V(rB`<s4Wcg-_3fUVD6%<^|dl-i~MZ*qD5l)|JQT9l~y+kwi#%H@M!S*@^F#
zX>p#I!BtGYf)HN$NeR<p0RT{!hz^8h04k#5=idRvzjmhIhJG>gZ$x3Y87MTOTsFux
z$xWn`(CMoT{amsY691@UfYE^KoiZNB03A=rBYNOR1jI+@PJoD3>f?I(A3cWETAS``
znPEx9*q@Ezs-PCu`ZtQh+xY0~tKkqLMw9u*EY_6#r3AQn*&HIKMzmk7V+v`PDxm4+
z#TaWD>@F?fRITP9RV((M>ywy`CtA-}kEfzy60Z`1t<jwpFO6!ut{vA+fgd)phM&v}
zzN3|y2$oqliV3B61SAh22y4iS>o%_TS89)%L2yJjOOA;HI0+J%hZnF7qZcZRF~gZp
z1Xt2yLW8X6=N#7hrnt+W#rWyk9?%_l7$6Yh%VA9~8crS8^(R$!$NSM7m8VtQS${bS
zwe+#ltKn|lv;AtET;a6{DxH|+I+Ff?_u&kZdB8vX4RkQWE(hK9CT&)^B35r~8y<_u
zhuuEz4w-(O$QInZD2?V~W}wZw9^Nz)`pHSNl=V_+(_qTw1Ct4Pi?Mq0<1$@d9TKJ~
z87W~NT^hKBrITLONq+OWYUJ{(;VJeIDw_c2u1W$Unww2%O(YXGx5J>0lNgwS>Dh@m
zP;L$!Z|O@QnL9_V6SqxZ7lgnf5@U{2oNde_L*^U^;F0kB_>wQKgT&XP3#bD3bMsS_
z&S%S+XrzqMwPM(7FN4NXdH~CYbB{2<3P>JcQt(CtY{WZJ3PB}d>&J~xHF(e5YbxbB
zM1`s?a8PvGbTsI)?alD6YuNSBuJ%Uw=)-W5`U>>i3P<Tq>BmEcQd04~`|+sUFEEL+
zf5tq+{hsv0F4M0LCf;*OZw1kBy&F_}lxaDum=05%D@R_8txA!g*lSD+fkX%NyWG+H
zB<prp81E4Q+fP6(3&n};t-EApM6W#dD<K+8rD#q^KY8Jhzr)Gse^c`1S8-T%Z?j+Q
z`MP(5b90$!I1{AOa!YtIG?QTz80qLffwtQbcxq4n;<hd=?5TE3;Fgcnb3IhD$j@2W
z!i?2N9BsV2R7dc8)x2?e%)<xZPr#zM{zSNz@`ENOmLtj*)M-I>+KZ|HhNdrQ_|2pF
zP2q1u5l;uIqqa8}<k>DxC)<&=WgjwswX6GeN!@E^^=CrJf9R{Z?SH<nF&7%;@X&lg
zRjqvSrb7R<frlnpA)n+bxeb%&t0U>vcC+kjYY(<k$Uf$aTmK@_R=xCk5h1iSp-=5f
z)c+&(cC$1Nb(u;xVeK(5$d?*2cKW6nH+nA=D};{f({B9@8SN-;^(L*a5=A_2d|uUE
z#Z9!Ap7o|0cut$b5`{er#~w^Y503CUCyqEgnf7>Geyv$01QpTux2Y;dPn;|LlGnBi
zf{}(yDuHd}^2?ffs9w)?O^ecA+?Bqtn6_uTGsSC9?-`vP$#EaWgwI5TRuf<JS9tMz
z4SbTxDBb^L3y0VLlnwc}WJ7*qprEKVFuT3pS4A#Qk(+D8wN#33iv7W^jQUcfkkJvF
zKUAGE#=sWmDb8-7hKzLtHW88)%-2xzJ=i;@#!B)DZ3yBpMLMJkBRjcRVK-iFY_LW#
z85K911}Q&S4nPU`h(Ou_flDF!n>DGz%8a$tCc4LQ37inDx<q+38&YxccN-#%=8Ulp
z{26YeBdGUq^RaMn9|7*AHPzgL@?NUfS=BAa+c|}bl%c#Rl`4-@7-U@euvU+6&RSgX
zI)vFl2ClvkVfdm48;d)Wg-L5lOnkZ<i~!`)Sa3|~=<5AWS>)?km_~mA>jRdrfi>Wr
z4}{7m{s^OvTzi&6q@$j0SMQzH5h^W-ZO~h_5UK5#hC{c{)*6VVk`KuQd^TdG05q6T
z?|mAK;a=Ny+(%$XF09%2NLKax7vPQF-g^gR*Y+mvsZ3J+1>)Tw7!?QM=bus3FU4<a
zkxFiJJhTv|?#J570J{&{OKRSAJU^O0L6cunwZ~axzCA`*BwTh`hMjZG!^hI9L<c5O
zUXfGehz6oxd`^IzZor#B3;*C-5q-O?`GHI7HIb7mL+*UGg|!Q1opbg-0g>G7(wB;n
z1lOB)v$Lx2Cn(|+HYjjpdU%HHu!5tqrrE<j?fMAkBD{(;wPx8Nid#dCaeI~bctMhn
zx?ahu9<04H_%&RTzQ;DMS?4*>(fOSOR~oF9P2A=5y8~|o=XnXv5GhW>czHbVIcLxw
zt~_zPeXJsNXfGr+)3*?f1xJ+9M~vp3i<t3q_6Y#`F_26n+;*B!t|nd<!3P$XC!*$H
zZa_Z_YD!{)fsq>z<Y~tO89>8s)A3qO@w)nB=J`YprfEbCrTpUsit~p+W&i!;em-C$
zt63Iw0Qu$llVA3geg5>8O5Omg4gf$Rl5rqPWvBC%|J$BdAz@0bF;au9VPU<f4n-5;
zyegBpFSGE|*W=@>tJQP}#({_ih{O@MPo6j<rQf3Xxk&`vIfGPN1M72hr-2u%4Rq^E
z+fZ5EcjDLcBRcw9JDZibKLHLei4QC7NgHjCY{`gKrw=WT2CkP%!7nWP>lzPqAnkmE
zq_JHkQAffkFI#~KE4#DYl5S2byN{P<R9Fe7y(fPC{}p%i|L{yWdO28^2=o4fLxdbx
z13*uDLyw8Y^vn)`1jagfZC%!IaSSP024In1E+g+DS`q_NJ}#5|<5TxvKLM#2Y1E7C
z&iRPEwJVX06v5bL7P!THz2?dSb+hX778gDyf7)<NhEV^l(&{_f)ZZTGfAYou*Ym%v
zsle6af-*|%#o#0eiCAe6X(gkWCwUu)-W9Mi1XyQ&?mc_qe2jfkg)&-mM>LCK)%*zQ
z<5maKH(}7?fBkCx*K0uz9qgXp?j*)U7+?tUK89GZ9zjaxkye&m5pNttE8in^AXO^-
zlb81|PiXc^6`VbU->CQ2U+*ETwKl)G%$dogO36~*RQ@cn<JkLfmM1Jl^LPC<T$JUw
zNK@u-`#N8qHW^Y6Lr}_8*)+MC7b#5+q7=B%CsdI!lDca1=e<wBB@Yq6P#E#n9vg!Z
z^Jqp@uH#B1s~cnQL>ont^TVbC>>j$ft0y(YD;v)*mh^0zXUG|1!KK<rg5)HY$dD4B
z-C?9JJ}{3$D)&7ggT`^-l!|D$**{worivNq9?0~bKY`$-$qqm&o&p|fN`;DF+}h6q
z&fr#R=(Gr=4KQ#~JK5+27;Z`)@{#YD)K8%G>Iud}X27kpXCC0Wb=xNN5uo{r%`CqN
ziA2QO>MRoQrOF3HzQeiYoAe(bl%-5gfF;T0xy0E`dLF`a!o$t(Gnact*W)YM+#5ul
z|2ZBCxWN84V_VfCsP5v0`evJ+YW&hhOn!x!<c4|L-V(GfBe&qX3x0bvkBL<ri(jo^
zoDC1eV6Rp4N^SC|E*6z;6i9r=HLl-k+RSv=e7BH3=!zmyl5M(JZrt`zD6aJhY;8(j
zq=&NaEotWy;U?KX?i-_z+;mmGacvoUmV&fzXYU&9Js&Z=-Ce<wBWZjMpDY7RCX{Az
z%$GHc9T1PsMM!PRb`Gsp$_5g+37PmRlN49bN^0lNyvr7*CM%4046_=naz6UDm4&*a
ztugDSez7sb6FO@Deo%uzI+X6^Y~uQBwT(-I1=+Q=JlUuAos>7SwDOpQEJW9WtJeb(
z@8-oBeH*+Htj-C?$^Fl^V%Cnjnnl@7NE%L{TE}h?HmY%*mrX3UlE_5V+@zh;;;Z%Z
zy<){qWw#5$pr#SPDnsT=WlQACiG(~`xnkQ?klJ?~RvIgC<VIFTZHzf~&Gf=I1nL7K
zS7;;UZHZ5KN<h59vG1mL^*ZG?Ty9XsFXRQ<>C7?}7=sVv?#gCmH0UQ_YxMI1Oq>hk
zwqcnly{sf}@;c!HERp6dxHk9^Mvk)0&lqd36aw$^S7dO)rL^+YUFujj4$U3}<*3Su
zBYnGm0x*Ag$hqm5<vGP)QbOs#Ok%oB3uQk6YuiwZVuIgFP=A`nGr^p02|2pKErETr
zi1sX@3Kx)ov%mCcRfgrT%f$!hOZDMbZKY4VxY>^*n>A-U;+y+CopG~hmb11mLa=cW
zOW2U4*UGbnFt0rFwRYIn0}k)kjql%1<q2&>9iu3Cdh~isE|{M1dj6<8-Q*{L)bRc;
zfWP(oRr`i6(Tg?cR%$ZjR~zA3{MgK(fd9R7);)aSyR9b@!j*G(DlrxPg^vcVO>lln
zOE(q0>sy;naft|C!+g}nbdk*VYanvCt>ZYdK`V}LYG=>rx9WiSoKnO)s}VEEV*uOZ
z%qz6;hkh70_yGCI<jT$k*6Bp-Nub;<jbVic>9o~4TJOHlS3}-yc&)(n{h37i^385K
zv&E80%Wp~wB46N54!nq8(vd*OHHFXpZSX^CxIwv&mod@lPXL<PQiIy-0!s2am^fUB
z*nw`T5C=^^SeEZ*RK}%;4rcOq@<c2`ND`}UHHp8Cal$JX`rqapl?qmb{BLULNGG;0
zUe|qaTRD3`j$bY~rkQq48jBqG7SL+?8qSTj)xO@hybs&u9y+?!3J7YZXtzwhJng;9
z$L+rv=^^;ljrr4j{@*p9|7$Dj_r&x6WMUGbwh%~ifgaxO#|;TWDT2UMa$1ct<YW24
zj%RUU+9gl5c5K@2uQ_}BH<qGFc=v{8g6|5<giu&%hv&a2I;-Z+Me5A%5h0qVU$kYE
zRle-l5#jP6ePW+VN@jJ6aY|px@RfbB(abeMVM_Nov^F5IfPq0oQYzJuI=?)&?$CV-
zW<i_6ZcU%;hP5TF%E1AxO-PJ@q%mmaAwQ)bU^m~f61O(vC{H%usW;wy(gmtV>`sVa
zu1WTFPBCGiepUr0c>dZVIBjsSMPb2M-Gza6Wo-tv*pQx)kp{<72{zAx5uBG*iF;I8
zZ_ddy%)}jJevae9qbEC8+nhqxyp8@;RAkv^)>YC;B?k^$#l#)UF-*qx^Kf&sBym*N
zubu|JT*|Prmv{@WIt@-DvuU54?+6{C?qss%{e9B^T@+8~K9JN6!ufxe_n}9l3uqsx
z?ya8cKQux{aQm$i`|E#*@WZYmEB)PhqJvxCjrTe;W|W1{^bmP#Ss}*4A6&9u5eJPR
z@j>cUV6~6nV?A8tvp;z6$Ow~%?NN}D>HV43y&u@4KZ;HnYN^he)_ww9l`?6b{~g5$
z73p1vGsZ0dW0gesksCLpCViRzZNS|ZZ~p~X&8F$vx~zh5`OkYY(~DkC<_6M(FN|<>
z)i)~4xWd8ZXhAh*l>yXecgoqa$?qPsNm6!`t(!2sjj%Z?daH=!6UA#hn)b30hk>*H
zoG`mkhcH|%C=s-IZyL`$*w|$j@R`!NVU!~CV5cCTIR-v>KU*3W1CXmOrhN<*j*<EU
zv<bSqJT<uCe~1@wzM~!YKw!oFpTi+3R`HD=x2iJ}1jUU0_zB>vNv<EVvaq5_fE}_G
zcmuGxI!Jv+1MVae?mGD1^MGy=DU`U!z`8@W=)I1yU^Ewg@FsJEr2Gdd^g>27>(%20
zx2^W1V}W=F%pi<lJpd-w5P10%u;e_bz=Z$>xL~4xM~S#A>ELgdFpXnj;5sBqc@qxN
zW~!f2153GGpKuY*o@(JtS|Un{iG+lh@kt%$9(vr9FH4$uS-3(k-p7Xr*X(lv2FA;@
zCABZ@`<Pfyqdt)gyaUIuD%M!P<_n85YI>w4U~2wamCMw2A(ls7lKmNN@}}GcSU>;b
zrb*_$j^xxjd2qa9Tkh^V&zX!~!UfYHDMdZ)r#>}ZSR4yXM0miMR1LkCnAmjl>1}pr
zjfSvzNF?bSFiUd`>T8BLkqfI96)NrgP<5-6&_)yylSwB{`^9U>U4?sD&m4N8af-W>
z&D!YDPXH6Y&*bCubG&fJL^lg2sU&4t=KK>i3UDcNU8ExgAD{L2*Fn{ClPrUabBT47
z#_N!4k|~KSN<W{k-<wg)3X7(BKkPSYk5_Mj)_R?uwR17mVK@OJS-=}saK4J%8fkmu
zt$+Xf8pjRSC3R7>RVK<k;9Wy0WCWplxU_Mdu(Rvk!r*LX4Jhr>thRlPRpTlN7gDgE
zj4pMrS3RP_6Tb9CmuzWZw~i}k<jS4Qi*5{P|05{a8^r?*H&{4ExbPO{M*#b|;}H<@
zucbsjt7*MFII~?=97(mz-%nXh9t=1iZK*)_mV(by%_7;{G+Qr|QNbdn-fxg>;SrlU
z9QLw4tuSm9tKpiDjdN-6ByI$sZmO3w1inNEKfh^hX(0}5l-b|E$X%EKwWd|ykmH@{
z#8mfFH{ms&Y@WHHJE}J7e=RKbfxrH!()QaP5+M<(@`)pEvlFCVB3^nY`>w~Ri5Z51
zbIGUsQO5@r$)7wlupWShYviDsDjz1NsJkNu5jaLP2E(PKM^@8&oY<@T-jn?3R;F%T
zzaM?+wr}vpwnWNnkYW`h^X@j{wy0@z-QhmVVr$5m4Q%2_5asL(tr+>mu<`6VlV^R<
z#lDD#j}gK{79mqkMS>gJ&9UeW+U}@FIC;I5#Pw!m3DqNz6kKa?Z&OCTglY=Kj9>Hs
z*}8}9sk=IiL;n{=JjKIjjohY=-l=3_x=&)o-E^OO_r3q0#P&b^r2aKOso#o#KT;W`
zkigw+Tcl8)yq?h0+6-Z7|NltGXv(SgD6g$c-BK2v;!vFH+Qc)3T-MsNX@u=35xs$A
zSeZ==aE(fY&!_31YL5h2n(g8@&$*&NwFVV%r3D4eJ)haIS<aVVpVcHrJWFPvmL2&d
zxJ-;|5isGZm+s#&-7`5_r)(Fd)RM>2FV43yGCtCO2({tu&)+e>2<+K$jbK@!AIN*l
z$D@+?q=#?A11838wEro>&lYcsv2LkkXoC!N9VwU^6OC2gu`oeou;EwEd`pvrdD9hV
zpd75`AVxi-rHU+<>HT$QMtw>EQz_5&raw~7xkV>W9Z<}5@#@?lE@oam&m%>QWiViH
zG~fKOUy+`3nfZ8#sx7ZncK7FVEmpgnz`~q3+?j1d=o~$SIk~1LM&SD+cmYow+$skE
z2&C05X#=wrrRzB9nA?aBowXMA4Cxzbq0BH-Rvzx5>+&lBD692U$NzusJ(A#M1W1G3
z(zo6YgcWQAh|RorFiG?nN>H_$aBDS5dN!vu3XQw&n;GTpPz$yKLInKX<yd>#7I$Ws
zvh#0#%AC4{PcB-g-o2}OF5N_4GL6L=k<`S=UY}?et5araQYNvaR7PREf`$-VEy)_H
z``$!B5?QAR_k+nxr!t?HZ#Zu&o$wxYO>t|RBT8)l^)&xyDxJAt!N5uM_;zjOhtj1X
zr7Y4jK`pWxL3fGP<8*7~Nz)Nir6p#d*M;RkJH}SY=j6oIgut_#xMB6Ztq51AD*+dL
zxn`Byt?gY2#oroOh0(a09M@ll^F|oY@$=NGouq3AQ!b6fOJKk-ZY$LY3r-L$W4xvL
z60l0ik5sITBG7UG2?`_|?kyEM6SW%PHy`Geb9BJNYpUmYceL1&5#cmyYgZ;Wad-bX
z(e_J|1uM6e%~`=vKXiL{dQ)ZMB0ZsUzt4gJs#viXW1~CrDh_npMolP^TP()M6F`q-
z7oTh*gWMR8L@3u(fB;~HlTYKQs@*hs=r<B&w@Hz|bgZV6OHOyrByhcPW6v*^LVd0f
zvj_ntIJup0Pu*qchPgHlL{iO@%a{#Q<-U^356=q9J$jzw0vkv*hTy64v06|cB!HTn
z_!N>3*i_}qcKBib$HO}m<~6!u&wM4J?Z*6(pEo-AbTb?)PaG?o19snH>}SiX*d^7^
zq9RnHc()g1(EkM7k;7>+b{0-THr36#OoA4-$DpTnuJP@#$5q$b37CQ&XBCMUK=e(U
zA-myV(qI(istF*vMJT3y&*Fhq{?N%&)u`rAlay5xF_#31$BlXGDTLHbAp=9L3)JJ1
z3W}6i-Vf>SISUn=;V71;vqfEkuljFXo%j^tPw{a034mc#ZFFAIw6dCW^=of~xa~I$
z>p6wqAlsPS5yMqs+^l?7vvr@lz1Dt8hhGS0Pv$c;-e9s#jTJ+#M#jzi;rR~HM+Q-m
zN5&&PHD~K5pz4fl?=p6K1tSe=)zftY-%0yVK-M&kg}@cxhSPn7bxHD2pN_{kUQCs7
z4$3FPREt1n2rtAW)q$oJ*-G|JSfqLSCKjb}gWsw>Nw4N$0gCD%7)5c=KVH``RexeM
z&Xbq?-Imj$>_&o1=k#t380+>?qrh4zZ*Q~bG>%YqVxqll?S4%o)RWO9Q(_doIUHrw
z5@DTx8&f^<6>%8U9g)~(Dd3#`!uceSp<7z)vqpd0mI)qrofmZBx_<TbKKA;piQ2?X
z&xzZ{mx0~6`Iz?c>bjc7q$b*FIoV0wO9yTBQq}x#ek|3Rad_T_lvDkwE|g<m<97Mu
z-IJmYq+fK1<cpr)WHSZ<;yk!#yXPp%`D=C-D00rG(Ih@?e>8r(^em5%6Hj&l)~(;}
z!KJ5iH#WyMdtM9;`z(tW`J+i*%Fx`fgi+Nn+(E~XY$7Z*Rh9l-0ii-j$jGjIpX84x
zFfbMlrnaXJiglr2K^V_bg*`WV{b`F@SN-0xm()tDP1_-H?beF{2KqXJ^am20rHtwD
z`Iuq45lNtq*suKcC&u+B#&tK{KHGOa8u?2ORc*{>5l!5EL1KT^yqVxIjc%*%VcgjK
z`t%nZfb34Bey$e-*ArRTCHD-uH_LH{vnnn_jEYwEMF)webM;Rp^&`rM+4lI#_1|<&
z!MzdFJK>TxQ$-m(Dz`njjnr4a^vt`|V+wCapPp3j`s@YYuHl?;+~fmQ;Zrb8VnPVg
ztG!crfWHt7vX5pfI1@`K;WCZJCu0{lWn(<ss?h?T1xI$M^?S3JtN3qLC1yA&<|c06
zk3CpEj3+URt&_d{e;MN%@&54=d6oD8K(V^X7i6`2e9oAnNxzZOdP&zg$ekAI_OyR;
zShCjy60`Kp(=*nnXBY5w69Aw;5b&SPX^2>>hoA_d0B@)0=#>f>B(<=bQ%-=3r`o4&
z`;7yQd2)<7inDI2xhJ)*_SNn31S9WdJ91jxcN^;MXb6&(#wQ^z#6=yL4j}_cnV}?9
zE7<goDeZn$v(zh2d!(XY%D1%Qihx01*^SNNxI1s<805ifEWXUqb4HEzwUeEi-vLB&
zV271Zqw7Vlx?Wv=K62xzItM<LNp*okmCA<Pz__e<{$%dBwfUG^wNV4LQl{Q!G%B)8
zrbt{%$Ri3VVJy-RG6ya!dN=I@&bT!6WZa$H97YS1kO;L9i!Y$TF$*RaduD^m*$?l;
zUlmBP2jD-YzLgqal%}ccrEcZPD9mX%KEh<_inu)x6zU);5*qua%|C8cQjj8B(7N%!
z%}P^;JKvwXaDCWlJ8)Yu`74~U`l+Z2XEN(4p1uD@)=Kw)pv&>MVAXH;^lFfL4y)-;
zK;>G6+rMid5&5qeNQnRE1`?P5vVp|kLv`5K|7&_&ZW*F@Vk>)h{`Y*ZhjLHz0<FE8
zs4|WLo7U&jce{-NM@_YGBf+<&SC?T1_64QGn?C_5de$41P1V%$(rp3eHyiN};iUSB
z@*BF6pm!EHlC)=wj4&D}sLJA1Mxph3fz^f3E)3#zi-yt)J}OEg)~ew8`fICrypo_<
zqvmyy0vFaVioA^X1l1@dA0vJOOkd2vg(_oz0$8jkf{mJubrSRsHrty8TU4e-dEt5`
z<9=7MQIWt~>s?C{CacKSqLYbRZAarD<||IMo^-1gB~{QS0!4P!U#KVUFX~t%9_iZ*
zw$#h2eBKAYht`eR^Hn@H!5)tq&aF9$wFP~S&YMb#J7{l@Ak5SdQSFf8dRfU2uDX*m
zPyb#4wX_ADEutytPH5#K$s)HshPeGQI`M2xE$$~ELG(N6yX#BROJ&E5zMWf<nbEID
z`oN)GwwK2!jrwU%?%2Za1Gm$4@0zG!OmO9C_^w~8OXxFS5S*_Do)Ju?-x3K~&PSdO
zldFk8*^j#q+nX8snxQ69j4|#U2t{2A<9~itG5@*kmk1@-EmDu*in+p}dO=xKWariB
z?4RREe+p;b1uW=+O<E82yd(aFail+hi}#*x*Hs>5UQ+2To;YTL-ig0u`QJarFW|Ys
zFj~^M<Zd~n$2hid4;S`4?e%&u<-JvithkvW`nccE>9hwu5QtRYK`~1VwEB*6h)Mp8
z!Xsk(j<UI3&?>hamY}CTB327GKB#eH_qrQpa)Dc(p;;TbTa@tW;eTf-D@Y|<E-}YZ
zC|9p7T>*-+mc?4Xr3W`wE3dw!1P!xj#P&(Qu>-3%roTF%#d{PWvp}rt(CVzv^;rgQ
zBbTF2!oQ-*WP3b04zwQIB&TQ#x}PpROyJ%?lA8(XGT#gXYROB>LmLX@n;br?C@H3{
z#KpvA(dVa~SeT|zJ1l*%(&brY(#D?;ej4L%ykKmM`@HbR(_{O~_^F*77gDqPeP2*v
zQ?_A<S#E|#w@kDBO>5@PXHo*cZ_);iE03yFy=n``=dGIOsB_3!D!Oy3&C4s4r_wO}
zJL-#94Q1WRZDe_;?B+pMM`23skJ9C~+tP;6Dt+Ed1i|V%X_pZ{VF(Li@Ykbm8FMW{
z6P@RM<(CI$z*5rN{j8)h(%_NjDYXX4vu4JPbDL8qPCJ~I6m9pJNGtF#Ecg?!(@Jym
zK4)E)l}pDe9`0O3SskAPDtpnIwy14=!&9vte_*Z1MBNzw#YytCB&0}DSfeFpJNS!$
ztNa~e(`U3D_;v1&a{z~2{x`Cc_$xE~7dVXizX;A?U>VIiN6kJue-MQWI*ZXP+dSBk
zYoNUGD%Rgp9pd(zsz_BY?4kN*!9837qx_Jb=<#goe30p$h_{`!eW3aNV_o1@i+Yj5
zy~s}h4fna%)Y8{V&pIk93)P;9-wLsErK8HLK7Y9r-ACCDsyA##m>N`vYebhbCtg=8
z1%gTyca0SXqZN3Z_9N{Z4q1ZK>w-eNs1U<LbNAYZ$*Il-iB&hk^73A|`q%sJ-_*bU
zBq6KuUXCr8EDE=Hc3lU&DfjupN7*9$-Q@tYylAzj^}=H0S3<@8m5=`<A$zIru)XT>
zSj@WIu9!~d9zF7M&X-E3IpeQtH5dB{;E2E4-5-CbY1%mD7tqnXeY(%~P^Oi18ZY1S
zNjgS8O#W=2(}gjz<tT%RwUDeKqm5mNO1{`+L#JqIFjx5qw~g6DQ33HUloK}ekW)?9
zYAT@%eHQXhybCz#{+c*5yIv?SOMtE34-|%i)E@Rqq-jF6vgDhiFou}#dhb>-?OG52
zeG>9x>RoAdg(j<La5m|RfT)S4ba-+Fg#8=-Gui#-$A!gBbV?e=<;}XZZfQ*c3?GU&
zac>GBqW}eb+dwIkSkj&GgUOP{+v4XlRcxY$(#1U~;8@dmgKAS-vCg+JY2g^T=Sl|-
zA>FE_u68|QMgv(sKM>F>Nhf58ege9h@>>p|hR7DiVe;|&4Qh$}J<KB@Ut*$bfx>&W
zAdV)gHL9%x@rqAsy0^H<1J>W(PdP-Zyw9ecMS~UUimooO8d&6TY)T$7)pHg?@mx1>
z-C(y{jJWqZc+bCML}y6%6()^+>?_97VFn2EIAH_WM`cN=H29QVh?FtAwXWT;jY0VB
zi2-jfdFL!GwG%d~2X;d74oZw`hy&KF88D`xxmp;ihjAp7F8FDn4%8PP9=o469+7rD
z0-^gkgHXr}=T^`I1I`|O*lJH97~7evkLO%QXh;7J)TP5%%)GGr&gYa#Q~Qk4s(zUQ
zeq%p^L|r*;fiVF@+kee4h!b0HFuz{NVH8G83yju7hXh6Wun+^S;W~c-ok(k$e68iy
z$99ieP(C}udOZ;ZHlvQnNRXm}0tL2DWKWrnFX*nJN_&pe6AQ$fHyD;GSQzix*19|z
z9z$Jj+6#=E-oToH5JncJxy_|m7uCiAkIsmT(>nqFp3SxTTc99-97Uk<A(e5>Oxoo7
zu%;i94(V%T(i0eO$1uJ=vt}vR=&#Qw`#;!w%doh%bX~NtkU+5D9!PKt!5u<ycZU!x
zSRuiofEWZ1PH=a3cXxLv+zWzJK*6n~S9h<qz0cWw&b|AtpT#WZQ)A9y-#cFEfrlM7
zZm5OSK#0C<8nNa^rF(CwJSZH)S(M)sn@#O2LqjwvF1PesGR9taDh^Lee-Z6a>ldYO
zrWoW{5p6+`_HcQwU)3<Ergp7zIl5!VfY?ap?S+epqC>|-RFwWPt4Mk<`3+Pdvz`sO
z8AqI!XyUqo9&<E34K&a2`t%*pYa(E_irj2oYh0hyKM^2C<I(*}5%iJDQI0Xi5tlb!
zqZC_OjPKe!cc=d9z|7WcTvoO7ZN#M@BU8O1%e;!W_7F|7&XVC^Pklhz{w}lTGef72
zD2I4iMqV#2_3GS<n&+9BF`g=sfv%QJ<%*FEqDc7$fe2;dDVQQZj#1HiG5VeJA={)I
(base85-encoded git binary patch literal omitted; not human-readable)

diff --git a/docs/source/tutorials/yolox.md b/docs/source/tutorials/yolox.md
index 0276b44a..283a561a 100644
--- a/docs/source/tutorials/yolox.md
+++ b/docs/source/tutorials/yolox.md
@@ -10,9 +10,9 @@ In brief, our main contributions are:
 - Provide an easy way to use PAI-BLADE to accelerate the inference process.
 - Provide a convenient way to train/evaluate/export YOLOX-PAI model and conduct end2end object detection.
 
-To learn more details of YOLOX-PAI, you can refer to our technical paper [to be done].
+To learn more details of YOLOX-PAI, you can refer to our technical paper [link][arxiv].
 
-img
+![image](../../../assets/result.jpg)
 
 ## Data preparation
 To download the dataset, please refer to [prepare_data.md](../prepare_data.md).
@@ -25,14 +25,16 @@ To use coco data to train detection, you can refer to [configs/detection/yolox/y
 ### PAI-Itag detection format
 To use pai-itag detection format data to train detection, you can refer to [configs/detection/yolox/yolox_s_8xb16_300e_coco_pai.py](https://github.com/alibaba/EasyCV/tree/master/configs/detection/yolox/yolox_s_8xb16_300e_coco_pai.py) for more configuration details.
 
-## Docker (Recommended)
-
-## Local & PAI-DSW
+## Quick Start
 
 To use COCO format data, use config file `configs/detection/yolox/yolox_s_8xb16_300e_coco.py`
 
 To use PAI-Itag format data, use config file `configs/detection/yolox/yolox_s_8xb16_300e_coco_pai.py`
 
+You can follow [quick_start.md](../quick_start.md) for local installation, or use our provided docker image:
+```shell
+registry.cn-shanghai.aliyuncs.com/pai-ai-test/eas-service:blade_cu111_easycv
+```
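+
+A minimal sketch of pulling and entering the image is shown below; the exact `docker run` flags (GPU access, mounted paths) are assumptions and should be adapted to your environment:
+```shell
+docker pull registry.cn-shanghai.aliyuncs.com/pai-ai-test/eas-service:blade_cu111_easycv
+# example flags only: expose GPUs and mount a local EasyCV checkout into the container
+docker run -it --gpus all -v /path/to/EasyCV:/workspace/EasyCV \
+    registry.cn-shanghai.aliyuncs.com/pai-ai-test/eas-service:blade_cu111_easycv /bin/bash
+```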
 
 ### Train
 **Single gpu:**
@@ -120,6 +122,7 @@ python tools/export.py \
 		${EXPORT_PATH}
 ```
 
+For more details of the export process, you can refer to [export.md](export.md).
 <details>
 <summary>Arguments</summary>
 
@@ -138,14 +141,23 @@ python tools/export.py configs/detection/yolox/yolox_s_8xb16_300e_coco.py \
 ```
 
 ### Inference
+Download the exported models ([preprocess](http://pai-vision-data-hz.oss-accelerate.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox-pai/model/export/epoch_300_pre_notrt.pt.preprocess), [model](http://pai-vision-data-hz.oss-accelerate.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox-pai/model/export/epoch_300_pre_notrt.pt.blade), [meta](http://pai-vision-data-hz.oss-accelerate.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox-pai/model/export/epoch_300_pre_notrt.pt.blade.config.json)) or export your own model.
+Place them in the following layout:
+```shell
+export_blade/
+├── epoch_300_pre_notrt.pt.blade
+├── epoch_300_pre_notrt.pt.blade.config.json
+└── epoch_300_pre_notrt.pt.preprocess
+```
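+
+As a sketch, the three files can be fetched into that layout from the command line with wget (assuming wget is available; the URLs are the download links above):
+```shell
+mkdir -p export_blade && cd export_blade
+# download the blade model, its meta config and the preprocess module
+wget http://pai-vision-data-hz.oss-accelerate.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox-pai/model/export/epoch_300_pre_notrt.pt.blade
+wget http://pai-vision-data-hz.oss-accelerate.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox-pai/model/export/epoch_300_pre_notrt.pt.blade.config.json
+wget http://pai-vision-data-hz.oss-accelerate.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox-pai/model/export/epoch_300_pre_notrt.pt.preprocess
+cd ..
+```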
 Download [test_image](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/data/small_coco_demo/val2017/000000017627.jpg)
 
+
 ```python
 import cv2
 from easycv.predictors import TorchYoloXPredictor
 
-output_ckpt = 'work_dirs/detection/yolox/epoch_300.pth'
-detector = TorchYoloXPredictor(output_ckpt)
+output_ckpt = 'export_blade/epoch_300_pre_notrt.pt.blade'
+detector = TorchYoloXPredictor(output_ckpt, use_trt_efficientnms=False)
 
 img = cv2.imread('000000017627.jpg')
 img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
@@ -153,13 +165,12 @@ output = detector.predict([img])
 print(output)
 
 # visualize image
-from matplotlib import pyplot as plt
 image = img.copy()
 for box, cls_name in zip(output[0]['detection_boxes'], output[0]['detection_class_names']):
     # box is [x1,y1,x2,y2]
     box = [int(b) for b in box]
     image = cv2.rectangle(image, tuple(box[:2]), tuple(box[2:4]), (0,255,0), 2)
     cv2.putText(image, cls_name, (box[0], box[1]-5), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0,0,255), 2)
-plt.imshow(image)
-plt.show()
+
+cv2.imwrite('result.jpg', image)
 ```

From 887e2be008d374b86fa2e935b590625410e85426 Mon Sep 17 00:00:00 2001
From: zouxinyi0625 <zouxinyi.zxy@alibaba-inc.com>
Date: Mon, 22 Aug 2022 15:39:07 +0800
Subject: [PATCH 56/69] update export.md

---
 docs/source/model_zoo_det.md    |  19 ++--
 docs/source/tutorials/export.md | 167 ++++++++------------------------
 2 files changed, 51 insertions(+), 135 deletions(-)

diff --git a/docs/source/model_zoo_det.md b/docs/source/model_zoo_det.md
index 3adf04be..931a4a7d 100644
--- a/docs/source/model_zoo_det.md
+++ b/docs/source/model_zoo_det.md
@@ -4,14 +4,17 @@
 
 Pretrained on COCO2017 dataset.
 
-| Algorithm  | Config                                                       | Params                                                 | inference time(V100)<br/>(ms/img)                      | mAP<sup>val<br/><sub>0.5:0.95</sub> | AP<sup>val<br/><sub>50</sub> | Download                                                     |
-| ---------- | ------------------------------------------------------------ | ------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ | ------------------------------------------------------------ |
-| YOLOX-s    | [yolox_s_8xb16_300e_coco](https://github.com/alibaba/EasyCV/tree/master/configs/detection/yolox/yolox_s_8xb16_300e_coco.py) | 9M | 10.7ms | 40.0                   | 58.9          | [model](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_s_bs16_lr002/epoch_300.pth) - [log](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_s_bs16_lr002/log.txt) |
-| YOLOX-m    | [yolox_m_8xb16_300e_coco](https://github.com/alibaba/EasyCV/tree/master/configs/detection/yolox/yolox_m_8xb16_300e_coco.py) | 25M | 12.3ms | 46.3                   | 64.9          | [model](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_m_bs16_lr002/epoch_300.pth) - [log](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_m_bs16_lr002/log.txt) |
-| YOLOX-l    | [yolox_l_8xb8_300e_coco](https://github.com/alibaba/EasyCV/tree/master/configs/detection/yolox/yolox_m_8xb8_300e_coco.py) | 54M | 15.5ms | 48.9                  | 67.5        | [model](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_l_bs8_lr001/epoch_290.pth) - [log](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_l_bs8_lr001/log.txt) |
-| YOLOX-x    | [yolox_x_8xb8_300e_coco](https://github.com/alibaba/EasyCV/tree/master/configs/detection/yolox/yolox_x_8xb8_300e_coco.py) | 99M | 19ms | 50.9                   | 69.2          | [model](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_x_bs8_lr001/epoch_290.pth) - [log](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_x_bs8_lr001/log.txt) |
-| YOLOX-tiny | [yolox_tiny_8xb16_300e_coco](https://github.com/alibaba/EasyCV/tree/master/configs/detection/yolox/yolox_tiny_8xb16_300e_coco.py) | 5M | 9.5ms | 31.5                   | 49.2          | [model](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_tiny_bs16_lr002/epoch_300.pth) - [log](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_tiny_bs16_lr002/log.txt) |
-| YOLOX-nano | [yolox_nano_8xb16_300e_coco](https://github.com/alibaba/EasyCV/tree/master/configs/detection/yolox/yolox_tiny_8xb16_300e_coco.py) | 2.2M | 9.4ms | 26.5                   | 42.6          | [model](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_nano_bs16_lr002/epoch_300.pth) - [log](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_nano_bs16_lr002/log.txt) |
+| Algorithm             | Config                                                                                                                                                              | Params | Speed<sup>V100<br/><sub>fp16 b32 </sub> | mAP<sup>val<br/><sub>0.5:0.95</sub> | AP<sup>val<br/><sub>50</sub> | Download                                                                                                                                                                                                                                                                         |
+|-----------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------|-----------------------------------------|-------------------------------------|------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| YOLOX-s               | [yolox_s_8xb16_300e_coco](https://github.com/alibaba/EasyCV/tree/master/configs/detection/yolox/yolox_s_8xb16_300e_coco.py)                                         | 9M     | 0.68ms                                  | 40.0                                | 58.9                         | [model](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_s_bs16_lr002/epoch_300.pth) - [log](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_s_bs16_lr002/log.txt)                |
+| PAI-YOLOXs            | [yoloxs_pai_8xb16_300e_coco](http://pai-vision-data-hz.oss-accelerate.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox-pai/config/pai_yoloxs.py)                  | 16M    | 0.71ms                                  | 41.4                                | 60.0                         | [model](http://pai-vision-data-hz.oss-accelerate.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox-pai/model/pai_yoloxs.pth) - [log](http://pai-vision-data-hz.oss-accelerate.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox-pai/log/pai_yoloxs.json)                       |
+| PAI-YOLOXs-ASFF       | [yoloxs_pai_asff_8xb16_300e_coco](http://pai-vision-data-hz.oss-accelerate.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox-pai/config/pai_yoloxs_asff.py)        | 21M    | 0.87ms                                  | 42.8                                | 61.8                         | [model](http://pai-vision-data-hz.oss-accelerate.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox-pai/model/pai_yoloxs_asff.pth) - [log](http://pai-vision-data-hz.oss-accelerate.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox-pai/log/pai_yoloxs_asff.json)             |
+| PAI-YOLOXs-ASFF-TOOD3 | [yoloxs_pai_asff_tood3_8xb16_300e_coco](http://pai-vision-data-hz.oss-accelerate.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox-pai/config/pai_yoloxs_tood3.py) | 24M    | 1.15ms                                  | 43.9                                | 62.1                         | [model](http://pai-vision-data-hz.oss-accelerate.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox-pai/model/pai_yoloxs_asff_tood3.pth) - [log](http://pai-vision-data-hz.oss-accelerate.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox-pai/log/pai_yoloxs_asff_tood3.json) |
+| YOLOX-m               | [yolox_m_8xb16_300e_coco](https://github.com/alibaba/EasyCV/tree/master/configs/detection/yolox/yolox_m_8xb16_300e_coco.py)                                         | 25M    | 1.50ms                                  | 46.3                                | 64.9                         | [model](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_m_bs16_lr002/epoch_300.pth) - [log](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_m_bs16_lr002/log.txt)                |
+| YOLOX-l               | [yolox_l_8xb8_300e_coco](https://github.com/alibaba/EasyCV/tree/master/configs/detection/yolox/yolox_m_8xb8_300e_coco.py)                                           | 54M    | 15.5ms                                  | 48.9                                | 67.5                         | [model](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_l_bs8_lr001/epoch_290.pth) - [log](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_l_bs8_lr001/log.txt)                  |
+| YOLOX-x               | [yolox_x_8xb8_300e_coco](https://github.com/alibaba/EasyCV/tree/master/configs/detection/yolox/yolox_x_8xb8_300e_coco.py)                                           | 99M    | 19ms                                    | 50.9                                | 69.2                         | [model](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_x_bs8_lr001/epoch_290.pth) - [log](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_x_bs8_lr001/log.txt)                  |
+| YOLOX-tiny            | [yolox_tiny_8xb16_300e_coco](https://github.com/alibaba/EasyCV/tree/master/configs/detection/yolox/yolox_tiny_8xb16_300e_coco.py)                                   | 5M     | 9.5ms                                   | 31.5                                | 49.2                         | [model](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_tiny_bs16_lr002/epoch_300.pth) - [log](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_tiny_bs16_lr002/log.txt)          |
+| YOLOX-nano            | [yolox_nano_8xb16_300e_coco](https://github.com/alibaba/EasyCV/tree/master/configs/detection/yolox/yolox_tiny_8xb16_300e_coco.py)                                   | 2.2M   | 9.4ms                                   | 26.5                                | 42.6                         | [model](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_nano_bs16_lr002/epoch_300.pth) - [log](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_nano_bs16_lr002/log.txt)          |
 
 ## ViTDet
 
diff --git a/docs/source/tutorials/export.md b/docs/source/tutorials/export.md
index 7adef025..4edd300c 100644
--- a/docs/source/tutorials/export.md
+++ b/docs/source/tutorials/export.md
@@ -1,12 +1,12 @@
 # Export tutorial
 
-We support the following ways to export models.
+We support the following ways to export YOLOX-PAI models.
 
 **Original**
 
-Original model saves the state dict of model. One should build model in advance and then load the model state dict.
+Original model saves the state dict of the model. One should export the model in advance before running inference on an image.
 
-**torch.jit**
+**Torch.jit**
 
 Torch.jit is used to save the TorchScript model. It can be used independently from Python. It is convenient to be deployed in various environments and has little dependency on hardware. It can also reduce the inference time. For more details, you can refer to the official tutorial: https://pytorch.org/tutorials/beginner/Intro_to_TorchScript_tutorial.html
 
@@ -16,10 +16,18 @@ Blade Model is used to greatly accelerate the inference process. It combines the
 
 **End2end**
 
-End2end model wraps the preprocess and postprocess process along with the model. Therefore, given an input image, the model can be directly used for inference.
+To simplify and accelerate end2end inference, we support wrapping the preprocess and postprocess steps separately.
 
+You can choose to export the model with or without preprocess/postprocess by setting different configs.
 
+### Installation
+You should install the blade environment first to use blade optimization.
+See [link](https://help.aliyun.com/document_detail/205134.html) for instructions.
 
+We also recommend using our provided docker image.
+```shell
+sudo docker pull registry.cn-shanghai.aliyuncs.com/pai-ai-test/eas-service:blade_cu111_easycv
+```
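+
+After installation, a quick import check (a minimal sketch; `torch_blade` is assumed here to be the module name shipped by the blade wheel) confirms that the environment is usable:
+```python
+# sanity check for the blade environment
+import torch_blade  # assumed module name provided by the PAI-Blade wheel
+
+print('blade is available:', torch_blade.__file__)
+```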
 ### Export model
 
 ```shell
@@ -47,131 +55,36 @@ python tools/export.py configs/detection/yolox/yolox_s_8xb16_300e_coco.py \
         work_dirs/detection/yolox/epoch_300_export.pth
 ```
 
-#### Original model
-
-Eport the orginal model by setting the export config as:
-
-```shell
-export = dict(use_jit=False, export_blade=False, end2end=False)
-```
-
-#### Script model
-
-Eport the script model by setting the export config as:
-
-```shell
-export = dict(use_jit=True, export_blade=False, end2end=False)
-```
-
-#### Blade model
-
-Eport the blade model by setting the export config as:
-
-```shell
-export = dict(use_jit=True, export_blade=True, end2end=False)
-```
-
-You can choose not to save the jit model by setting use_jit=False.
-
-The blade environment must be installed successfully to export a blade model.
-
-To install the blade, you can refer to https://help.aliyun.com/document_detail/205134.html.
-
-#### End2end model
-
-Eport the model in the end2end mode by setting ''end2end=True'' in the export config:
-
+**Export configs:**
 ```shell
-export = dict(use_jit=True, export_blade=True, end2end=True)
+export = dict(export_type='ori',              # exported model type ['ori','jit','blade'] 
+              preprocess_jit=True,            # whether to save a preprocess jit model
+              static_opt=True,                # whether to use static shape ti optimize model
+              batch_size=1,                   # batch size for the static-shape optimization
+              blade_config=dict(
+                  enable_fp16=True,
+                  fp16_fallback_op_ratio=0.05 # fallback to fp32 ratio for blade optimize
+                                              # the difference between the fp16 and fp32 results of all layers will be computed
+                                              # The layers with larger differences are more likely to fall back to fp32
+                                              # if the optimized result is not accurate enough, you can choose a larger ratio.
+              ), 
+              use_trt_efficientnms=True)      # whether to wrap the trt_nms into model
 ```
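+
+For reference, a model exported with `export_type='jit'` is a plain TorchScript file. Below is a minimal loading/inference sketch (the file path and the dummy 640x640 input are illustrative only; in practice feed a properly preprocessed image tensor):
+
+```python
+import torch
+from easycv.file import io
+
+device = 'cuda' if torch.cuda.is_available() else 'cpu'
+
+# path produced by tools/export.py in your own run
+jit_model_path = 'work_dirs/detection/yolox/epoch_300_export.pt'
+
+with io.open(jit_model_path, 'rb') as infile:
+    model = torch.jit.load(infile, device)
+
+img = torch.rand(1, 3, 640, 640).to(device)  # dummy input tensor
+with torch.no_grad():
+    outputs = model(img)
+```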
 
-You should define your own preprocess and postprocess as below (please refer to: https://pytorch.org/docs/stable/jit.html?highlight=jit#module-torch.jit ) or the default test pipeline will be used.
-
-```python
-@torch.jit.script
-def preprocess_fn(image, traget_size=(640, 640)):
-		"""Process the data input to model."""
-    pass
-
-@torch.jit.script
-def postprocess_fn(output):
-		"""Process output values of the model."""
-    pass
-
-# define your own export wrapper
-End2endModelExportWrapper(
-    model,
-    preprocess_fn=preprocess_fn,
-    postprocess_fn=postprocess_fn)
-```
-
-
-
-### Inference with the Exported Model
-
-#### Non-End2end model
-
-```python
-image_path = 'data/demo.jpg'
-input_data_list =[np.asarray(Image.open(image_path))]
-
-# define the preprocess function
-test_pipeline = [
-    dict(type='MMResize', img_scale=img_scale, keep_ratio=True),
-    dict(type='MMPad', pad_to_square=True, pad_val=(114.0, 114.0, 114.0)),
-    dict(type='MMNormalize', **img_norm_cfg),
-    dict(type='DefaultFormatBundle'),
-    dict(type='Collect', keys=['img'])
-]
-
-def preprocess(img):
-  	pipeline = [build_from_cfg(p, PIPELINES) for p in test_pipeline]
-    transform = Compose(pipeline)
-    return transform(img)['img']
-
-
-with io.open(jit_model_path, 'rb') as infile:
-    device = 'cuda' if torch.cuda.is_available() else 'cpu'
-    model = torch.jit.load(infile, device)
-
-    for idx, img in enumerate(input_data_list):
-        if type(img) is not np.ndarray:
-            img = np.asarray(img)
-        img = preprocess(img)
-        output = model(img)
-        output = postprocess(output)
-        print(output)
-```
-
-#### End2end model
-
-
-```python
-image_path = 'data/demo.jpg'
-input_data_list =[np.asarray(Image.open(image_path))]
-
-with io.open(jit_model_path, 'rb') as infile:
-    device = 'cuda' if torch.cuda.is_available() else 'cpu'
-    model = torch.jit.load(infile, device)
-
-    for idx, img in enumerate(input_data_list):
-        if type(img) is not np.ndarray:
-            img = np.asarray(img)
-        img = torch.from_numpy(img).to(device)
-        output = model(img)
-        print(output)
-```
-
-
-
 ### Inference Time Comparisons
+Using YOLOX-s as an example, we test the end2end inference time of models exported with different configs.
+Note that blade optimization needs warmup, and we report average time among 1000 experiments on a single NVIDIA Tesla V100.  
+
+
+| export_type | preprocess_jit | use_trt_efficientnms | Infer time (end2end) /ms |
+| :---------: | :------------: | :------------------: | :----------------------: |
+|     ori     |       -        |          -           |          24.58           |
+|     jit     |     False      |        False         |          18.30           |
+|     jit     |     False      |         True         |          18.38           |
+|     jit     |      True      |        False         |          13.44           |
+|     jit     |      True      |         True         |          13.04           |
+|    blade    |     False      |        False         |           8.72           |
+|    blade    |     False      |         True         |           9.39           |
+|    blade    |      True      |        False         |           3.93           |
+|    blade    |      True      |         True         |           4.53           |
 
-Use the YOLOX-S model as an example, the inference process can be greatly accelerated by using the script and blade model.
-
-|  Model  |       Mode       |  FPS   |
-| :-----: | :--------------: | :----: |
-| YOLOX-S |     Original     | 54.02  |
-| YOLOX-S |      Script      | 89.33  |
-| YOLOX-S |      Blade       | 174.38 |
-| YOLOX-S | Script (End2End) | 86.62  |
-| YOLOX-S | Blade (End2End)  | 160.86 |

From b8653062c2d7a82b907d97eb21215fa231a4802b Mon Sep 17 00:00:00 2001
From: zouxinyi0625 <zouxinyi.zxy@alibaba-inc.com>
Date: Mon, 22 Aug 2022 16:19:58 +0800
Subject: [PATCH 57/69] format

---
 README.md                                          | 2 +-
 configs/detection/yolox/yolox_s_8xb16_300e_coco.py | 2 +-
 docs/source/tutorials/export.md                    | 7 +++----
 docs/source/tutorials/yolox.md                     | 6 +++---
 4 files changed, 8 insertions(+), 9 deletions(-)

diff --git a/README.md b/README.md
index 4e62d212..f83c7b2c 100644
--- a/README.md
+++ b/README.md
@@ -22,7 +22,7 @@ English | [简体中文](README_zh-CN.md)
 
 EasyCV is an all-in-one computer vision toolbox based on PyTorch, mainly focus on self-supervised learning, transformer based models, and SOTA CV tasks including image classification, metric-learning, object detection, pose estimation and so on.
 
-[Latest News!!] We have released our YOLOX-PAI that reveives SOTA results within 40~50 mAP (less than 1ms). And we also provide a convenient and fast export/predictor api for end2end object detection. To get a quick start of YOLOX-PAI, click [here](docs/source/tutorials/yolox.md)!
+[🔥 Latest News] We have released our YOLOX-PAI that achieves SOTA results within 40~50 mAP (less than 1ms). And we also provide a convenient and fast export/predictor api for end2end object detection. To get a quick start of YOLOX-PAI, click [here](docs/source/tutorials/yolox.md)!
 
 ### Major features
 
diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
index 93c20319..82cd4c6d 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
@@ -43,7 +43,7 @@
 
 # dataset settings
 data_root = 'data/coco/'
-data_root = '/apsarapangu/disk6/xinyi.zxy/coco/'
+
 img_norm_cfg = dict(
     mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
 
diff --git a/docs/source/tutorials/export.md b/docs/source/tutorials/export.md
index 4edd300c..79149b15 100644
--- a/docs/source/tutorials/export.md
+++ b/docs/source/tutorials/export.md
@@ -57,7 +57,7 @@ python tools/export.py configs/detection/yolox/yolox_s_8xb16_300e_coco.py \
 
 **Export configs:**
 ```shell
-export = dict(export_type='ori',              # exported model type ['ori','jit','blade'] 
+export = dict(export_type='ori',              # exported model type ['ori','jit','blade']
               preprocess_jit=True,            # whether to save a preprocess jit model
               static_opt=True,                # whether to use static shape ti optimize model
               batch_size=1,                   # batch size for the static-shape optimization
@@ -67,13 +67,13 @@ export = dict(export_type='ori',              # exported model type ['ori','jit'
                                               # the difference between the fp16 and fp32 results of all layers will be computed
                                               # The layers with larger differences are more likely to fall back to fp32
                                               # if the optimized result is not accurate enough, you can choose a larger ratio.
-              ), 
+              ),
               use_trt_efficientnms=True)      # whether to wrap the trt_nms into model
 ```
 
 ### Inference Time Comparisons
 Using YOLOX-s as an example, we test the end2end inference time of models exported with different configs.
-Note that blade optimization needs warmup, and we report average time among 1000 experiments on a single NVIDIA Tesla V100.  
+Note that blade optimization needs warmup, and we report average time among 1000 experiments on a single NVIDIA Tesla V100.
 
 
 | export_type | preprocess_jit | use_trt_efficientnms | Infer time (end2end) /ms |
@@ -87,4 +87,3 @@ Note that blade optimization needs warmup, and we report average time among 1000
 |    blade    |     False      |         True         |           9.39           |
 |    blade    |      True      |        False         |           3.93           |
 |    blade    |      True      |         True         |           4.53           |
-
diff --git a/docs/source/tutorials/yolox.md b/docs/source/tutorials/yolox.md
index 283a561a..4d435a13 100644
--- a/docs/source/tutorials/yolox.md
+++ b/docs/source/tutorials/yolox.md
@@ -2,12 +2,12 @@
 
 ## Introduction
 Welcome to YOLOX-PAI! YOLOX-PAI is an incremental work of YOLOX based on PAI-EasyCV.
-We use various existing detection methods and PAI-BLADE to boost the performance.
+We use various existing detection methods and PAI-Blade to boost the performance.
 We also provide an efficient way for end2end object detection.
 
 In brief, our main contributions are:
 - Investigate various detection methods upon YOLOX to achieve SOTA object detection results.
-- Provide an easy way to use PAI-BLADE to accelerate the inference process.
+- Provide an easy way to use PAI-Blade to accelerate the inference process.
 - Provide a convenient way to train/evaluate/export YOLOX-PAI model and conduct end2end object detection.
 
 To learn more details of YOLOX-PAI, you can refer to our technical paper [??link][arxiv].
@@ -141,7 +141,7 @@ python tools/export.py configs/detection/yolox/yolox_s_8xb16_300e_coco.py \
 ```
 
 ### Inference
-Download exported models([preprocess](http://pai-vision-data-hz.oss-accelerate.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox-pai/model/export/epoch_300_pre_notrt.pt.preprocess), [model](http://pai-vision-data-hz.oss-accelerate.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox-pai/model/export/epoch_300_pre_notrt.pt.blade), [meta](http://pai-vision-data-hz.oss-accelerate.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox-pai/model/export/epoch_300_pre_notrt.pt.blade.config.json)) or export your own model. 
+Download exported models([preprocess](http://pai-vision-data-hz.oss-accelerate.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox-pai/model/export/epoch_300_pre_notrt.pt.preprocess), [model](http://pai-vision-data-hz.oss-accelerate.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox-pai/model/export/epoch_300_pre_notrt.pt.blade), [meta](http://pai-vision-data-hz.oss-accelerate.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox-pai/model/export/epoch_300_pre_notrt.pt.blade.config.json)) or export your own model.
 Put them in the following format:
 ```shell
 export_blade/

From b0fa9e65badc8086b63dfaf24ffaaa43c11610be Mon Sep 17 00:00:00 2001
From: zouxinyi0625 <zouxinyi.zxy@alibaba-inc.com>
Date: Mon, 22 Aug 2022 17:11:15 +0800
Subject: [PATCH 58/69] complete result

---
 docs/source/model_zoo_det.md | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/docs/source/model_zoo_det.md b/docs/source/model_zoo_det.md
index 931a4a7d..345fa787 100644
--- a/docs/source/model_zoo_det.md
+++ b/docs/source/model_zoo_det.md
@@ -10,11 +10,11 @@ Pretrained on COCO2017 dataset.
 | PAI-YOLOXs            | [yoloxs_pai_8xb16_300e_coco](http://pai-vision-data-hz.oss-accelerate.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox-pai/config/pai_yoloxs.py)                  | 16M    | 0.71ms                                  | 41.4                                | 60.0                         | [model](http://pai-vision-data-hz.oss-accelerate.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox-pai/model/pai_yoloxs.pth) - [log](http://pai-vision-data-hz.oss-accelerate.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox-pai/log/pai_yoloxs.json)                       |
 | PAI-YOLOXs-ASFF       | [yoloxs_pai_asff_8xb16_300e_coco](http://pai-vision-data-hz.oss-accelerate.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox-pai/config/pai_yoloxs_asff.py)        | 21M    | 0.87ms                                  | 42.8                                | 61.8                         | [model](http://pai-vision-data-hz.oss-accelerate.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox-pai/model/pai_yoloxs_asff.pth) - [log](http://pai-vision-data-hz.oss-accelerate.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox-pai/log/pai_yoloxs_asff.json)             |
 | PAI-YOLOXs-ASFF-TOOD3 | [yoloxs_pai_asff_tood3_8xb16_300e_coco](http://pai-vision-data-hz.oss-accelerate.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox-pai/config/pai_yoloxs_tood3.py) | 24M    | 1.15ms                                  | 43.9                                | 62.1                         | [model](http://pai-vision-data-hz.oss-accelerate.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox-pai/model/pai_yoloxs_asff_tood3.pth) - [log](http://pai-vision-data-hz.oss-accelerate.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox-pai/log/pai_yoloxs_asff_tood3.json) |
-| YOLOX-m               | [yolox_m_8xb16_300e_coco](https://github.com/alibaba/EasyCV/tree/master/configs/detection/yolox/yolox_m_8xb16_300e_coco.py)                                         | 25M    | 1.50ms                                  | 46.3                                | 64.9                         | [model](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_m_bs16_lr002/epoch_300.pth) - [log](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_m_bs16_lr002/log.txt)                |
-| YOLOX-l               | [yolox_l_8xb8_300e_coco](https://github.com/alibaba/EasyCV/tree/master/configs/detection/yolox/yolox_m_8xb8_300e_coco.py)                                           | 54M    | 15.5ms                                  | 48.9                                | 67.5                         | [model](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_l_bs8_lr001/epoch_290.pth) - [log](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_l_bs8_lr001/log.txt)                  |
-| YOLOX-x               | [yolox_x_8xb8_300e_coco](https://github.com/alibaba/EasyCV/tree/master/configs/detection/yolox/yolox_x_8xb8_300e_coco.py)                                           | 99M    | 19ms                                    | 50.9                                | 69.2                         | [model](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_x_bs8_lr001/epoch_290.pth) - [log](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_x_bs8_lr001/log.txt)                  |
-| YOLOX-tiny            | [yolox_tiny_8xb16_300e_coco](https://github.com/alibaba/EasyCV/tree/master/configs/detection/yolox/yolox_tiny_8xb16_300e_coco.py)                                   | 5M     | 9.5ms                                   | 31.5                                | 49.2                         | [model](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_tiny_bs16_lr002/epoch_300.pth) - [log](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_tiny_bs16_lr002/log.txt)          |
-| YOLOX-nano            | [yolox_nano_8xb16_300e_coco](https://github.com/alibaba/EasyCV/tree/master/configs/detection/yolox/yolox_tiny_8xb16_300e_coco.py)                                   | 2.2M   | 9.4ms                                   | 26.5                                | 42.6                         | [model](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_nano_bs16_lr002/epoch_300.pth) - [log](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_nano_bs16_lr002/log.txt)          |
+| YOLOX-m               | [yolox_m_8xb16_300e_coco](https://github.com/alibaba/EasyCV/tree/master/configs/detection/yolox/yolox_m_8xb16_300e_coco.py)                                         | 25M    | 1.52ms                                  | 46.3                                | 64.9                         | [model](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_m_bs16_lr002/epoch_300.pth) - [log](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_m_bs16_lr002/log.txt)                |
+| YOLOX-l               | [yolox_l_8xb8_300e_coco](https://github.com/alibaba/EasyCV/tree/master/configs/detection/yolox/yolox_m_8xb8_300e_coco.py)                                           | 54M    | 2.47ms                                  | 48.9                                | 67.5                         | [model](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_l_bs8_lr001/epoch_290.pth) - [log](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_l_bs8_lr001/log.txt)                  |
+| YOLOX-x               | [yolox_x_8xb8_300e_coco](https://github.com/alibaba/EasyCV/tree/master/configs/detection/yolox/yolox_x_8xb8_300e_coco.py)                                           | 99M    | 4.74ms                                  | 50.9                                | 69.2                         | [model](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_x_bs8_lr001/epoch_290.pth) - [log](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_x_bs8_lr001/log.txt)                  |
+| YOLOX-tiny            | [yolox_tiny_8xb16_300e_coco](https://github.com/alibaba/EasyCV/tree/master/configs/detection/yolox/yolox_tiny_8xb16_300e_coco.py)                                   | 5M     | 0.28ms                                  | 31.5                                | 49.2                         | [model](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_tiny_bs16_lr002/epoch_300.pth) - [log](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_tiny_bs16_lr002/log.txt)          |
+| YOLOX-nano            | [yolox_nano_8xb16_300e_coco](https://github.com/alibaba/EasyCV/tree/master/configs/detection/yolox/yolox_tiny_8xb16_300e_coco.py)                                   | 2.2M   | 0.19ms                                  | 26.5                                | 42.6                         | [model](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_nano_bs16_lr002/epoch_300.pth) - [log](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/modelzoo/detection/yolox/yolox_nano_bs16_lr002/log.txt)          |
 
 ## ViTDet
 

From a7a914cf228ec926d1f948e2ad56694d5ad692b4 Mon Sep 17 00:00:00 2001
From: zouxinyi0625 <zouxinyi.zxy@alibaba-inc.com>
Date: Mon, 22 Aug 2022 17:44:19 +0800
Subject: [PATCH 59/69] hook

---
 easycv/utils/mmlab_utils.py | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/easycv/utils/mmlab_utils.py b/easycv/utils/mmlab_utils.py
index 3216350e..bff3040a 100644
--- a/easycv/utils/mmlab_utils.py
+++ b/easycv/utils/mmlab_utils.py
@@ -14,10 +14,7 @@
 
 try:
     from mmcv.runner.hooks import HOOKS
-    if 'easycv' not in HOOKS.module_dict['YOLOXLrUpdaterHook'].__module__:
-        # the latest mmcv has registed YOLOXLrUpdaterHook and will occur conflict with our YOLOXLrUpdaterHook
-        # however, we can not find the exact version of such change!
-        HOOKS._module_dict.pop('YOLOXLrUpdaterHook', None)
+    HOOKS._module_dict.pop('YOLOXLrUpdaterHook', None)
     from mmdet.models.builder import MODELS as MMMODELS
     from mmdet.models.builder import BACKBONES as MMBACKBONES
     from mmdet.models.builder import NECKS as MMNECKS

From 3ff49feb0e4da8a5739226faed81b577a6ddd965 Mon Sep 17 00:00:00 2001
From: zouxinyi0625 <zouxinyi.zxy@alibaba-inc.com>
Date: Tue, 23 Aug 2022 00:41:14 +0800
Subject: [PATCH 60/69] fix ut

---
 configs/edge_models/yolox_edge.py             |  4 +-
 .../detection/detectors/yolox/yolo_head.py    |  5 ++-
 .../detectors/yolox/yolo_head_template.py     | 10 ++++-
 .../models/detection/detectors/yolox/yolox.py |  6 ++-
 .../detectors/yolox_edge/yolox_edge.py        | 39 +++++++++++--------
 tests/models/detection/yolox/test_yolox.py    | 12 +++---
 .../detection/yolox_edge/test_yolox_edge.py   | 19 +++++----
 tests/tools/test_eval.py                      |  6 +--
 8 files changed, 60 insertions(+), 41 deletions(-)

diff --git a/configs/edge_models/yolox_edge.py b/configs/edge_models/yolox_edge.py
index 3ab0e8e1..97bc49ad 100644
--- a/configs/edge_models/yolox_edge.py
+++ b/configs/edge_models/yolox_edge.py
@@ -7,7 +7,6 @@
 model = dict(
     stage='EDGE',
     type='YOLOX_EDGE',
-    num_classes=1,
     model_type='customized',
     test_conf=0.01,
     nms_thre=0.65,
@@ -16,7 +15,8 @@
     max_model_params=-1,
     max_model_flops=-1,
     activation='relu',
-)
+    head=dict(
+        type='YOLOXHead', model_type='customized', num_classes=1, width=1.0))
 
 # train setting
 samples_per_gpu = 16  # batch size per gpu
diff --git a/easycv/models/detection/detectors/yolox/yolo_head.py b/easycv/models/detection/detectors/yolox/yolo_head.py
index d01bc191..388836e0 100644
--- a/easycv/models/detection/detectors/yolox/yolo_head.py
+++ b/easycv/models/detection/detectors/yolox/yolo_head.py
@@ -21,6 +21,7 @@ def __init__(
         obj_loss_type='BCE',
         reg_loss_type='giou',
         decode_in_inference=True,
+        width=None,
     ):
         """
         Args:
@@ -35,6 +36,7 @@ def __init__(
             reg_loss_type (str): the loss function of the box prediction. Default value: l1.
         """
         super(YOLOXHead, self).__init__(
+            width=width,
             num_classes=num_classes,
             model_type=model_type,
             strides=strides,
@@ -44,7 +46,8 @@ def __init__(
             stage=stage,
             obj_loss_type=obj_loss_type,
             reg_loss_type=reg_loss_type,
-            decode_in_inference=decode_in_inference)
+            decode_in_inference=decode_in_inference,
+        )
 
     def forward(self, xin, labels=None, imgs=None):
         outputs = []
diff --git a/easycv/models/detection/detectors/yolox/yolo_head_template.py b/easycv/models/detection/detectors/yolox/yolo_head_template.py
index d28c3df1..ac13b71c 100644
--- a/easycv/models/detection/detectors/yolox/yolo_head_template.py
+++ b/easycv/models/detection/detectors/yolox/yolo_head_template.py
@@ -34,7 +34,8 @@ def __init__(self,
                  stage='CLOUD',
                  obj_loss_type='BCE',
                  reg_loss_type='giou',
-                 decode_in_inference=True):
+                 decode_in_inference=True,
+                 width=None):
         """
         Args:
             num_classes (int): detection class numbers.
@@ -48,7 +49,12 @@ def __init__(self,
             reg_loss_type (str): the loss function of the box prediction. Default value: giou.
         """
         super().__init__()
-        width = self.param_map[model_type][1]
+        if width is None and model_type in self.param_map:
+            width = self.param_map[model_type][1]
+        else:
+            assert (width!=None),\
+            'Unknow model type must have a given width!'
+
         self.width = width
         self.n_anchors = 1
         self.num_classes = num_classes
diff --git a/easycv/models/detection/detectors/yolox/yolox.py b/easycv/models/detection/detectors/yolox/yolox.py
index bb9760ac..0d05db9d 100644
--- a/easycv/models/detection/detectors/yolox/yolox.py
+++ b/easycv/models/detection/detectors/yolox/yolox.py
@@ -67,10 +67,12 @@ def __init__(self,
             asff_channel=asff_channel,
             use_att=use_att)
 
-        self.head = build_head(head)
+        if head is not None:
+            # head is None for YOLOX-edge to define a special head
+            self.head = build_head(head)
+            self.num_classes = self.head.num_classes
 
         self.apply(init_yolo)  # init_yolo(self)
-        self.num_classes = self.head.num_classes
         self.test_conf = test_conf
         self.nms_thre = nms_thre
         self.use_trt_efficientnms = False  # TRT NMS only will be convert during export
diff --git a/easycv/models/detection/detectors/yolox_edge/yolox_edge.py b/easycv/models/detection/detectors/yolox_edge/yolox_edge.py
index 7fbf7a2b..3e285803 100644
--- a/easycv/models/detection/detectors/yolox_edge/yolox_edge.py
+++ b/easycv/models/detection/detectors/yolox_edge/yolox_edge.py
@@ -1,7 +1,7 @@
 # Copyright (c) Alibaba, Inc. and its affiliates.
 import torch.nn as nn
 
-from easycv.models.builder import MODELS
+from easycv.models.builder import MODELS, build_head
 from easycv.models.detection.detectors.yolox.yolo_head import YOLOXHead
 from easycv.models.detection.detectors.yolox.yolo_pafpn import YOLOPAFPN
 from easycv.models.detection.detectors.yolox.yolox import YOLOX
@@ -22,6 +22,15 @@ class YOLOX_EDGE(YOLOX):
     and detection results during test.
     """
 
+    param_map = {
+        'nano': [0.33, 0.25],
+        'tiny': [0.33, 0.375],
+        's': [0.33, 0.5],
+        'm': [0.67, 0.75],
+        'l': [1.0, 1.0],
+        'x': [1.33, 1.25]
+    }
+
     def __init__(self,
                  stage: str = 'EDGE',
                  model_type: str = 's',
@@ -38,23 +47,21 @@ def __init__(self,
                  in_channels: list = [256, 512, 1024],
                  backbone=None,
                  head=None):
+
         super(YOLOX_EDGE, self).__init__()
 
-        if backbone is None:
-            self.backbone = YOLOPAFPN(
-                depth,
-                width,
-                in_channels=in_channels,
-                depthwise=True,
-                act=activation)
-        if head is None:
-            self.head = YOLOXHead(
-                num_classes,
-                width,
-                in_channels=in_channels,
-                depthwise=True,
-                act=activation,
-                stage=stage)
+        if model_type in self.param_map.keys():
+            depth = self.param_map[model_type][0]
+            width = self.param_map[model_type][1]
+
+        self.backbone = YOLOPAFPN(
+            depth,
+            width,
+            in_channels=in_channels,
+            depthwise=True,
+            act=activation)
+
+        self.head = build_head(head)
 
         self.apply(init_yolo)  # init_yolo(self)
         self.head.initialize_biases(1e-2)
diff --git a/tests/models/detection/yolox/test_yolox.py b/tests/models/detection/yolox/test_yolox.py
index 1b09d638..98325c73 100644
--- a/tests/models/detection/yolox/test_yolox.py
+++ b/tests/models/detection/yolox/test_yolox.py
@@ -15,15 +15,13 @@ def setUp(self):
     def test_yolox(self):
         for model_type in ['s', 'm', 'l', 'x', 'tiny', 'nano']:
             model = YOLOX(
-                backbone=dict(
-                    type='YOLOPAFPN',
-                    backbone='CSPDarknet',
-                    model_type=model_type,  # s m l x tiny nano
-                    neck='yolo'),
+                test_conf=0.01,
+                nms_thre=0.65,
+                backbone='CSPDarknet',
+                model_type=model_type,
                 head=dict(
                     type='YOLOXHead', model_type=model_type, num_classes=2),
-                test_conf=0.01,
-                nms_thre=0.65)
+            )
             model = model.cuda()
             model.train()
 
diff --git a/tests/models/detection/yolox_edge/test_yolox_edge.py b/tests/models/detection/yolox_edge/test_yolox_edge.py
index 26a3d2d5..078d9caf 100644
--- a/tests/models/detection/yolox_edge/test_yolox_edge.py
+++ b/tests/models/detection/yolox_edge/test_yolox_edge.py
@@ -15,15 +15,18 @@ def setUp(self):
     def test_yolox_edge(self):
         for model_type in ['s', 'm', 'l', 'x', 'tiny', 'nano']:
             model = YOLOX_EDGE(
-                backbone=dict(
-                    type='YOLOPAFPN',
-                    backbone='CSPDarknet',
-                    model_type=model_type,  # s m l x tiny nano
-                    neck='yolo'),
-                head=dict(
-                    type='YOLOXHead', model_type=model_type, num_classes=2),
+                num_classes=2,
+                model_type=model_type,  # s m l x tiny nano
                 test_conf=0.01,
-                nms_thre=0.65)
+                nms_thre=0.65,
+                backbone='CSPDarknet',
+                head=dict(
+                    type='YOLOXHead',
+                    model_type=model_type,
+                    num_classes=2,
+                    stage='EDGE',
+                ),
+            )
             model = model.cuda()
             model.train()
 
diff --git a/tests/tools/test_eval.py b/tests/tools/test_eval.py
index 01720280..379016f7 100644
--- a/tests/tools/test_eval.py
+++ b/tests/tools/test_eval.py
@@ -70,13 +70,13 @@ def check_metric(self, work_dir):
             content = f.readlines()
             res = json.loads(content[0])
             self.assertAlmostEqual(
-                res['DetectionBoxes_Precision/mAP'], 0.423, delta=0.001)
+                res['DetectionBoxes_Precision/mAP'], 0.450, delta=0.001)
             self.assertAlmostEqual(
                 res['DetectionBoxes_Precision/mAP@.50IOU'],
-                0.5816,
+                0.6132,
                 delta=0.001)
             self.assertAlmostEqual(
-                res['DetectionBoxes_Precision/mAP@.75IOU'], 0.451, delta=0.001)
+                res['DetectionBoxes_Precision/mAP@.75IOU'], 0.490, delta=0.001)
 
     def _base_eval(self, eval_cfgs, dist=False, dist_eval=False):
         cfg_file = eval_cfgs.pop('config_file')

From 55c3c467f59692d69656a7e2c95101b4099cc6ce Mon Sep 17 00:00:00 2001
From: zouxinyi0625 <zouxinyi.zxy@alibaba-inc.com>
Date: Tue, 23 Aug 2022 11:45:18 +0800
Subject: [PATCH 61/69] fix ut bug and cr problem

---
 configs/edge_models/yolox_edge.py             |  6 ++-
 easycv/apis/export.py                         | 27 ++---------
 .../models/backbones/repvgg_yolox_backbone.py | 11 +++--
 .../detectors/yolox/yolo_head_template.py     |  4 +-
 easycv/toolkit/blade/cv_blade_utils.py        | 46 +++++++++++--------
 easycv/utils/misc.py                          | 22 +++++++++
 export_log.txt                                | 18 --------
 tools/eval.py                                 | 22 +--------
 8 files changed, 69 insertions(+), 87 deletions(-)
 delete mode 100644 export_log.txt

diff --git a/configs/edge_models/yolox_edge.py b/configs/edge_models/yolox_edge.py
index 97bc49ad..d641953e 100644
--- a/configs/edge_models/yolox_edge.py
+++ b/configs/edge_models/yolox_edge.py
@@ -16,7 +16,11 @@
     max_model_flops=-1,
     activation='relu',
     head=dict(
-        type='YOLOXHead', model_type='customized', num_classes=1, width=1.0))
+        type='YOLOXHead',
+        model_type='customized',
+        num_classes=1,
+        reg_loss_type='iou',
+        width=1.0))
 
 # train setting
 samples_per_gpu = 16  # batch size per gpu
diff --git a/easycv/apis/export.py b/easycv/apis/export.py
index 0f6dbd35..8b77e743 100644
--- a/easycv/apis/export.py
+++ b/easycv/apis/export.py
@@ -15,34 +15,17 @@
 from easycv.file import io
 from easycv.models import (DINO, MOCO, SWAV, YOLOX, Classification, MoBY,
                            build_model)
-from easycv.models.backbones.repvgg_yolox_backbone import RepVGGBlock
-from easycv.utils.bbox_util import scale_coords
 from easycv.utils.checkpoint import load_checkpoint
+from easycv.utils.misc import reparameterize_models
 
 __all__ = [
-    'export', 'PreProcess', 'ModelExportWrapper', 'ProcessExportWrapper',
-    'reparameterize_models'
+    'export',
+    'PreProcess',
+    'ModelExportWrapper',
+    'ProcessExportWrapper',
 ]
 
 
-def reparameterize_models(model):
-    """ reparameterize model for inference, especially forf
-            1. rep conv block : merge 3x3 weight 1x1 weights
-        call module switch_to_deploy recursively
-    Args:
-        model: nn.Module
-    """
-    reparameterize_count = 0
-    for layer in model.modules():
-        if isinstance(layer, RepVGGBlock):
-            reparameterize_count += 1
-            layer.switch_to_deploy()
-    logging.warning(
-        'export : PAI-export reparameterize_count(RepVGGBlock, ) switch to deploy with {} blocks'
-        .format(reparameterize_count))
-    return model
-
-
 def export(cfg, ckpt_path, filename):
     """ export model for inference
 
diff --git a/easycv/models/backbones/repvgg_yolox_backbone.py b/easycv/models/backbones/repvgg_yolox_backbone.py
index cf029563..e52378db 100644
--- a/easycv/models/backbones/repvgg_yolox_backbone.py
+++ b/easycv/models/backbones/repvgg_yolox_backbone.py
@@ -26,7 +26,12 @@ def conv_bn(in_channels, out_channels, kernel_size, stride, padding, groups=1):
 
 
 class RepVGGBlock(nn.Module):
-    """Basic Block of RepVGG"""
+    """
+        Basic Block of RepVGG
+        It's an efficient block that will be reparameterized for deployment (deploy=True) at evaluation time.
+        Usage: RepVGGBlock(in_channels, out_channels, ksize=3, stride=stride)
+
+    """
 
     def __init__(self,
                  in_channels,
@@ -82,7 +87,6 @@ def __init__(self,
                 stride=stride,
                 padding=padding_11,
                 groups=groups)
-            # print('RepVGG Block, identity = ', self.rbr_identity)
 
     def forward(self, inputs):
         if hasattr(self, 'rbr_reparam'):
@@ -291,7 +295,8 @@ def forward(self, x):
 
 
 class RepVGGYOLOX(nn.Module):
-    ''' RepVGG with MT_SPPF to build a efficient Yolox backbone
+    '''
+        RepVGG with MT_SPPF to build an efficient YOLOX backbone
     '''
 
     def __init__(
diff --git a/easycv/models/detection/detectors/yolox/yolo_head_template.py b/easycv/models/detection/detectors/yolox/yolo_head_template.py
index ac13b71c..63923abf 100644
--- a/easycv/models/detection/detectors/yolox/yolo_head_template.py
+++ b/easycv/models/detection/detectors/yolox/yolo_head_template.py
@@ -52,8 +52,8 @@ def __init__(self,
         if width is None and model_type in self.param_map:
             width = self.param_map[model_type][1]
         else:
-            assert (width!=None),\
-            'Unknow model type must have a given width!'
+            assert (width !=
+                    None), 'Unknow model type must have a given width!'
 
         self.width = width
         self.n_anchors = 1
diff --git a/easycv/toolkit/blade/cv_blade_utils.py b/easycv/toolkit/blade/cv_blade_utils.py
index e8df13a0..0bfcb8f4 100644
--- a/easycv/toolkit/blade/cv_blade_utils.py
+++ b/easycv/toolkit/blade/cv_blade_utils.py
@@ -246,6 +246,8 @@ def blade_optimize(speed_test_model,
                    batch=1,
                    warm_up_time=10,
                    compute_cost=True,
+                   use_profile=False,
+                   check_result=False,
                    static_opt=True):
 
     if not static_opt:
@@ -295,32 +297,36 @@ def blade_optimize(speed_test_model,
         summary = pd.DataFrame(results)
         logging.warning(summary.to_markdown())
 
-    torch.cuda.empty_cache()
-    # warm-up
-    for k in range(warm_up_time):
-        test_result = opt_model(*inputs)
-        torch.cuda.synchronize()
-
-    torch.cuda.synchronize()
-    cu_prof_start()
-    for k in range(warm_up_time):
-        test_result = opt_model(*inputs)
-        torch.cuda.synchronize()
-    cu_prof_stop()
-    import torch.autograd.profiler as profiler
-    with profiler.profile(use_cuda=True) as prof:
+    if use_profile:
+        torch.cuda.empty_cache()
+        # warm-up
         for k in range(warm_up_time):
             test_result = opt_model(*inputs)
             torch.cuda.synchronize()
 
-    with profiler.profile(use_cuda=True) as prof:
+        torch.cuda.synchronize()
+        cu_prof_start()
         for k in range(warm_up_time):
             test_result = opt_model(*inputs)
             torch.cuda.synchronize()
-
-    prof_str = prof.key_averages().table(sort_by='cuda_time_total')
-    print(f'{prof_str}')
-
-    # check_results(output, test_result)
+        cu_prof_stop()
+        import torch.autograd.profiler as profiler
+        with profiler.profile(use_cuda=True) as prof:
+            for k in range(warm_up_time):
+                test_result = opt_model(*inputs)
+                torch.cuda.synchronize()
+
+        with profiler.profile(use_cuda=True) as prof:
+            for k in range(warm_up_time):
+                test_result = opt_model(*inputs)
+                torch.cuda.synchronize()
+
+        prof_str = prof.key_averages().table(sort_by='cuda_time_total')
+        print(f'{prof_str}')
+
+    if check_result:
+        output = model(*inputs)
+        test_result = opt_model(*inputs)
+        check_results(output, test_result)
 
     return opt_model
diff --git a/easycv/utils/misc.py b/easycv/utils/misc.py
index abc8c143..8e544b96 100644
--- a/easycv/utils/misc.py
+++ b/easycv/utils/misc.py
@@ -1,10 +1,13 @@
 # Copyright (c) Alibaba, Inc. and its affiliates.
+import logging
 from functools import partial
 
 import mmcv
 import numpy as np
 from six.moves import map, zip
 
+from easycv.models.backbones.repvgg_yolox_backbone import RepVGGBlock
+
 
 def tensor2imgs(tensor, mean=(0, 0, 0), std=(1, 1, 1), to_rgb=True):
     num_imgs = tensor.size(0)
@@ -67,3 +70,22 @@ def add_prefix(inputs, prefix):
         outputs[f'{prefix}.{name}'] = value
 
     return outputs
+
+
+def reparameterize_models(model):
+    """ reparameterize model for inference, especially for
+            1. rep conv block : merge 3x3 and 1x1 weights
+        call module switch_to_deploy recursively
+    Args:
+        model: nn.Module
+    """
+    reparameterize_count = 0
+    for layer in model.modules():
+        if isinstance(layer, RepVGGBlock):
+            reparameterize_count += 1
+            layer.switch_to_deploy()
+    logging.info(
+        'export : PAI-export reparameterize_count(RepVGGBlock, ) switch to deploy with {} blocks'
+        .format(reparameterize_count))
+    print('reparam:', reparameterize_count)
+    return model
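+
+
+# Usage sketch (illustrative only): the helper is typically applied to an
+# eval-mode model right before tracing or export, so that every RepVGGBlock
+# is fused into a single conv, e.g.
+#   model.eval()
+#   model = reparameterize_models(model)
+#   traced = torch.jit.trace(model, dummy_input)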
diff --git a/export_log.txt b/export_log.txt
deleted file mode 100644
index e56ee35f..00000000
--- a/export_log.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-  646  PYTHONPATH=./ CUDA_VISIBLE_DEVICES=4 python tools/export.py models/reptood3_assf_bs32_nopost_220815.py  models/epoch_300.pth  models/output.blade
-  648  PYTHONPATH=./ CUDA_VISIBLE_DEVICES=4 python tools/export.py models/reptood3_assf_bs32_nopost_220815.py  models/epoch_300.pth  models/as2_tood32.blade
-  655  PYTHONPATH=./ CUDA_VISIBLE_DEVICES=4 python tools/export.py models/reptood3_assf_bs32_nopost_220815.py  models/epoch_300.pth  models/output.blade
-  668  PYTHONPATH=./ CUDA_VISIBLE_DEVICES=4 python tools/export.py models/reptood3_assf_bs32_nopost_220815.py  models/epoch_300.pth  models/output_bs1.blade
-  677  PYTHONPATH=./ CUDA_VISIBLE_DEVICES=4 python tools/export.py models/reptood3_assf_bs32_nopost_220815.py  models/epoch_300.pth  models/output_bs1.blade
-  688  PYTHONPATH=./ CUDA_VISIBLE_DEVICES=4 python tools/export.py models/reptood3_assf_bs32_nopost_220815.py  models/epoch_300.pth  models/output_bs1.blade
-  698  PYTHONPATH=./ CUDA_VISIBLE_DEVICES=4 python tools/export.py models/reptood3_assf_bs32_nopost_220815.py  models/epoch_300.pth  models/output_bs1_e2e.blade
-  708  PYTHONPATH=./ CUDA_VISIBLE_DEVICES=4 python tools/export.py models/reptood3_assf_bs32_nopost_220815_e2e.py  models/epoch_300.pth  models/output_bs1_e2e.blade
-  713  PYTHONPATH=./ CUDA_VISIBLE_DEVICES=4 python tools/export.py models/reptood3_assf_bs32_nopost_220815_e2e_fp16005.py  models/epoch_300.pth  models/output_bs1_e2e_fp16005.blade
-  714  PYTHONPATH=./ CUDA_VISIBLE_DEVICES=4 python tools/export.py models/reptood3_assf_bs32_nopost_220815_e2e_005.py  models/epoch_300.pth  models/output_bs1_e2e_fp005.blade
-  716  PYTHONPATH=./ CUDA_VISIBLE_DEVICES=4 python tools/export.py models/reptood3_assf_bs32_nopost_220815_e2e_f005.py  models/epoch_300.pth  models/output_bs1_e2e_f005.blade
-  719  PYTHONPATH=./ CUDA_VISIBLE_DEVICES=4 python tools/export.py models/reptood3_assf_bs32_nopost_220815_e2e_f005.py  models/epoch_300.pth  models/output_bs1_e2e_f005.blade
-  738  PYTHONPATH=./ CUDA_VISIBLE_DEVICES=4 python tools/export.py models/reptood3_assf_bs32_nopost_220815_e2e_f005_trtnms.py  models/epoch_300.pth  models/output_bs1_e2e_f005_trtnms.blade
-  741  PYTHONPATH=./ CUDA_VISIBLE_DEVICES=4 python tools/export.py models/reptood3_assf_bs32_nopost_220815_e2e_f005_trtnms.py  models/epoch_300.pth  models/output_bs1_e2e_f005_trtnms.blade
-  767  PYTHONPATH=./ CUDA_VISIBLE_DEVICES=4 python tools/export.py models/reptood3_assf_bs32_nopost_220815_e2e_f005_trtnms.py  models/epoch_300.pth  models/output_bs1_e2e_f005_trtnms.blade
-  770  PYTHONPATH=./ CUDA_VISIBLE_DEVICES=4 python tools/export.py models/reptood3_assf_bs32_nopost_220815_e2e_f005_trtnms.py  models/epoch_300.pth  models/output_bs1_e2e_f005_trtnms.blade
-  774  history | grep export | grep CUDA_VISIBLE_DEVICES=4
-  775  history | grep export | grep CUDA_VISIBLE_DEVICES=4 > export_log.txt
diff --git a/tools/eval.py b/tools/eval.py
index 9e1cf19b..c835b4f2 100644
--- a/tools/eval.py
+++ b/tools/eval.py
@@ -32,27 +32,7 @@
 from easycv.utils.mmlab_utils import dynamic_adapt_for_mmlab
 
 from easycv.utils.setup_env import setup_multi_processes
-import logging
-from easycv.models.backbones.repvgg_yolox_backbone import RepVGGBlock
-
-
-def reparameterize_models(model):
-    """ reparameterize model for inference, especially forf
-            1. rep conv block : merge 3x3 weight 1x1 weights
-        call module switch_to_deploy recursively
-    Args:
-        model: nn.Module
-    """
-    reparameterize_count = 0
-    for layer in model.modules():
-        if isinstance(layer, RepVGGBlock):
-            reparameterize_count += 1
-            layer.switch_to_deploy()
-    logging.info(
-        'export : PAI-export reparameterize_count(RepVGGBlock, ) switch to deploy with {} blocks'
-        .format(reparameterize_count))
-    print('reparam:', reparameterize_count)
-    return model
+from easycv.utils.misc import reparameterize_models
 
 
 def parse_args():

From 3490d45a406e5e0b5ef1a8bf2fe2484034c0000a Mon Sep 17 00:00:00 2001
From: zouxinyi0625 <zouxinyi.zxy@alibaba-inc.com>
Date: Tue, 23 Aug 2022 13:38:31 +0800
Subject: [PATCH 62/69] skip correct ut

---
 .github/workflows/citest.yaml                        | 4 +++-
 tests/models/detection/yolox_edge/test_yolox_edge.py | 2 +-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/citest.yaml b/.github/workflows/citest.yaml
index a785c2ef..8efc2203 100644
--- a/.github/workflows/citest.yaml
+++ b/.github/workflows/citest.yaml
@@ -64,7 +64,9 @@ jobs:
           export CUDA_VISIBLE_DEVICES=7
           source ~/workspace/anaconda2/etc/profile.d/conda.sh
           conda activate evtorch_torch1.8.0
-          PYTHONPATH=. python tests/run.py
+#          PYTHONPATH=. python tests/run.py
+          PYTHONPATH=. python tests/models/detection/yolox_edge/test_yolox_edge.py
+          PYTHONPATH=. python tests/tools/test_prune.py
 
 # blade test env will be updated!
 #  ut-torch181-blade:
diff --git a/tests/models/detection/yolox_edge/test_yolox_edge.py b/tests/models/detection/yolox_edge/test_yolox_edge.py
index 078d9caf..2eeed017 100644
--- a/tests/models/detection/yolox_edge/test_yolox_edge.py
+++ b/tests/models/detection/yolox_edge/test_yolox_edge.py
@@ -30,7 +30,7 @@ def test_yolox_edge(self):
             model = model.cuda()
             model.train()
 
-            batch_size = 2
+            batch_size = 1
             imgs = torch.randn(batch_size, 3, 640, 640).cuda()
             num_boxes = 5
             gt_bboxes = torch.randint(

From 991d2cef998bec05d5ec387a277d4840cbe46c0e Mon Sep 17 00:00:00 2001
From: zouxinyi0625 <zouxinyi.zxy@alibaba-inc.com>
Date: Tue, 23 Aug 2022 13:50:38 +0800
Subject: [PATCH 63/69] ut

---
 .github/workflows/citest.yaml | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/citest.yaml b/.github/workflows/citest.yaml
index 8efc2203..675d9777 100644
--- a/.github/workflows/citest.yaml
+++ b/.github/workflows/citest.yaml
@@ -64,9 +64,8 @@ jobs:
           export CUDA_VISIBLE_DEVICES=7
           source ~/workspace/anaconda2/etc/profile.d/conda.sh
           conda activate evtorch_torch1.8.0
-#          PYTHONPATH=. python tests/run.py
-          PYTHONPATH=. python tests/models/detection/yolox_edge/test_yolox_edge.py
-          PYTHONPATH=. python tests/tools/test_prune.py
+          PYTHONPATH=. python tests/run.py
+       
 
 # blade test env will be updated!
 #  ut-torch181-blade:

From 99e0d4c0f8e0112e2223722c558347ab53ae9bb9 Mon Sep 17 00:00:00 2001
From: zouxinyi0625 <zouxinyi.zxy@alibaba-inc.com>
Date: Tue, 23 Aug 2022 14:47:25 +0800
Subject: [PATCH 64/69] ut

---
 .scripts/ci_test.sh               | 3 ++-
 configs/edge_models/yolox_edge.py | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/.scripts/ci_test.sh b/.scripts/ci_test.sh
index 1d5c99a8..b79aa855 100644
--- a/.scripts/ci_test.sh
+++ b/.scripts/ci_test.sh
@@ -40,4 +40,5 @@ export TEST_DIR="/tmp/easycv_test_${USER}_`date +%s`"
 # do not uncomment; it causes failures in online UT. Install requirements by yourself on the UT machine
 # pip install -r requirements.txt
 #run test
-PYTHONPATH=. python tests/run.py
+#PYTHONPATH=. python tests/run.py
+PYTHONPATH=. python tests/tools/test_prune.py
\ No newline at end of file
diff --git a/configs/edge_models/yolox_edge.py b/configs/edge_models/yolox_edge.py
index d641953e..8cecbaf0 100644
--- a/configs/edge_models/yolox_edge.py
+++ b/configs/edge_models/yolox_edge.py
@@ -26,7 +26,7 @@
 samples_per_gpu = 16  # batch size per gpu
 test_samples_per_gpu = 16  # test batch size per gpu
 gpu_num = 2  # gpu number for one worker
-total_epochs = 11  # train epoch
+total_epochs = 6  # train epoch
 interval = 5
 
 # tiny nano without mixup

From 3eb7cd2c3bcac705ec35f280ffdc3473b28d4492 Mon Sep 17 00:00:00 2001
From: zouxinyi0625 <zouxinyi.zxy@alibaba-inc.com>
Date: Tue, 23 Aug 2022 16:21:52 +0800
Subject: [PATCH 65/69] ut

---
 .github/workflows/citest.yaml | 2 +-
 .scripts/ci_test.sh           | 3 +--
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/citest.yaml b/.github/workflows/citest.yaml
index 675d9777..3fe2d2bf 100644
--- a/.github/workflows/citest.yaml
+++ b/.github/workflows/citest.yaml
@@ -65,7 +65,7 @@ jobs:
           source ~/workspace/anaconda2/etc/profile.d/conda.sh
           conda activate evtorch_torch1.8.0
           PYTHONPATH=. python tests/run.py
-       
+
 
 # blade test env will be updated!
 #  ut-torch181-blade:
diff --git a/.scripts/ci_test.sh b/.scripts/ci_test.sh
index b79aa855..1d5c99a8 100644
--- a/.scripts/ci_test.sh
+++ b/.scripts/ci_test.sh
@@ -40,5 +40,4 @@ export TEST_DIR="/tmp/easycv_test_${USER}_`date +%s`"
 # do not uncomment; it causes failures in online UT. Install requirements by yourself on the UT machine
 # pip install -r requirements.txt
 #run test
-#PYTHONPATH=. python tests/run.py
-PYTHONPATH=. python tests/tools/test_prune.py
\ No newline at end of file
+PYTHONPATH=. python tests/run.py

From 1b063fca956f8bc6858b94b24174cf0a91e6a0ca Mon Sep 17 00:00:00 2001
From: zouxinyi0625 <zouxinyi.zxy@alibaba-inc.com>
Date: Tue, 23 Aug 2022 19:47:12 +0800
Subject: [PATCH 66/69] ut

---
 configs/edge_models/yolox_edge.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/configs/edge_models/yolox_edge.py b/configs/edge_models/yolox_edge.py
index 8cecbaf0..d641953e 100644
--- a/configs/edge_models/yolox_edge.py
+++ b/configs/edge_models/yolox_edge.py
@@ -26,7 +26,7 @@
 samples_per_gpu = 16  # batch size per gpu
 test_samples_per_gpu = 16  # test batch size per gpu
 gpu_num = 2  # gpu number for one worker
-total_epochs = 6  # train epoch
+total_epochs = 11  # train epoch
 interval = 5
 
 # tiny nano without mixup

From 26b458f89782e456e0949a31016b59f5bb2ccfbd Mon Sep 17 00:00:00 2001
From: zouxinyi0625 <zouxinyi.zxy@alibaba-inc.com>
Date: Tue, 23 Aug 2022 20:19:28 +0800
Subject: [PATCH 67/69] ut

---
 configs/edge_models/yolox_edge.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/configs/edge_models/yolox_edge.py b/configs/edge_models/yolox_edge.py
index d641953e..1ff8ef6d 100644
--- a/configs/edge_models/yolox_edge.py
+++ b/configs/edge_models/yolox_edge.py
@@ -27,7 +27,7 @@
 test_samples_per_gpu = 16  # test batch size per gpu
 gpu_num = 2  # gpu number for one worker
 total_epochs = 11  # train epoch
-interval = 5
+interval = 5  # eval interval
 
 # tiny nano without mixup
 img_scale = (256, 256)

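For context on the `total_epochs` / `interval` pair toggled in patches 64-67: in an epoch-based trainer these two fields typically mean "train for `total_epochs` epochs and evaluate every `interval` epochs". A minimal, self-contained illustration of that semantics (this is not EasyCV's hook machinery):

```python
# Illustration only: how an epoch-based trainer typically consumes the two fields
# commented in yolox_edge.py. Stub functions stand in for real training/eval steps.
total_epochs = 11  # train epoch
interval = 5       # eval interval


def train_one_epoch(epoch):
    print(f'training epoch {epoch}')


def evaluate(epoch):
    print(f'evaluating after epoch {epoch}')


for epoch in range(1, total_epochs + 1):
    train_one_epoch(epoch)
    if epoch % interval == 0 or epoch == total_epochs:
        evaluate(epoch)  # fires after epochs 5, 10 and 11
```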
From e08509e538b4b81424e3ba2d08139600222d8695 Mon Sep 17 00:00:00 2001
From: zouxinyi0625 <zouxinyi.zxy@alibaba-inc.com>
Date: Wed, 24 Aug 2022 10:55:59 +0800
Subject: [PATCH 68/69] fix cr problem

---
 configs/detection/yolox/yolox_s_8xb16_300e_coco.py |  2 +-
 docs/source/tutorials/export.md                    |  4 ++--
 docs/source/tutorials/yolox.md                     |  8 ++++----
 easycv/apis/export.py                              | 10 +++++-----
 easycv/predictors/detector.py                      |  4 ++--
 5 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
index 82cd4c6d..c08fb896 100644
--- a/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
+++ b/configs/detection/yolox/yolox_s_8xb16_300e_coco.py
@@ -188,4 +188,4 @@
         # dict(type='WandbLoggerHookV2'),
     ])
 
-export = dict(export_type = 'ori', preprocess_jit = False, batch_size=1, blade_config=dict(enable_fp16=True, fp16_fallback_op_ratio=0.01), use_trt_efficientnms=False)
+export = dict(export_type = 'raw', preprocess_jit = False, batch_size=1, blade_config=dict(enable_fp16=True, fp16_fallback_op_ratio=0.01), use_trt_efficientnms=False)
diff --git a/docs/source/tutorials/export.md b/docs/source/tutorials/export.md
index 79149b15..cc364b5b 100644
--- a/docs/source/tutorials/export.md
+++ b/docs/source/tutorials/export.md
@@ -57,9 +57,9 @@ python tools/export.py configs/detection/yolox/yolox_s_8xb16_300e_coco.py \
 
 **Export configs:**
 ```shell
-export = dict(export_type='ori',              # exported model type ['ori','jit','blade']
+export = dict(export_type='raw',              # exported model type ['raw','jit','blade']
               preprocess_jit=True,            # whether to save a preprocess jit model
-              static_opt=True,                # whether to use static shape ti optimize model
+              static_opt=True,                # whether to use static shape to optimize model
               batch_size=1,                   # batch_size of the static shape
               blade_config=dict(
                   enable_fp16=True,
diff --git a/docs/source/tutorials/yolox.md b/docs/source/tutorials/yolox.md
index 4d435a13..64d4746e 100644
--- a/docs/source/tutorials/yolox.md
+++ b/docs/source/tutorials/yolox.md
@@ -10,7 +10,7 @@ In brief, our main contributions are:
 - Provide an easy way to use PAI-Blade to accelerate the inference process.
 - Provide a convenient way to train/evaluate/export YOLOX-PAI model and conduct end2end object detection.
 
-To learn more details of YOLOX-PAI, you can refer to our technical paper [??link][arxiv].
+To learn more details of YOLOX-PAI, you can refer to our technical paper [technical report][arxiv].
 
 ![image](../../../assets/result.jpg)
 
@@ -145,9 +145,9 @@ Download exported models([preprocess](http://pai-vision-data-hz.oss-accelerate.a
 Put them in the following format:
 ```shell
 export_blade/
-??? epoch_300_pre_notrt.pt.blade
-??? epoch_300_pre_notrt.pt.blade.config.json
-??? epoch_300_pre_notrt.pt.preprocess
+epoch_300_pre_notrt.pt.blade
+epoch_300_pre_notrt.pt.blade.config.json
+epoch_300_pre_notrt.pt.preprocess
 ```
 Download [test_image](http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/data/small_coco_demo/val2017/000000017627.jpg)
 
diff --git a/easycv/apis/export.py b/easycv/apis/export.py
index 8b77e743..fe8a1850 100644
--- a/easycv/apis/export.py
+++ b/easycv/apis/export.py
@@ -164,15 +164,15 @@ def _export_yolox(model, cfg, filename):
     """
 
     if hasattr(cfg, 'export'):
-        export_type = getattr(cfg.export, 'export_type', 'ori')
-        default_export_type_list = ['ori', 'jit', 'blade']
+        export_type = getattr(cfg.export, 'export_type', 'raw')
+        default_export_type_list = ['raw', 'jit', 'blade']
         if export_type not in default_export_type_list:
             logging.warning(
-                'YOLOX-PAI only supports the export type as  [ori,jit,blade], otherwise we use ori as default'
+                'YOLOX-PAI only supports the export types [raw, jit, blade], otherwise raw is used as default'
             )
-            export_type = 'ori'
+            export_type = 'raw'
 
-        if export_type != 'ori':
+        if export_type != 'raw':
             # only when we use jit or blade, we need to reparameterize_models before export
             model = reparameterize_models(model)
             device = 'cuda' if torch.cuda.is_available() else 'cpu'
diff --git a/easycv/predictors/detector.py b/easycv/predictors/detector.py
index 6647e329..4bdb24bb 100644
--- a/easycv/predictors/detector.py
+++ b/easycv/predictors/detector.py
@@ -60,7 +60,7 @@ def __init__(self,
         self.max_det = max_det
         self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
         # set type
-        self.model_type = 'ori'
+        self.model_type = 'raw'
         if model_path.endswith('jit'):
             self.model_type = 'jit'
         if model_path.endswith('blade'):
@@ -79,7 +79,7 @@ def __init__(self,
         self.score_thresh = model_config[
             'score_thresh'] if 'score_thresh' in model_config else score_thresh
 
-        if self.model_type != 'ori':
+        if self.model_type != 'raw':
             # jit or blade model
             preprocess_path = '.'.join(
                 model_path.split('.')[:-1] + ['preprocess'])

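Patch 68 renames the plain-checkpoint export type from 'ori' to 'raw' consistently across the config, the export tutorial, the exporter, and the predictor. A condensed sketch of the convention it settles on is below; the helper names are illustrative only and not part of the EasyCV API:

```python
# Sketch of the 'raw' / 'jit' / 'blade' convention from this patch; helper names
# are hypothetical, not EasyCV functions.
SUPPORTED_EXPORT_TYPES = ('raw', 'jit', 'blade')


def resolve_export_type(requested):
    """Fall back to a plain state-dict ('raw') export when the type is unknown."""
    if requested not in SUPPORTED_EXPORT_TYPES:
        print(f"unsupported export type '{requested}', falling back to 'raw'")
        return 'raw'
    return requested


def infer_model_type(model_path):
    """Predictors pick the loading path from the checkpoint suffix."""
    if model_path.endswith('jit'):
        return 'jit'
    if model_path.endswith('blade'):
        return 'blade'
    return 'raw'


print(resolve_export_type('ori'))                        # -> 'raw' (with a warning)
print(infer_model_type('epoch_300_pre_notrt.pt.blade'))  # -> 'blade'
```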
From 59a39161069dcdc002a1f3bab42e3000c53391cd Mon Sep 17 00:00:00 2001
From: zouxinyi0625 <zouxinyi.zxy@alibaba-inc.com>
Date: Wed, 24 Aug 2022 14:00:02 +0800
Subject: [PATCH 69/69] fix cr problem

---
 tools/eval.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/tools/eval.py b/tools/eval.py
index c835b4f2..60be08df 100644
--- a/tools/eval.py
+++ b/tools/eval.py
@@ -186,6 +186,7 @@ def main():
     print(f'use device {device}')
     checkpoint = load_checkpoint(model, args.checkpoint, map_location=device)
 
+    # reparameterize RepVGG blocks into deploy mode before evaluation
     model = reparameterize_models(model)
 
     model.to(device)
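
The comment added in patch 69 refers to RepVGG-style reparameterization: at deploy or evaluation time, the multi-branch training block (3x3 conv + 1x1 conv, each with BatchNorm) is folded into a single equivalent 3x3 convolution. A minimal sketch of the branch-fusion math, with the optional identity branch omitted and groups=1 assumed; this is not EasyCV's `reparameterize_models`, only the general idea:

```python
# Minimal RepVGG-style fusion sketch (identity branch omitted, groups=1 assumed);
# NOT EasyCV's reparameterize_models.
import torch
import torch.nn as nn
import torch.nn.functional as F


def fuse_conv_bn(conv, bn):
    # Fold BatchNorm statistics into the preceding (bias-free) convolution.
    std = (bn.running_var + bn.eps).sqrt()
    scale = bn.weight / std
    kernel = conv.weight * scale.reshape(-1, 1, 1, 1)
    bias = bn.bias - bn.running_mean * scale
    return kernel, bias


@torch.no_grad()
def reparameterize_block(conv3x3, bn3, conv1x1, bn1):
    # Fuse each branch with its BN, pad the 1x1 kernel to 3x3, then sum both branches.
    k3, b3 = fuse_conv_bn(conv3x3, bn3)
    k1, b1 = fuse_conv_bn(conv1x1, bn1)
    k1 = F.pad(k1, [1, 1, 1, 1])  # 1x1 -> 3x3
    fused = nn.Conv2d(conv3x3.in_channels, conv3x3.out_channels, 3,
                      stride=conv3x3.stride, padding=1, bias=True)
    fused.weight.copy_(k3 + k1)
    fused.bias.copy_(b3 + b1)
    return fused


# Quick equivalence check of the fused conv against the two-branch original.
conv_a, bn_a = nn.Conv2d(8, 8, 3, padding=1, bias=False), nn.BatchNorm2d(8)
conv_b, bn_b = nn.Conv2d(8, 8, 1, bias=False), nn.BatchNorm2d(8)
for m in (conv_a, bn_a, conv_b, bn_b):
    m.eval()
fused = reparameterize_block(conv_a, bn_a, conv_b, bn_b).eval()
x = torch.randn(1, 8, 16, 16)
with torch.no_grad():
    print(torch.allclose(fused(x), bn_a(conv_a(x)) + bn_b(conv_b(x)), atol=1e-5))  # True
```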