Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

mmpose_RTDT-1550_Train_a_network_in_LS_based_on_merged_annotation #15

Open
wants to merge 5 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
102 changes: 102 additions & 0 deletions .drone.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,102 @@
---
#######################################################################################################################
# The pipeline below builds the "latest" mmpose container image only for
# pushes to the repository default branch (master).
#######################################################################################################################

# This pipeline is used after each merge request to build the latest mmpose docker image
kind: pipeline
type: docker
name: mmpose LS_mmpose_latest

platform:
  arch: amd64
  os: linux

trigger:
  branch:
    # FIX: was "main", but the repository default branch is "master"
    # (the PR base branch and the banner comments both say master),
    # so this latest-image pipeline would never trigger.
    - master
  event:
    - push

clone:
  depth: 1

steps:
  - name: Build latest LS_mmpose docker image
    image: plugins/docker:20.14
    environment:
      # quoted so the env value stays a string rather than a YAML integer
      DOCKER_BUILDKIT: "1"
    settings:
      dockerfile: docker/LabelStudio.Dockerfile
      context: docker/
      registry: quay.io
      repo: quay.io/logivations/ml_all
      privileged: true
      build_args:
        - BUILDKIT_INLINE_CACHE=1
      cache_from: quay.io/logivations/ml_all:LS_mmpose_latest
      tags:
        - LS_mmpose_latest
        - LS_mmpose_latest_${DRONE_COMMIT_SHA}
      username:
        from_secret: DOCKER_QUAY_USERNAME
      password:
        from_secret: DOCKER_QUAY_PASSWORD

#######################################################################################################################
# The pipeline below runs pull-request validation only: it builds a
# per-PR mmpose image tagged with the PR number and commit SHA.
#######################################################################################################################

# 2. Build PR docker image for mmpose
---
kind: pipeline
type: docker
name: PR validation build mmpose images

platform:
  arch: amd64
  os: linux

trigger:
  event:
    include:
      - pull_request

clone:
  # deeper clone than the master pipeline so PR merge commits resolve
  depth: 50

steps:
  - name: Build LS_mmpose docker image for pull request
    image: plugins/docker:20.14
    environment:
      # quoted so the env value stays a string rather than a YAML integer
      DOCKER_BUILDKIT: "1"
    settings:
      dockerfile: docker/LabelStudio.Dockerfile
      context: docker/
      registry: quay.io
      repo: quay.io/logivations/ml_all
      privileged: true
      build_args:
        - BUILDKIT_INLINE_CACHE=1
      cache_from:
        - quay.io/logivations/ml_all:LS_mmpose_latest
        - quay.io/logivations/ml_all:LS_mmpose_pr${DRONE_PULL_REQUEST}
      tags:
        - LS_mmpose_pr${DRONE_PULL_REQUEST}
        - LS_mmpose_pr${DRONE_PULL_REQUEST}_${DRONE_COMMIT_SHA}
      username:
        from_secret: DOCKER_QUAY_USERNAME
      password:
        from_secret: DOCKER_QUAY_PASSWORD
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ repos:
hooks:
- id: isort
- repo: https://github.com/pre-commit/mirrors-yapf
rev: v0.30.0
rev: v0.40.1
hooks:
- id: yapf
- repo: https://github.com/pre-commit/pre-commit-hooks
Expand Down
170 changes: 170 additions & 0 deletions auto_training/config_factories/mmpose_config_factory.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,170 @@
from typing import Optional

from mmcv import Config


def make_mmpose_cfg(
        work_dir: str,
        labels: Optional[list] = None
):
    """Build an mmcv ``Config`` for top-down mmpose keypoint training.

    Args:
        work_dir: Directory where checkpoints and logs are written.
        labels: Keypoint label names; ``len(labels)`` determines the number
            of output channels / joints. Defaults to an empty list.

    Returns:
        mmcv.Config: The assembled training configuration.
    """
    # FIX: the original signature used a mutable default (``labels: list = []``);
    # ``None`` sentinel avoids sharing one list object across calls.
    if labels is None:
        labels = []
    num_keypoints = len(labels)

    channel_cfg = dict(
        num_output_channels=num_keypoints,
        dataset_joints=num_keypoints,
        dataset_channel=list(range(num_keypoints)),
        inference_channel=list(range(num_keypoints))
    )

    # Validation/test pipeline: deterministic (no flips or random scaling).
    val_pipeline = [
        dict(type='LoadImageFromFile'),
        dict(type='TopDownAffine'),
        dict(type='ToTensor'),
        dict(
            type='NormalizeTensor',
            # standard ImageNet mean/std normalization
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225]),
        dict(
            type='Collect',
            keys=['img'],
            meta_keys=[
                'image_file', 'center', 'scale', 'rotation', 'bbox_score',
                'flip_pairs'
            ]),
    ]

    # Training pipeline: adds random flip/rotation/scale augmentation and
    # heatmap target generation.
    train_pipeline = [
        dict(type='LoadImageFromFile'),
        dict(type='TopDownRandomFlip', flip_prob=0.5),
        dict(
            type='TopDownGetRandomScaleRotation', rot_factor=180, scale_factor=0.5, rot_prob=0.9),
        dict(type='TopDownAffine'),
        dict(type='ToTensor'),
        dict(
            type='NormalizeTensor',
            mean=[0.485, 0.456, 0.406],
            std=[0.229, 0.224, 0.225]),
        dict(type='TopDownGenerateTarget', sigma=3),
        dict(
            type='Collect',
            keys=['img', 'target', 'target_weight'],
            meta_keys=[
                'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
                'rotation', 'bbox_score', 'flip_pairs'
            ]),
    ]

    data_cfg = dict(
        image_size=[192, 256],
        heatmap_size=[48, 64],
        num_output_channels=channel_cfg['num_output_channels'],
        num_joints=channel_cfg['dataset_joints'],
        dataset_channel=channel_cfg['dataset_channel'],
        inference_channel=channel_cfg['inference_channel'],
        soft_nms=False,
        nms_thr=1.0,
        oks_thr=0.9,
        vis_thr=0.2,
        use_gt_bbox=True,
        det_bbox_thr=0.0,
        # plain string (the original used an f-string with no placeholders)
        bbox_file='/dataset/train/coco_train.json',
    )

    cfg = dict(
        work_dir=work_dir,
        total_epochs=100,
        default_scope='mmpose',
        optimizer_config=dict(grad_clip=None),
        log_level='INFO',
        load_from=None,
        resume_from=None,
        dist_params=dict(backend='nccl'),
        optimizer=dict(
            type='Adam',
            lr=1e-4,
        ),
        workflow=[('train', 1)],
        checkpoint_config=dict(interval=100),
        evaluation=dict(interval=10, metric='mAP', key_indicator='AP'),
        # NOTE(review): lr step milestones [360, 380] exceed total_epochs=100,
        # so the step decay never fires — confirm whether this is intended.
        lr_config=dict(
            policy='step',
            warmup='linear',
            warmup_iters=5,
            warmup_ratio=0.001,
            step=[360, 380]
        ),
        log_config=dict(
            interval=25,
            hooks=[
                dict(type='TextLoggerHook'),
                dict(type='TensorboardLoggerHook')
            ]
        ),
        default_hooks=dict(
            # record the time of every iteration.
            timer=dict(type='EpochTimerHook'),

            # print log every 10 iterations.
            logger=dict(type='LoggerHook', interval=10),

            # enable the parameter scheduler.
            param_scheduler=dict(type='ParamSchedulerHook'),

            # save checkpoint per epoch.
            checkpoint=dict(type='CheckpointHook', interval=100),

            # set sampler seed in distributed environment.
            sampler_seed=dict(type='DistSamplerSeedHook'),

            # validation results visualization, set True to enable it.
            visualization=dict(type='VisualizationHook', enable=True),
        ),
        val_pipeline=val_pipeline,
        test_pipeline=val_pipeline,
        data=dict(
            samples_per_gpu=8,
            workers_per_gpu=2,
            train=dict(
                type='LiftedForkDatasetAnyKP',
                ann_file='/dataset/annotations/coco_train.json',
                img_prefix='/dataset/train/',
                data_cfg=data_cfg,
                pipeline=train_pipeline,
                num_joints=num_keypoints
            ),
            val=dict(
                type='LiftedForkDatasetAnyKP',
                ann_file='/dataset/annotations/coco_val.json',
                img_prefix='/dataset/val/',
                data_cfg=data_cfg,
                pipeline=val_pipeline,
                num_joints=num_keypoints
            ),
            test=dict(
                type='LiftedForkDatasetAnyKP',
                ann_file='/dataset/annotations/coco_test.json',
                img_prefix='/dataset/test/',
                data_cfg=data_cfg,
                pipeline=val_pipeline,
                num_joints=num_keypoints
            ),
        ),
        # NOTE(review): these evaluators look like classification metrics
        # (Accuracy/topk) rather than keypoint metrics — confirm against the
        # evaluation=dict(metric='mAP') setting above.
        val_evaluator=[
            dict(prefix='val', topk=(1,), type='Accuracy'),
            dict(prefix='val', type='AveragePrecision'),
            dict(prefix='val', type='SingleLabelMetric'),
        ],
        model=dict(
            type='TopDown',
            pretrained='torchvision://resnet18',
            backbone=dict(type='ResNet', depth=18),
            keypoint_head=dict(
                type='TopDownSimpleHead',
                in_channels=512,
                out_channels=channel_cfg['num_output_channels'],
                loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
            train_cfg=dict(by_epoch=True),
            test_cfg=dict(
                flip_test=True,
                post_process='default',
                shift_heatmap=True,
                modulate_kernel=11
            )
        )
    )
    return Config(cfg)
93 changes: 93 additions & 0 deletions auto_training/inference.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,93 @@
from pathlib import Path
from argparse import ArgumentParser
import os
import json
import time
from mmpose.apis.custom_inferencers import MMPoseInferencer

class FPSLogger:
    """Accumulates per-image inference timings and periodically prints FPS."""

    def __init__(self, num_of_images):
        """
        Args:
            num_of_images: total number of images expected; used only for the
                "Predict(i/total)" progress text.
        """
        self.tottime = 0.0             # cumulative seconds spent in recorded sections
        self.count = 0                 # number of completed records
        self.last_record = 0.0         # wall-clock start of the current record
        self.last_print = time.time()  # when an FPS line was last printed
        self.interval = 3              # minimum seconds between FPS prints
        self.num_of_images = num_of_images

    def start_record(self):
        """Mark the start of a timed section."""
        self.last_record = time.time()

    def end_record(self):
        """Mark the end of a timed section, then maybe print throughput."""
        self.tottime += time.time() - self.last_record
        self.count += 1
        self.print_fps()

    def print_fps(self):
        """Print throughput, rate-limited to once per ``interval`` seconds."""
        # FIX: also require tottime > 0 — the original divided by tottime
        # unconditionally, raising ZeroDivisionError when the elapsed time
        # was below the clock resolution.
        if time.time() - self.last_print > self.interval and self.tottime > 0.0:
            # NOTE(review): the "mmpret" logger tag looks copied from an
            # mmpretrain script — confirm the intended component name.
            print(f"{time.strftime('%Y-%m-%d %H:%M:%S')} - mmpret - INFO - Predict({self.count}/{self.num_of_images}) "
                  f"- Inference running at {self.count / self.tottime:.3f} FPS")
            self.last_print = time.time()

def main(args):
    """Run pose inference on every ``*.jpg`` under ``args.images_dir`` and
    write one Label Studio style JSON prediction per image.

    Args:
        args: parsed CLI namespace with ``checkpoint``, ``config``,
            ``images_dir`` (Path), ``output_dir`` (Path),
            ``bboxes_filepath`` (Path), ``silent`` and ``classes``.
    """
    fps_logger = FPSLogger(len(os.listdir(args.images_dir)))
    inference = MMPoseInferencer(
        config=args.config,
        pretrained=args.checkpoint,
        bboxes_filepath=args.bboxes_filepath,
    )
    print(f"Inference classes: {args.classes}")
    # NOTE(review): --silent is accepted but currently unused; the original
    # show_progress toggle is left disabled — confirm MMPoseInferencer support.
    # if args.silent:
    #     inference.show_progress = False

    images: Path = args.images_dir
    output_dir: Path = args.output_dir
    output_dir.mkdir(parents=True, exist_ok=True)

    start_time = time.time()
    # FIX: dropped the unused enumerate index and intermediate list; use
    # pathlib for the output name instead of str.split("/") (which breaks
    # on Windows path separators).
    for image_path in images.glob("**/*.jpg"):
        try:
            fps_logger.start_record()
            result = inference(str(image_path))
            fps_logger.end_record()

            pred_path = output_dir / image_path.with_suffix(".json").name
            # NOTE(review): "pred_class"/"choices" is a classification-style
            # result for a pose inferencer — confirm against MMPoseInferencer.
            prediction = {
                "result": [
                    {
                        "type": "choices",
                        "value": {"choices": [result[0]['pred_class']]},
                        "origin": "manual",
                        "to_name": "image",
                        "from_name": "choice",
                    }
                ],
            }

            with open(pred_path, "w") as f:
                json.dump(prediction, f)
        except Exception as e:
            # best-effort: report and keep processing the remaining images
            print(f"Failed with {image_path}. {e}")
    print(f"Inference time: {round(time.time() - start_time, 2)} s.")


if __name__ == "__main__":
    # CLI entry point. Positional argument order is the CLI contract —
    # do not reorder.
    arg_parser = ArgumentParser()
    arg_parser.add_argument("checkpoint")
    arg_parser.add_argument("config")
    arg_parser.add_argument("images_dir", type=Path)
    arg_parser.add_argument("output_dir", type=Path)
    arg_parser.add_argument("bboxes_filepath", type=Path)
    # Optional flags.
    arg_parser.add_argument(
        "--silent",
        action="store_true",
        help="suppress progress bars and verbose output",
    )
    arg_parser.add_argument(
        "--classes",
        nargs="+",
        required=True,
        help="list of classes for the training",
    )
    main(arg_parser.parse_args())
Loading