From 3a785f12233019c960e9dea2c96acae5d4f5a865 Mon Sep 17 00:00:00 2001
From: "q.yao"
Date: Thu, 25 Nov 2021 09:57:05 +0800
Subject: [PATCH] [Refactor] Refactor codebase (#220)

* [WIP] Refactor v2.0 (#163)
* Refactor backend wrapper
* Refactor mmdet.inference
* Fix
* merge
* refactor utils
* Use deployer and deploy_model to manage pipeline
* Resolve comments
* Add a real inference API function
* rename wrappers
* Set execute to private method
* Rename deployer deploy_model
* Refactor task
* remove type hint
* lint
* Resolve comments
* resolve comments
* lint
* docstring
* [Fix]: Fix bugs in details in refactor branch (#192)
* [WIP] Refactor v2.0 (#163)
* Refactor backend wrapper
* Refactor mmdet.inference
* Fix
* merge
* refactor utils
* Use deployer and deploy_model to manage pipeline
* Resolve comments
* Add a real inference API function
* rename wrappers
* Set execute to private method
* Rename deployer deploy_model
* Refactor task
* remove type hint
* lint
* Resolve comments
* resolve comments
* lint
* docstring
* Fix errors
* lint
* resolve comments
* fix bugs
* conflict
* lint and typo
* Resolve comment
* refactor mmseg (#201)
* support mmseg
* fix docstring
* fix docstring
* [Refactor]: Get the count of backend files (#202)
* Fix backend files
* resolve comments
* lint
* Fix ncnn
* [Refactor]: Refactor folders of mmdet (#200)
* Move folders
* lint
* test object detection model
* lint
* reset changes
* fix openvino
* resolve comments
* __init__.py
* Fix path
* [Refactor]: move mmseg (#206)
* [Refactor]: Refactor mmedit (#205)
* feature mmedit
* edit2.0
* edit
* refactor mmedit
* fix __init__.py
* fix __init__
* fix format
* fix comment
* fix comment
* Fix wrong func_name of ConvFCBBoxHead (#209)
* [Refactor]: Refactor mmdet unit test (#207)
* Move folders
* lint
* test object detection model
* lint
* WIP
* remove print
* finish unit test
* Fix tests
* resolve comments
* Add mask test
* lint
* resolve comments
* Refine cfg file
* Move files
* add files
* Fix path
* [Unittest]: Refine the unit tests in mmdet #214
* [Refactor] refactor mmocr to mmdeploy/codebase (#213)
* refactor mmocr to mmdeploy/codebase
* fix docstring of show_result
* fix docstring of visualize
* refine docstring
* replace print with logging
* refine codes
* resolve comments
* resolve comments
* [Refactor]: mmseg tests (#210)
* refactor mmseg tests
* rename test_codebase
* update
* add model.py
* fix
* [Refactor] Refactor mmcls and the package (#217)
* refactor mmcls
* fix yapf
* fix isort
* refactor-mmcls-package
* fix print to logging
* fix docstrings according to others' comments
* fix comments
* fix comments
* fix AllentDan's comment in PR #215
* remove mmocr init
* [Refactor] Refactor mmedit tests (#212)
* feature mmedit
* edit2.0
* edit
* refactor mmedit
* fix __init__.py
* fix __init__
* fix format
* fix comment
* fix comment
* buff
* edit test and code refactor
* refactor dir
* refactor tests/mmedit
* fix docstring
* add test coverage
* fix lint
* fix comment
* fix comment
* Update type hint (#216)
* update type hint
* update docstring
* update
* remove file
* fix ppl
* Refine get_predefined_partition_cfg
* fix tensorrt version > 8
* move parse_cuda_device_id to device.py
* Fix cascade
* onnx2ncnn docstring

Co-authored-by: Yifan Zhou
Co-authored-by: RunningLeon
Co-authored-by: VVsssssk <88368822+VVsssssk@users.noreply.github.com>
Co-authored-by: AllentDan <41138331+AllentDan@users.noreply.github.com>
Co-authored-by: hanrui1sensetime <83800577+hanrui1sensetime@users.noreply.github.com>
---
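Note: after this refactor, the per-codebase helpers that mmdeploy/apis/utils.py
used to dispatch on Codebase (init_backend_model, create_input, run_inference,
...) become methods of a single task processor. A minimal usage sketch that
mirrors the new inference_model below; the config, checkpoint and image names
are placeholders, not files shipped with this patch:

    import torch

    from mmdeploy.apis.utils import build_task_processor
    from mmdeploy.utils import get_input_shape, load_config

    # Placeholder paths -- substitute real deploy/model configs and a backend
    # file produced by the conversion tools.
    deploy_cfg, model_cfg = load_config('detection_onnxruntime_static.py',
                                        'faster_rcnn_r50_fpn_1x_coco.py')
    task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu')

    # The task processor now owns the steps that were previously free
    # functions dispatched on Codebase.
    model = task_processor.init_backend_model(['end2end.onnx'])
    model_inputs, _ = task_processor.create_input('demo.jpg',
                                                  get_input_shape(deploy_cfg))
    with torch.no_grad():
        result = task_processor.run_inference(model, model_inputs)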
backend_ops/ncnn/pyncnn_ext/CMakeLists.txt | 2 +- mmdeploy/__init__.py | 25 - mmdeploy/apis/__init__.py | 12 +- mmdeploy/apis/calibration.py | 29 +- mmdeploy/apis/extract_model.py | 2 +- mmdeploy/apis/inference.py | 65 +- mmdeploy/apis/ncnn/__init__.py | 30 +- mmdeploy/apis/onnxruntime/__init__.py | 23 +- mmdeploy/apis/openvino/__init__.py | 22 +- mmdeploy/apis/openvino/utils.py | 22 + mmdeploy/apis/ppl/__init__.py | 18 +- mmdeploy/apis/pytorch2onnx.py | 16 +- mmdeploy/apis/tensorrt/__init__.py | 32 +- mmdeploy/apis/test.py | 167 --- mmdeploy/apis/utils.py | 367 +------ mmdeploy/apis/visualize.py | 67 ++ mmdeploy/backend/__init__.py | 22 + mmdeploy/backend/base/__init__.py | 8 + .../backend/base/backend_wrapper_registry.py | 27 + mmdeploy/backend/base/base_wrapper.py | 70 ++ mmdeploy/backend/ncnn/__init__.py | 26 + .../{apis => backend}/ncnn/init_plugins.py | 4 +- mmdeploy/backend/ncnn/onnx2ncnn.py | 40 + .../ncnn_utils.py => backend/ncnn/wrapper.py} | 58 +- mmdeploy/backend/onnxruntime/__init__.py | 22 + .../onnxruntime/init_plugins.py | 2 +- .../onnxruntime/wrapper.py} | 68 +- mmdeploy/backend/openvino/__init__.py | 16 + .../openvino/onnx2openvino.py | 0 .../openvino/wrapper.py} | 53 +- mmdeploy/backend/ppl/__init__.py | 16 + mmdeploy/{apis => backend}/ppl/onnx2ppl.py | 20 +- .../ppl_utils.py => backend/ppl/wrapper.py} | 64 +- mmdeploy/backend/tensorrt/__init__.py | 30 + .../{apis => backend}/tensorrt/calib_utils.py | 6 +- .../tensorrt/init_plugins.py | 4 +- .../tensorrt/onnx2tensorrt.py | 20 +- .../tensorrt/utils.py} | 126 +-- mmdeploy/backend/tensorrt/wrapper.py | 123 +++ mmdeploy/codebase/__init__.py | 31 + mmdeploy/codebase/base/__init__.py | 8 + mmdeploy/codebase/base/backend_model.py | 78 ++ mmdeploy/codebase/base/mmcodebase.py | 122 +++ mmdeploy/codebase/base/task.py | 238 +++++ mmdeploy/{ => codebase}/mmcls/__init__.py | 2 +- mmdeploy/codebase/mmcls/deploy/__init__.py | 4 + .../codebase/mmcls/deploy/classification.py | 234 +++++ .../mmcls/deploy/classification_model.py | 160 +++ .../codebase/mmcls/deploy/mmclassification.py | 140 +++ .../{ => codebase}/mmcls/models/__init__.py | 0 .../mmcls/models/backbones/__init__.py | 0 .../mmcls/models/backbones/shufflenet_v2.py | 0 .../mmcls/models/classifiers/__init__.py | 0 .../mmcls/models/classifiers/base.py | 0 .../mmcls/models/heads/__init__.py | 0 .../mmcls/models/heads/cls_head.py | 0 .../mmcls/models/heads/multi_label_head.py | 0 mmdeploy/codebase/mmdet/__init__.py | 9 + .../{ => codebase}/mmdet/core/__init__.py | 0 mmdeploy/codebase/mmdet/core/bbox/__init__.py | 3 + .../mmdet/core/bbox}/delta_xywh_bbox_coder.py | 24 +- .../mmdet/core/bbox}/tblr_bbox_coder.py | 18 +- .../mmdet/core/bbox/transforms.py | 6 +- .../mmdet/core/post_processing/__init__.py | 0 .../mmdet/core/post_processing/bbox_nms.py | 5 +- mmdeploy/codebase/mmdet/deploy/__init__.py | 8 + mmdeploy/codebase/mmdet/deploy/mmdetection.py | 142 +++ .../mmdet/deploy/model_partition_cfg.py} | 14 - .../codebase/mmdet/deploy/object_detection.py | 235 +++++ .../mmdet/deploy/object_detection_model.py | 579 +++++++++++ mmdeploy/codebase/mmdet/deploy/utils.py | 99 ++ .../{ => codebase}/mmdet/models/__init__.py | 0 .../mmdet/models/dense_heads/__init__.py | 17 + .../mmdet/models/dense_heads/anchor_head.py | 15 +- .../mmdet/models/dense_heads/atss_head.py | 10 +- .../mmdet/models/dense_heads/fcos_head.py | 19 +- .../mmdet/models/dense_heads/fovea_head.py | 10 +- .../mmdet/models/dense_heads/rpn_head.py | 20 +- .../mmdet/models/dense_heads/vfnet_head.py | 10 +- 
.../mmdet/models/dense_heads/yolo_head.py | 19 +- .../mmdet/models/dense_heads/yolox_head.py | 7 +- .../mmdet/models/detectors/__init__.py | 11 + .../mmdet/models/detectors/base.py | 7 +- .../mmdet/models/detectors/rpn.py | 3 +- .../mmdet/models/detectors/single_stage.py | 4 +- .../mmdet/models/detectors/two_stage.py | 18 +- .../mmdet/models/roi_heads/__init__.py | 22 + .../mmdet/models/roi_heads}/bbox_head.py | 14 +- .../models/roi_heads/cascade_roi_head.py | 4 +- .../mmdet/models/roi_heads}/fcn_mask_head.py | 10 +- .../roi_heads}/single_level_roi_extractor.py | 8 +- .../models/roi_heads/standard_roi_head.py | 4 +- .../mmdet/models/roi_heads/test_mixins.py | 10 +- mmdeploy/codebase/mmedit/__init__.py | 4 + mmdeploy/codebase/mmedit/deploy/__init__.py | 4 + mmdeploy/codebase/mmedit/deploy/mmediting.py | 137 +++ .../mmedit/deploy/super_resolution.py | 280 +++++ .../mmedit/deploy/super_resolution_model.py | 181 ++++ .../{ => codebase}/mmedit/models/__init__.py | 0 .../mmedit/models/backbones}/__init__.py | 0 .../mmedit/models/backbones}/srcnn.py | 0 .../{mmedit => codebase/mmocr}/__init__.py | 2 +- mmdeploy/codebase/mmocr/deploy/__init__.py | 5 + mmdeploy/codebase/mmocr/deploy/mmocr.py | 141 +++ .../codebase/mmocr/deploy/text_detection.py | 264 +++++ .../mmocr/deploy/text_detection_model.py | 165 +++ .../codebase/mmocr/deploy/text_recognition.py | 264 +++++ .../mmocr/deploy/text_recognition_model.py | 172 +++ mmdeploy/codebase/mmocr/models/__init__.py | 2 + .../mmocr/models/text_detection/__init__.py | 6 + .../mmocr/models/text_detection}/fpn_cat.py | 0 .../single_stage_text_detector.py | 0 .../mmocr/models/text_recognition/__init__.py | 13 + .../mmocr/models/text_recognition}/base.py | 0 .../models/text_recognition}/crnn_decoder.py | 0 .../encode_decode_recognizer.py | 0 .../models/text_recognition}/lstm_layer.py | 0 .../mmocr/models/text_recognition}/sar.py | 2 +- .../models/text_recognition}/sar_decoder.py | 0 .../models/text_recognition}/sar_encoder.py | 0 .../mmocr/models/utils.py} | 0 .../{mmocr => codebase/mmseg}/__init__.py | 2 +- mmdeploy/codebase/mmseg/deploy/__init__.py | 5 + .../codebase/mmseg/deploy/mmsegmentation.py | 141 +++ .../codebase/mmseg/deploy/segmentation.py | 216 ++++ .../mmseg/deploy/segmentation_model.py | 190 ++++ .../mmseg/deploy/utils.py} | 0 .../{ => codebase}/mmseg/models/__init__.py | 0 .../mmseg/models/decode_heads/__init__.py | 0 .../mmseg/models/decode_heads/aspp_head.py | 0 .../mmseg/models/decode_heads/psp_head.py | 0 .../mmseg/models/segmentors/__init__.py | 0 .../mmseg/models/segmentors/base.py | 0 .../models/segmentors/encoder_decoder.py | 0 mmdeploy/core/optimizers/extractor.py | 6 +- mmdeploy/core/optimizers/function_marker.py | 19 +- mmdeploy/core/optimizers/optimize.py | 13 +- mmdeploy/core/rewriters/function_rewriter.py | 2 +- mmdeploy/core/rewriters/module_rewriter.py | 2 +- mmdeploy/core/rewriters/rewriter_manager.py | 8 +- mmdeploy/core/rewriters/rewriter_utils.py | 2 +- mmdeploy/core/rewriters/symbolic_rewriter.py | 6 +- mmdeploy/mmcls/apis/__init__.py | 4 - mmdeploy/mmcls/apis/inference.py | 239 ----- mmdeploy/mmcls/apis/visualize.py | 32 - mmdeploy/mmcls/export/__init__.py | 7 - mmdeploy/mmcls/export/prepare_input.py | 132 --- mmdeploy/mmdet/__init__.py | 3 - mmdeploy/mmdet/apis/__init__.py | 4 - mmdeploy/mmdet/apis/inference.py | 977 ------------------ mmdeploy/mmdet/apis/visualize.py | 34 - mmdeploy/mmdet/core/bbox/__init__.py | 2 - mmdeploy/mmdet/core/bbox/coder/__init__.py | 2 - mmdeploy/mmdet/export/__init__.py | 10 - 
mmdeploy/mmdet/export/onnx_utils.py | 47 - mmdeploy/mmdet/export/prepare_input.py | 162 --- mmdeploy/mmdet/export/tensorrt_helper.py | 35 - mmdeploy/mmdet/models/dense_heads/__init__.py | 15 - mmdeploy/mmdet/models/detectors/__init__.py | 9 - mmdeploy/mmdet/models/roi_heads/__init__.py | 6 - .../models/roi_heads/bbox_heads/__init__.py | 3 - .../models/roi_heads/mask_heads/__init__.py | 3 - .../roi_heads/roi_extractors/__init__.py | 8 - mmdeploy/mmedit/apis/__init__.py | 4 - mmdeploy/mmedit/apis/inference.py | 314 ------ mmdeploy/mmedit/apis/visualize.py | 46 - mmdeploy/mmedit/export/__init__.py | 7 - mmdeploy/mmedit/export/prepare_input.py | 201 ---- mmdeploy/mmedit/models/backbones/__init__.py | 1 - mmdeploy/mmocr/apis/__init__.py | 4 - mmdeploy/mmocr/apis/inference.py | 538 ---------- mmdeploy/mmocr/apis/visualize.py | 35 - mmdeploy/mmocr/export/__init__.py | 7 - mmdeploy/mmocr/export/prepare_input.py | 194 ---- mmdeploy/mmocr/models/__init__.py | 2 - mmdeploy/mmocr/models/textdet/__init__.py | 2 - mmdeploy/mmocr/models/textrecog/__init__.py | 12 - .../models/textrecog/decoders/__init__.py | 4 - .../models/textrecog/encoders/__init__.py | 3 - .../mmocr/models/textrecog/layers/__init__.py | 3 - mmdeploy/mmseg/__init__.py | 2 - mmdeploy/mmseg/apis/__init__.py | 4 - mmdeploy/mmseg/apis/inference.py | 289 ------ mmdeploy/mmseg/apis/visualize.py | 36 - mmdeploy/mmseg/export/__init__.py | 8 - mmdeploy/mmseg/export/prepare_input.py | 141 --- mmdeploy/utils/__init__.py | 11 +- mmdeploy/utils/config_utils.py | 52 +- mmdeploy/utils/device.py | 37 + mmdeploy/utils/test.py | 61 +- mmdeploy/utils/timer.py | 9 +- mmdeploy/version.py | 3 +- .../test_mmdet/data/coco_sample.json | 0 .../test_mmdet/data/mask_model.json | 0 tests/test_codebase/test_mmdet/data/model.py | 108 ++ .../test_mmdet/data/single_stage_model.json | 0 .../test_mmdet/test_mmdet_core.py | 26 +- .../test_mmdet/test_mmdet_models.py | 92 +- .../test_mmdet/test_mmdet_utils.py | 49 + .../test_mmdet/test_object_detection.py | 155 +++ .../test_mmdet/test_object_detection_model.py | 446 ++++++++ .../test_mmedit/data/imgs/blank.jpg | Bin .../test_mmedit/data/model.py | 4 +- .../test_mmedit/test_mmedit_models.py | 0 .../test_mmedit/test_super_resolution.py | 117 +++ .../test_super_resolution_model.py | 53 + .../test_mmseg/data/model.py | 62 +- .../test_mmseg/test_mmseg_models.py | 30 +- .../test_mmseg/test_segmentation.py | 115 +++ .../test_mmseg/test_segmentation_model.py | 137 +++ tests/test_codebase/test_mmseg/test_utils.py | 25 + tests/test_mmdet/data/imgs/000000000139.jpg | Bin 161811 -> 0 bytes tests/test_mmdet/test_mmdet_apis.py | 574 ---------- tests/test_mmdet/test_mmdet_export.py | 104 -- tests/test_mmedit/test_mmedit_export.py | 97 -- tests/test_mmseg/data/imgs/blank.jpg | Bin 691 -> 0 bytes tests/test_mmseg/test_mmseg_export.py | 114 -- tests/test_pytorch/test_pytorch_ops.py | 2 - tools/deploy.py | 41 +- tools/test.py | 41 +- 220 files changed, 6698 insertions(+), 5837 deletions(-) create mode 100644 mmdeploy/apis/openvino/utils.py delete mode 100644 mmdeploy/apis/test.py create mode 100644 mmdeploy/apis/visualize.py create mode 100644 mmdeploy/backend/__init__.py create mode 100644 mmdeploy/backend/base/__init__.py create mode 100644 mmdeploy/backend/base/backend_wrapper_registry.py create mode 100644 mmdeploy/backend/base/base_wrapper.py create mode 100644 mmdeploy/backend/ncnn/__init__.py rename mmdeploy/{apis => backend}/ncnn/init_plugins.py (92%) create mode 100644 mmdeploy/backend/ncnn/onnx2ncnn.py rename 
mmdeploy/{apis/ncnn/ncnn_utils.py => backend/ncnn/wrapper.py} (67%) create mode 100644 mmdeploy/backend/onnxruntime/__init__.py rename mmdeploy/{apis => backend}/onnxruntime/init_plugins.py (93%) rename mmdeploy/{apis/onnxruntime/onnxruntime_utils.py => backend/onnxruntime/wrapper.py} (59%) create mode 100644 mmdeploy/backend/openvino/__init__.py rename mmdeploy/{apis => backend}/openvino/onnx2openvino.py (100%) rename mmdeploy/{apis/openvino/openvino_utils.py => backend/openvino/wrapper.py} (75%) create mode 100644 mmdeploy/backend/ppl/__init__.py rename mmdeploy/{apis => backend}/ppl/onnx2ppl.py (79%) rename mmdeploy/{apis/ppl/ppl_utils.py => backend/ppl/wrapper.py} (76%) create mode 100644 mmdeploy/backend/tensorrt/__init__.py rename mmdeploy/{apis => backend}/tensorrt/calib_utils.py (96%) rename mmdeploy/{apis => backend}/tensorrt/init_plugins.py (93%) rename mmdeploy/{apis => backend}/tensorrt/onnx2tensorrt.py (81%) rename mmdeploy/{apis/tensorrt/tensorrt_utils.py => backend/tensorrt/utils.py} (57%) create mode 100644 mmdeploy/backend/tensorrt/wrapper.py create mode 100644 mmdeploy/codebase/__init__.py create mode 100644 mmdeploy/codebase/base/__init__.py create mode 100644 mmdeploy/codebase/base/backend_model.py create mode 100644 mmdeploy/codebase/base/mmcodebase.py create mode 100644 mmdeploy/codebase/base/task.py rename mmdeploy/{ => codebase}/mmcls/__init__.py (50%) create mode 100644 mmdeploy/codebase/mmcls/deploy/__init__.py create mode 100644 mmdeploy/codebase/mmcls/deploy/classification.py create mode 100644 mmdeploy/codebase/mmcls/deploy/classification_model.py create mode 100644 mmdeploy/codebase/mmcls/deploy/mmclassification.py rename mmdeploy/{ => codebase}/mmcls/models/__init__.py (100%) rename mmdeploy/{ => codebase}/mmcls/models/backbones/__init__.py (100%) rename mmdeploy/{ => codebase}/mmcls/models/backbones/shufflenet_v2.py (100%) rename mmdeploy/{ => codebase}/mmcls/models/classifiers/__init__.py (100%) rename mmdeploy/{ => codebase}/mmcls/models/classifiers/base.py (100%) rename mmdeploy/{ => codebase}/mmcls/models/heads/__init__.py (100%) rename mmdeploy/{ => codebase}/mmcls/models/heads/cls_head.py (100%) rename mmdeploy/{ => codebase}/mmcls/models/heads/multi_label_head.py (100%) create mode 100644 mmdeploy/codebase/mmdet/__init__.py rename mmdeploy/{ => codebase}/mmdet/core/__init__.py (100%) create mode 100644 mmdeploy/codebase/mmdet/core/bbox/__init__.py rename mmdeploy/{mmdet/core/bbox/coder => codebase/mmdet/core/bbox}/delta_xywh_bbox_coder.py (94%) rename mmdeploy/{mmdet/core/bbox/coder => codebase/mmdet/core/bbox}/tblr_bbox_coder.py (93%) rename mmdeploy/{ => codebase}/mmdet/core/bbox/transforms.py (87%) rename mmdeploy/{ => codebase}/mmdet/core/post_processing/__init__.py (100%) rename mmdeploy/{ => codebase}/mmdet/core/post_processing/bbox_nms.py (97%) create mode 100644 mmdeploy/codebase/mmdet/deploy/__init__.py create mode 100644 mmdeploy/codebase/mmdet/deploy/mmdetection.py rename mmdeploy/{mmdet/export/model_partition.py => codebase/mmdet/deploy/model_partition_cfg.py} (80%) create mode 100644 mmdeploy/codebase/mmdet/deploy/object_detection.py create mode 100644 mmdeploy/codebase/mmdet/deploy/object_detection_model.py create mode 100644 mmdeploy/codebase/mmdet/deploy/utils.py rename mmdeploy/{ => codebase}/mmdet/models/__init__.py (100%) create mode 100644 mmdeploy/codebase/mmdet/models/dense_heads/__init__.py rename mmdeploy/{ => codebase}/mmdet/models/dense_heads/anchor_head.py (95%) rename mmdeploy/{ => 
codebase}/mmdet/models/dense_heads/atss_head.py (93%) rename mmdeploy/{ => codebase}/mmdet/models/dense_heads/fcos_head.py (94%) rename mmdeploy/{ => codebase}/mmdet/models/dense_heads/fovea_head.py (92%) rename mmdeploy/{ => codebase}/mmdet/models/dense_heads/rpn_head.py (94%) rename mmdeploy/{ => codebase}/mmdet/models/dense_heads/vfnet_head.py (93%) rename mmdeploy/{ => codebase}/mmdet/models/dense_heads/yolo_head.py (96%) rename mmdeploy/{ => codebase}/mmdet/models/dense_heads/yolox_head.py (95%) create mode 100644 mmdeploy/codebase/mmdet/models/detectors/__init__.py rename mmdeploy/{ => codebase}/mmdet/models/detectors/base.py (91%) rename mmdeploy/{ => codebase}/mmdet/models/detectors/rpn.py (91%) rename mmdeploy/{ => codebase}/mmdet/models/detectors/single_stage.py (86%) rename mmdeploy/{ => codebase}/mmdet/models/detectors/two_stage.py (80%) create mode 100644 mmdeploy/codebase/mmdet/models/roi_heads/__init__.py rename mmdeploy/{mmdet/models/roi_heads/bbox_heads => codebase/mmdet/models/roi_heads}/bbox_head.py (91%) rename mmdeploy/{ => codebase}/mmdet/models/roi_heads/cascade_roi_head.py (96%) rename mmdeploy/{mmdet/models/roi_heads/mask_heads => codebase/mmdet/models/roi_heads}/fcn_mask_head.py (92%) rename mmdeploy/{mmdet/models/roi_heads/roi_extractors => codebase/mmdet/models/roi_heads}/single_level_roi_extractor.py (96%) rename mmdeploy/{ => codebase}/mmdet/models/roi_heads/standard_roi_head.py (93%) rename mmdeploy/{ => codebase}/mmdet/models/roi_heads/test_mixins.py (93%) create mode 100644 mmdeploy/codebase/mmedit/__init__.py create mode 100644 mmdeploy/codebase/mmedit/deploy/__init__.py create mode 100644 mmdeploy/codebase/mmedit/deploy/mmediting.py create mode 100644 mmdeploy/codebase/mmedit/deploy/super_resolution.py create mode 100644 mmdeploy/codebase/mmedit/deploy/super_resolution_model.py rename mmdeploy/{ => codebase}/mmedit/models/__init__.py (100%) rename mmdeploy/{mmedit/models/backbones/sr_backbones => codebase/mmedit/models/backbones}/__init__.py (100%) rename mmdeploy/{mmedit/models/backbones/sr_backbones => codebase/mmedit/models/backbones}/srcnn.py (100%) rename mmdeploy/{mmedit => codebase/mmocr}/__init__.py (50%) create mode 100644 mmdeploy/codebase/mmocr/deploy/__init__.py create mode 100644 mmdeploy/codebase/mmocr/deploy/mmocr.py create mode 100644 mmdeploy/codebase/mmocr/deploy/text_detection.py create mode 100644 mmdeploy/codebase/mmocr/deploy/text_detection_model.py create mode 100644 mmdeploy/codebase/mmocr/deploy/text_recognition.py create mode 100644 mmdeploy/codebase/mmocr/deploy/text_recognition_model.py create mode 100644 mmdeploy/codebase/mmocr/models/__init__.py create mode 100644 mmdeploy/codebase/mmocr/models/text_detection/__init__.py rename mmdeploy/{mmocr/models/textdet/necks => codebase/mmocr/models/text_detection}/fpn_cat.py (100%) rename mmdeploy/{mmocr/models/textdet/detectors => codebase/mmocr/models/text_detection}/single_stage_text_detector.py (100%) create mode 100644 mmdeploy/codebase/mmocr/models/text_recognition/__init__.py rename mmdeploy/{mmocr/models/textrecog/recognizer => codebase/mmocr/models/text_recognition}/base.py (100%) rename mmdeploy/{mmocr/models/textrecog/decoders => codebase/mmocr/models/text_recognition}/crnn_decoder.py (100%) rename mmdeploy/{mmocr/models/textrecog/recognizer => codebase/mmocr/models/text_recognition}/encode_decode_recognizer.py (100%) rename mmdeploy/{mmocr/models/textrecog/layers => codebase/mmocr/models/text_recognition}/lstm_layer.py (100%) rename 
mmdeploy/{mmocr/models/textrecog/recognizer => codebase/mmocr/models/text_recognition}/sar.py (97%) rename mmdeploy/{mmocr/models/textrecog/decoders => codebase/mmocr/models/text_recognition}/sar_decoder.py (100%) rename mmdeploy/{mmocr/models/textrecog/encoders => codebase/mmocr/models/text_recognition}/sar_encoder.py (100%) rename mmdeploy/{mmocr/utils/cfg_utils.py => codebase/mmocr/models/utils.py} (100%) rename mmdeploy/{mmocr => codebase/mmseg}/__init__.py (50%) create mode 100644 mmdeploy/codebase/mmseg/deploy/__init__.py create mode 100644 mmdeploy/codebase/mmseg/deploy/mmsegmentation.py create mode 100644 mmdeploy/codebase/mmseg/deploy/segmentation.py create mode 100644 mmdeploy/codebase/mmseg/deploy/segmentation_model.py rename mmdeploy/{mmseg/export/onnx_utils.py => codebase/mmseg/deploy/utils.py} (100%) rename mmdeploy/{ => codebase}/mmseg/models/__init__.py (100%) rename mmdeploy/{ => codebase}/mmseg/models/decode_heads/__init__.py (100%) rename mmdeploy/{ => codebase}/mmseg/models/decode_heads/aspp_head.py (100%) rename mmdeploy/{ => codebase}/mmseg/models/decode_heads/psp_head.py (100%) rename mmdeploy/{ => codebase}/mmseg/models/segmentors/__init__.py (100%) rename mmdeploy/{ => codebase}/mmseg/models/segmentors/base.py (100%) rename mmdeploy/{ => codebase}/mmseg/models/segmentors/encoder_decoder.py (100%) delete mode 100644 mmdeploy/mmcls/apis/__init__.py delete mode 100644 mmdeploy/mmcls/apis/inference.py delete mode 100644 mmdeploy/mmcls/apis/visualize.py delete mode 100644 mmdeploy/mmcls/export/__init__.py delete mode 100644 mmdeploy/mmcls/export/prepare_input.py delete mode 100644 mmdeploy/mmdet/__init__.py delete mode 100644 mmdeploy/mmdet/apis/__init__.py delete mode 100644 mmdeploy/mmdet/apis/inference.py delete mode 100644 mmdeploy/mmdet/apis/visualize.py delete mode 100644 mmdeploy/mmdet/core/bbox/__init__.py delete mode 100644 mmdeploy/mmdet/core/bbox/coder/__init__.py delete mode 100644 mmdeploy/mmdet/export/__init__.py delete mode 100644 mmdeploy/mmdet/export/onnx_utils.py delete mode 100644 mmdeploy/mmdet/export/prepare_input.py delete mode 100644 mmdeploy/mmdet/export/tensorrt_helper.py delete mode 100644 mmdeploy/mmdet/models/dense_heads/__init__.py delete mode 100644 mmdeploy/mmdet/models/detectors/__init__.py delete mode 100644 mmdeploy/mmdet/models/roi_heads/__init__.py delete mode 100644 mmdeploy/mmdet/models/roi_heads/bbox_heads/__init__.py delete mode 100644 mmdeploy/mmdet/models/roi_heads/mask_heads/__init__.py delete mode 100644 mmdeploy/mmdet/models/roi_heads/roi_extractors/__init__.py delete mode 100644 mmdeploy/mmedit/apis/__init__.py delete mode 100644 mmdeploy/mmedit/apis/inference.py delete mode 100644 mmdeploy/mmedit/apis/visualize.py delete mode 100644 mmdeploy/mmedit/export/__init__.py delete mode 100644 mmdeploy/mmedit/export/prepare_input.py delete mode 100644 mmdeploy/mmedit/models/backbones/__init__.py delete mode 100644 mmdeploy/mmocr/apis/__init__.py delete mode 100644 mmdeploy/mmocr/apis/inference.py delete mode 100644 mmdeploy/mmocr/apis/visualize.py delete mode 100644 mmdeploy/mmocr/export/__init__.py delete mode 100644 mmdeploy/mmocr/export/prepare_input.py delete mode 100644 mmdeploy/mmocr/models/__init__.py delete mode 100644 mmdeploy/mmocr/models/textdet/__init__.py delete mode 100644 mmdeploy/mmocr/models/textrecog/__init__.py delete mode 100644 mmdeploy/mmocr/models/textrecog/decoders/__init__.py delete mode 100644 mmdeploy/mmocr/models/textrecog/encoders/__init__.py delete mode 100644 
mmdeploy/mmocr/models/textrecog/layers/__init__.py delete mode 100644 mmdeploy/mmseg/__init__.py delete mode 100644 mmdeploy/mmseg/apis/__init__.py delete mode 100644 mmdeploy/mmseg/apis/inference.py delete mode 100644 mmdeploy/mmseg/apis/visualize.py delete mode 100644 mmdeploy/mmseg/export/__init__.py delete mode 100644 mmdeploy/mmseg/export/prepare_input.py create mode 100644 mmdeploy/utils/device.py rename tests/{ => test_codebase}/test_mmdet/data/coco_sample.json (100%) rename tests/{ => test_codebase}/test_mmdet/data/mask_model.json (100%) create mode 100644 tests/test_codebase/test_mmdet/data/model.py rename tests/{ => test_codebase}/test_mmdet/data/single_stage_model.json (100%) rename tests/{ => test_codebase}/test_mmdet/test_mmdet_core.py (89%) rename tests/{ => test_codebase}/test_mmdet/test_mmdet_models.py (93%) create mode 100644 tests/test_codebase/test_mmdet/test_mmdet_utils.py create mode 100644 tests/test_codebase/test_mmdet/test_object_detection.py create mode 100644 tests/test_codebase/test_mmdet/test_object_detection_model.py rename tests/{ => test_codebase}/test_mmedit/data/imgs/blank.jpg (100%) rename tests/{ => test_codebase}/test_mmedit/data/model.py (96%) rename tests/{ => test_codebase}/test_mmedit/test_mmedit_models.py (100%) create mode 100644 tests/test_codebase/test_mmedit/test_super_resolution.py create mode 100644 tests/test_codebase/test_mmedit/test_super_resolution_model.py rename tests/{ => test_codebase}/test_mmseg/data/model.py (50%) rename tests/{ => test_codebase}/test_mmseg/test_mmseg_models.py (88%) create mode 100644 tests/test_codebase/test_mmseg/test_segmentation.py create mode 100644 tests/test_codebase/test_mmseg/test_segmentation_model.py create mode 100644 tests/test_codebase/test_mmseg/test_utils.py delete mode 100755 tests/test_mmdet/data/imgs/000000000139.jpg delete mode 100644 tests/test_mmdet/test_mmdet_apis.py delete mode 100644 tests/test_mmdet/test_mmdet_export.py delete mode 100644 tests/test_mmedit/test_mmedit_export.py delete mode 100644 tests/test_mmseg/data/imgs/blank.jpg delete mode 100644 tests/test_mmseg/test_mmseg_export.py diff --git a/backend_ops/ncnn/pyncnn_ext/CMakeLists.txt b/backend_ops/ncnn/pyncnn_ext/CMakeLists.txt index 87108ec247..a3e3733dfa 100755 --- a/backend_ops/ncnn/pyncnn_ext/CMakeLists.txt +++ b/backend_ops/ncnn/pyncnn_ext/CMakeLists.txt @@ -11,4 +11,4 @@ pybind11_add_module(ncnn_ext ncnn_ext.cpp) target_link_libraries(ncnn_ext PUBLIC ncnn ${SHARED_TARGET}) set_target_properties( ncnn_ext PROPERTIES LIBRARY_OUTPUT_DIRECTORY - ${CMAKE_SOURCE_DIR}/mmdeploy/apis/ncnn) + ${CMAKE_SOURCE_DIR}/mmdeploy/backend/ncnn) diff --git a/mmdeploy/__init__.py b/mmdeploy/__init__.py index a73f89927d..e96395d7cb 100644 --- a/mmdeploy/__init__.py +++ b/mmdeploy/__init__.py @@ -7,28 +7,3 @@ importlib.import_module('mmdeploy.mmcv') else: logging.debug('mmcv is not installed.') - -if importlib.util.find_spec('mmcls'): - importlib.import_module('mmdeploy.mmcls') -else: - logging.debug('mmcls is not installed.') - -if importlib.util.find_spec('mmdet'): - importlib.import_module('mmdeploy.mmdet') -else: - logging.debug('mmdet is not installed.') - -if importlib.util.find_spec('mmseg'): - importlib.import_module('mmdeploy.mmseg') -else: - logging.debug('mmseg is not installed.') - -if importlib.util.find_spec('mmocr'): - importlib.import_module('mmdeploy.mmocr') -else: - logging.debug('mmocr is not installed.') - -if importlib.util.find_spec('mmedit'): - importlib.import_module('mmdeploy.mmedit') -else: - logging.debug('mmedit is not 
installed.') diff --git a/mmdeploy/apis/__init__.py b/mmdeploy/apis/__init__.py index 36065f5285..f923a11fab 100644 --- a/mmdeploy/apis/__init__.py +++ b/mmdeploy/apis/__init__.py @@ -2,13 +2,11 @@ from .extract_model import extract_model from .inference import inference_model from .pytorch2onnx import torch2onnx, torch2onnx_impl -from .test import post_process_outputs, single_gpu_test -from .utils import (build_dataloader, build_dataset, get_tensor_from_input, - init_backend_model) +from .utils import build_task_processor, get_predefined_partition_cfg +from .visualize import visualize_model __all__ = [ - 'create_calib_table', 'torch2onnx_impl', 'torch2onnx', 'extract_model', - 'inference_model', 'init_backend_model', 'single_gpu_test', - 'post_process_outputs', 'build_dataset', 'get_tensor_from_input', - 'build_dataloader' + 'create_calib_table', 'extract_model', 'inference_model', 'torch2onnx', + 'torch2onnx_impl', 'build_task_processor', 'get_predefined_partition_cfg', + 'visualize_model' ] diff --git a/mmdeploy/apis/calibration.py b/mmdeploy/apis/calibration.py index 035520eb47..a435008ba3 100644 --- a/mmdeploy/apis/calibration.py +++ b/mmdeploy/apis/calibration.py @@ -7,9 +7,7 @@ from mmdeploy.core import (RewriterContext, patch_model, reset_mark_function_count) -from mmdeploy.utils import cfg_apply_marks, get_codebase, load_config -from .utils import (build_dataloader, build_dataset, get_tensor_from_input, - init_pytorch_model, run_inference) +from mmdeploy.utils import cfg_apply_marks, load_config def create_calib_table(calib_file: str, @@ -46,32 +44,33 @@ def create_calib_table(calib_file: str, # load dataset_cfg if necessary dataset_cfg = load_config(dataset_cfg)[0] - codebase = get_codebase(deploy_cfg) + from mmdeploy.apis.utils import build_task_processor + task_processor = build_task_processor(model_cfg, deploy_cfg, device) + apply_marks = cfg_apply_marks(deploy_cfg) backend = 'default' - model = init_pytorch_model( - codebase, model_cfg, model_checkpoint, device=device) - dataset = build_dataset(codebase, dataset_cfg, dataset_type) + model = task_processor.init_pytorch_model(model_checkpoint) + dataset = task_processor.build_dataset(dataset_cfg, dataset_type) # patch model patched_model = patch_model(model, cfg=deploy_cfg, backend=backend) - with h5py.File(calib_file, mode='w') as calib_file: - calib_data_group = calib_file.create_group('calib_data') + with h5py.File(calib_file, mode='w') as file: + calib_data_group = file.create_group('calib_data') if not apply_marks: # create end2end group input_data_group = calib_data_group.create_group('end2end') input_group = input_data_group.create_group('input') - dataloader = build_dataloader( - codebase, dataset, 1, 1, dist=False, shuffle=False) + dataloader = task_processor.build_dataloader( + dataset, 1, 1, dist=False, shuffle=False) patched_model = MMDataParallel(patched_model, device_ids=[device_id]) prog_bar = mmcv.ProgressBar(len(dataset)) for data_id, input_data in enumerate(dataloader): if not apply_marks: # save end2end data - input_tensor = get_tensor_from_input(codebase, input_data) + input_tensor = task_processor.get_tensor_from_input(input_data) input_ndarray = input_tensor.detach().cpu().numpy() input_group.create_dataset( str(data_id), @@ -84,10 +83,10 @@ def create_calib_table(calib_file: str, cfg=deploy_cfg, backend=backend, create_calib=True, - calib_file=calib_file, + calib_file=file, data_id=data_id): reset_mark_function_count() - _ = run_inference(codebase, input_data, patched_model) - calib_file.flush() + _ = 
task_processor.run_inference(patched_model, input_data) + file.flush() prog_bar.update() diff --git a/mmdeploy/apis/extract_model.py b/mmdeploy/apis/extract_model.py index a7896e665a..e705198498 100644 --- a/mmdeploy/apis/extract_model.py +++ b/mmdeploy/apis/extract_model.py @@ -16,7 +16,7 @@ def extract_model(model: Union[str, onnx.ModelProto], start_name_map: Optional[Dict[str, str]] = None, end_name_map: Optional[Dict[str, str]] = None, dynamic_axes: Optional[Dict[str, Dict[int, str]]] = None, - save_file: Optional[str] = None): + save_file: Optional[str] = None) -> onnx.ModelProto: """Extract sub-model from an ONNX model. The sub-model is defined by the names of the input and output tensors diff --git a/mmdeploy/apis/inference.py b/mmdeploy/apis/inference.py index 8040ed8cd9..09c1720df5 100644 --- a/mmdeploy/apis/inference.py +++ b/mmdeploy/apis/inference.py @@ -1,75 +1,40 @@ -from typing import Optional, Sequence, Union +from typing import Any, Sequence, Union import mmcv import numpy as np import torch -from mmdeploy.utils import (Backend, get_backend, get_codebase, - get_input_shape, get_task_type, load_config) -from .utils import (create_input, init_backend_model, init_pytorch_model, - run_inference, visualize) +from mmdeploy.utils import get_input_shape, load_config def inference_model(model_cfg: Union[str, mmcv.Config], deploy_cfg: Union[str, mmcv.Config], - model: Union[str, Sequence[str], torch.nn.Module], - img: Union[str, np.ndarray], - device: str, - backend: Optional[Backend] = None, - output_file: Optional[str] = None, - show_result: bool = False): + backend_files: Sequence[str], img: Union[str, np.ndarray], + device: str) -> Any: """Run inference with PyTorch or backend model and show results. Args: model_cfg (str | mmcv.Config): Model config file or Config object. deploy_cfg (str | mmcv.Config): Deployment config file or Config object. - model (str | list[str], torch.nn.Module): Input model or file(s). + backend_files (Sequence[str]): Input backend model file(s). img (str | np.ndarray): Input image file or numpy array for inference. device (str): A string specifying device type. - backend (Backend): Specifying backend type, defaults to `None`. - output_file (str): Output file to save visualized image, defaults to - `None`. Only valid if `show_result` is set to `False`. - show_result (bool): Whether to show plotted image in windows, defaults - to `False`. + + Returns: + Any: The inference results """ deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg) - codebase = get_codebase(deploy_cfg) - task = get_task_type(deploy_cfg) - input_shape = get_input_shape(deploy_cfg) - if backend is None: - backend = get_backend(deploy_cfg) - - if isinstance(model, str): - model = [model] + from mmdeploy.apis.utils import build_task_processor + task_processor = build_task_processor(model_cfg, deploy_cfg, device) - if isinstance(model, (list, tuple)): - assert len(model) > 0, 'Model should have at least one element.' 
- assert all([isinstance(m, str) for m in model]), 'All elements in the \ - list should be str' + model = task_processor.init_backend_model(backend_files) - if backend == Backend.PYTORCH: - model = init_pytorch_model(codebase, model_cfg, model[0], device) - else: - device_id = -1 if device == 'cpu' else 0 - model = init_backend_model( - model, - model_cfg=model_cfg, - deploy_cfg=deploy_cfg, - device_id=device_id) - - model_inputs, _ = create_input(codebase, task, model_cfg, img, input_shape, - device) + input_shape = get_input_shape(deploy_cfg) + model_inputs, _ = task_processor.create_input(img, input_shape) with torch.no_grad(): - result = run_inference(codebase, model_inputs, model) + result = task_processor.run_inference(model, model_inputs) - visualize( - codebase, - img, - result=result, - model=model, - output_file=output_file, - backend=backend, - show_result=show_result) + return result diff --git a/mmdeploy/apis/ncnn/__init__.py b/mmdeploy/apis/ncnn/__init__.py index 28ed404be8..9d66d06530 100644 --- a/mmdeploy/apis/ncnn/__init__.py +++ b/mmdeploy/apis/ncnn/__init__.py @@ -1,28 +1,8 @@ -import importlib -import os.path as osp - -from .init_plugins import get_onnx2ncnn_path, get_ops_path - -__all__ = ['get_ops_path', 'get_onnx2ncnn_path'] - - -def is_available(): - """Check whether ncnn with extension is installed. - - Returns: - bool: True if ncnn and its extension are installed. - """ - ncnn_ops_path = get_ops_path() - if not osp.exists(ncnn_ops_path): - return False - has_pyncnn = importlib.util.find_spec('ncnn') is not None - has_pyncnn_ext = importlib.util.find_spec( - 'mmdeploy.apis.ncnn.ncnn_ext') is not None - - return has_pyncnn and has_pyncnn_ext +from mmdeploy.backend.ncnn import is_available +__all__ = ['is_available'] if is_available(): - from .ncnn_utils import NCNNWrapper - - __all__ += ['NCNNWrapper'] + from mmdeploy.backend.ncnn.onnx2ncnn import (onnx2ncnn, + get_output_model_file) + __all__ += ['onnx2ncnn', 'get_output_model_file'] diff --git a/mmdeploy/apis/onnxruntime/__init__.py b/mmdeploy/apis/onnxruntime/__init__.py index 7eed665244..281f1e3c87 100644 --- a/mmdeploy/apis/onnxruntime/__init__.py +++ b/mmdeploy/apis/onnxruntime/__init__.py @@ -1,22 +1,3 @@ -import importlib -import os.path as osp +from mmdeploy.backend.onnxruntime import is_available -from .init_plugins import get_ops_path - - -def is_available(): - """Check whether onnxruntime and its custom ops are installed. - - Returns: - bool: True if onnxruntime package is installed and its - custom ops are compiled. - """ - onnxruntime_op_path = get_ops_path() - if not osp.exists(onnxruntime_op_path): - return False - return importlib.util.find_spec('onnxruntime') is not None - - -if is_available(): - from .onnxruntime_utils import ORTWrapper - __all__ = ['get_ops_path', 'ORTWrapper'] +__all__ = ['is_available'] diff --git a/mmdeploy/apis/openvino/__init__.py b/mmdeploy/apis/openvino/__init__.py index 89547526d2..5d2e28fb15 100644 --- a/mmdeploy/apis/openvino/__init__.py +++ b/mmdeploy/apis/openvino/__init__.py @@ -1,19 +1,11 @@ -import importlib - - -def is_available() -> bool: - """Checking if OpenVINO is installed. - - Returns: - bool: True if OpenVINO is installed. 
- """ - return importlib.util.find_spec('openvino') is not None +from mmdeploy.backend.openvino import is_available +__all__ = ['is_available'] if is_available(): - from .openvino_utils import OpenVINOWrapper, get_input_shape_from_cfg - from .onnx2openvino import (onnx2openvino, get_output_model_file) - __all__ = [ - 'OpenVINOWrapper', 'onnx2openvino', 'get_output_model_file', - 'get_input_shape_from_cfg' + from mmdeploy.backend.openvino.onnx2openvino \ + import onnx2openvino, get_output_model_file + from .utils import get_input_shape_from_cfg + __all__ += [ + 'onnx2openvino', 'get_output_model_file', 'get_input_shape_from_cfg' ] diff --git a/mmdeploy/apis/openvino/utils.py b/mmdeploy/apis/openvino/utils.py new file mode 100644 index 0000000000..4c66f23aab --- /dev/null +++ b/mmdeploy/apis/openvino/utils.py @@ -0,0 +1,22 @@ +from typing import List + +import mmcv + + +def get_input_shape_from_cfg(config: mmcv.Config) -> List[int]: + """Get the input shape from the model config for OpenVINO Model Optimizer. + + Args: + config (mmcv.Config): Model config. + Returns: + List[int]: The input shape in [1, 3, H, W] format from config + or [1, 3, 800, 1344]. + """ + shape = [] + test_pipeline = config.get('test_pipeline', None) + if test_pipeline is not None: + img_scale = test_pipeline[1]['img_scale'] + shape = [1, 3, img_scale[1], img_scale[0]] + else: + shape = [1, 3, 800, 1344] + return shape diff --git a/mmdeploy/apis/ppl/__init__.py b/mmdeploy/apis/ppl/__init__.py index 4b0d4e954b..3357a3c50a 100644 --- a/mmdeploy/apis/ppl/__init__.py +++ b/mmdeploy/apis/ppl/__init__.py @@ -1,16 +1,8 @@ -import importlib - - -def is_available(): - """Check whether ppl is installed. - - Returns: - bool: True if ppl package is installed. - """ - return importlib.util.find_spec('pyppl') is not None +from mmdeploy.backend.ppl import is_available +__all__ = ['is_available'] if is_available(): - from .ppl_utils import PPLWrapper, register_engines - from .onnx2ppl import onnx2ppl - __all__ = ['register_engines', 'PPLWrapper', 'onnx2ppl'] + from mmdeploy.backend.ppl import onnx2ppl + + __all__ += ['onnx2ppl'] diff --git a/mmdeploy/apis/pytorch2onnx.py b/mmdeploy/apis/pytorch2onnx.py index 91b648af94..527e3b56c8 100644 --- a/mmdeploy/apis/pytorch2onnx.py +++ b/mmdeploy/apis/pytorch2onnx.py @@ -5,9 +5,8 @@ import torch from mmdeploy.core import RewriterContext, patch_model -from mmdeploy.utils import (get_backend, get_codebase, get_input_shape, - get_onnx_config, get_task_type, load_config) -from .utils import create_input, init_pytorch_model +from mmdeploy.utils import (get_backend, get_input_shape, get_onnx_config, + load_config) def torch2onnx_impl(model: torch.nn.Module, input: torch.Tensor, @@ -74,14 +73,13 @@ def torch2onnx(img: Any, mmcv.mkdir_or_exist(osp.abspath(work_dir)) output_file = osp.join(work_dir, save_file) - codebase = get_codebase(deploy_cfg) - task = get_task_type(deploy_cfg) input_shape = get_input_shape(deploy_cfg) - torch_model = init_pytorch_model(codebase, model_cfg, model_checkpoint, - device) - data, model_inputs = create_input(codebase, task, model_cfg, img, - input_shape, device) + from mmdeploy.apis import build_task_processor + task_processor = build_task_processor(model_cfg, deploy_cfg, device) + + torch_model = task_processor.init_pytorch_model(model_checkpoint) + data, model_inputs = task_processor.create_input(img, input_shape) if not isinstance(model_inputs, torch.Tensor): model_inputs = model_inputs[0] diff --git a/mmdeploy/apis/tensorrt/__init__.py 
b/mmdeploy/apis/tensorrt/__init__.py index f217613559..cb224a475e 100644 --- a/mmdeploy/apis/tensorrt/__init__.py +++ b/mmdeploy/apis/tensorrt/__init__.py @@ -1,34 +1,8 @@ -# flake8: noqa -import importlib -import os.path as osp - -from .init_plugins import get_ops_path, load_tensorrt_plugin - - -def is_available(): - """Check whether TensorRT and plugins are installed. - - Returns: - bool: True if TensorRT and plugins are installed. - """ - tensorrt_op_path = get_ops_path() - if not osp.exists(tensorrt_op_path): - return False - - return importlib.util.find_spec('tensorrt') is not None - +from mmdeploy.backend.tensorrt import is_available __all__ = ['is_available'] if is_available(): - from .onnx2tensorrt import onnx2tensorrt - from .tensorrt_utils import (TRTWrapper, create_trt_engine, - load_trt_engine, save_trt_engine) - - # load tensorrt plugin lib - load_tensorrt_plugin() + from mmdeploy.backend.tensorrt.onnx2tensorrt import onnx2tensorrt - __all__ += [ - 'create_trt_engine', 'save_trt_engine', 'load_trt_engine', - 'TRTWrapper', 'onnx2tensorrt' - ] + __all__ += ['onnx2tensorrt'] diff --git a/mmdeploy/apis/test.py b/mmdeploy/apis/test.py deleted file mode 100644 index 7c77bb5dbd..0000000000 --- a/mmdeploy/apis/test.py +++ /dev/null @@ -1,167 +0,0 @@ -import warnings -from typing import Optional - -import mmcv -import numpy as np -from torch import nn -from torch.utils.data import DataLoader, Dataset - -from mmdeploy.utils import Codebase - - -def single_gpu_test(codebase: Codebase, - model: nn.Module, - data_loader: DataLoader, - show: bool = False, - out_dir: Optional[str] = None, - show_score_thr: float = 0.3): - """Run test with single gpu. - - Args: - codebase (Codebase): Specifying codebase type. - model (torch.nn.Module): Input model from nn.Module. - data_loader (DataLoader): PyTorch data loader. - show (bool): Specifying whether to show plotted results. Defaults - to `False`. - out_dir (str): A directory to save results, defaults to `None`. - show_score_thr (float): A threshold to show detection results, - defaults to `0.3`. - - Returns: - list: The prediction results. - """ - if codebase == Codebase.MMCLS: - from mmcls.apis import single_gpu_test - outputs = single_gpu_test(model, data_loader, show, out_dir) - elif codebase == Codebase.MMDET: - from mmdet.apis import single_gpu_test - outputs = single_gpu_test(model, data_loader, show, out_dir, - show_score_thr) - elif codebase == Codebase.MMSEG: - from mmseg.apis import single_gpu_test - outputs = single_gpu_test(model, data_loader, show, out_dir) - elif codebase == Codebase.MMOCR: - from mmdet.apis import single_gpu_test - outputs = single_gpu_test(model, data_loader, show, out_dir) - elif codebase == Codebase.MMEDIT: - from mmedit.apis import single_gpu_test - outputs = single_gpu_test(model, data_loader, show, out_dir) - else: - raise NotImplementedError(f'Unknown codebase type: {codebase.value}') - return outputs - - -def post_process_outputs(outputs: list, - dataset: Dataset, - model_cfg: mmcv.Config, - codebase: Codebase, - metrics: Optional[str] = None, - out: Optional[str] = None, - metric_options: Optional[dict] = None, - format_only: bool = False): - """Perform post-processing to predictions of model. - - Args: - outputs (list): A list of predictions of model inference. - dataset (Dataset): Input dataset to run test. - model_cfg (mmcv.Config): The model config. - codebase (Codebase): Specifying codebase type. 
- metrics (str): Evaluation metrics, which depends on - the codebase and the dataset, e.g., "bbox", "segm", "proposal" - for COCO, and "mAP", "recall" for PASCAL VOC in mmdet; "accuracy", - "precision", "recall", "f1_score", "support" for single label - dataset, and "mAP", "CP", "CR", "CF1", "OP", "OR", "OF1" for - multi-label dataset in mmcls. Defaults is `None`. - out (str): Output result file in pickle format, defaults to `None`. - metric_options (dict): Custom options for evaluation, will be kwargs - for dataset.evaluate() function. Defaults to `None`. - format_only (bool): Format the output results without perform - evaluation. It is useful when you want to format the result - to a specific format and submit it to the test server. Defaults - to `False`. - """ - if codebase == Codebase.MMCLS: - if metrics: - results = dataset.evaluate(outputs, metrics, metric_options) - for k, v in results.items(): - print(f'\n{k} : {v:.2f}') - else: - warnings.warn('Evaluation metrics are not specified.') - scores = np.vstack(outputs) - pred_score = np.max(scores, axis=1) - pred_label = np.argmax(scores, axis=1) - pred_class = [dataset.CLASSES[lb] for lb in pred_label] - results = { - 'pred_score': pred_score, - 'pred_label': pred_label, - 'pred_class': pred_class - } - if not out: - print('\nthe predicted result for the first element is ' - f'pred_score = {pred_score[0]:.2f}, ' - f'pred_label = {pred_label[0]} ' - f'and pred_class = {pred_class[0]}. ' - 'Specify --out to save all results to files.') - if out: - print(f'\nwriting results to {out}') - mmcv.dump(results, out) - - elif codebase == Codebase.MMDET: - if out: - print(f'\nwriting results to {out}') - mmcv.dump(outputs, out) - kwargs = {} if metric_options is None else metric_options - if format_only: - dataset.format_results(outputs, **kwargs) - if metrics: - eval_kwargs = model_cfg.get('evaluation', {}).copy() - # hard-code way to remove EvalHook args - for key in [ - 'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best', - 'rule' - ]: - eval_kwargs.pop(key, None) - eval_kwargs.update(dict(metric=metrics, **kwargs)) - print(dataset.evaluate(outputs, **eval_kwargs)) - - elif codebase == Codebase.MMSEG: - if out: - print(f'\nwriting results to {out}') - mmcv.dump(outputs, out) - kwargs = {} if metric_options is None else metric_options - if format_only: - dataset.format_results(outputs, **kwargs) - if metrics: - dataset.evaluate(outputs, metrics, **kwargs) - - elif codebase == Codebase.MMOCR: - if out: - print(f'\nwriting results to {out}') - mmcv.dump(outputs, out) - kwargs = {} if metric_options is None else metric_options - if format_only: - dataset.format_results(outputs, **kwargs) - if metrics: - eval_kwargs = model_cfg.get('evaluation', {}).copy() - # hard-code way to remove EvalHook args - for key in [ - 'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best', - 'rule' - ]: - eval_kwargs.pop(key, None) - eval_kwargs.update(dict(metric=metrics, **kwargs)) - print(dataset.evaluate(outputs, **eval_kwargs)) - - elif codebase == Codebase.MMEDIT: - if out: - print(f'\nwriting results to {out}') - mmcv.dump(outputs, out) - # The Dataset doesn't need metrics - print('\n') - # print metrics - stats = dataset.evaluate(outputs) - for stat in stats: - print('Eval-{}: {}'.format(stat, stats[stat])) - - else: - raise NotImplementedError(f'Unknown codebase type: {codebase.value}') diff --git a/mmdeploy/apis/utils.py b/mmdeploy/apis/utils.py index cc603bd8c0..bb30ef8649 100644 --- a/mmdeploy/apis/utils.py +++ b/mmdeploy/apis/utils.py @@ -1,367 
+1,42 @@ -from typing import Any, Dict, Optional, Sequence, Union - import mmcv -import numpy as np -import torch -from torch.utils.data import Dataset - -from mmdeploy.utils import Backend, Codebase, Task, get_codebase, load_config - - -def init_pytorch_model(codebase: Codebase, - model_cfg: Union[str, mmcv.Config], - model_checkpoint: Optional[str] = None, - device: str = 'cuda:0', - cfg_options: Optional[Dict] = None): - """Initialize torch model. - - Args: - codebase (Codebase): Specifying codebase type. - model_cfg (str | mmcv.Config): Model config file or Config object. - model_checkpoint (str): The checkpoint file of torch model, defaults - to `None`. - device (str): A string specifying device type, defaults to 'cuda:0'. - cfg_options (dict): Optional config key-pair parameters. - - Returns: - nn.Module: An initialized torch model. - """ - if codebase == Codebase.MMCLS: - from mmcls.apis import init_model - model = init_model(model_cfg, model_checkpoint, device, cfg_options) - - elif codebase == Codebase.MMDET: - from mmdet.apis import init_detector - model = init_detector(model_cfg, model_checkpoint, device, cfg_options) - - elif codebase == Codebase.MMSEG: - from mmseg.apis import init_segmentor - from mmdeploy.mmseg.export import convert_syncbatchnorm - model = init_segmentor(model_cfg, model_checkpoint, device) - model = convert_syncbatchnorm(model) - - elif codebase == Codebase.MMOCR: - from mmocr.apis import init_detector - model = init_detector(model_cfg, model_checkpoint, device, cfg_options) - - elif codebase == Codebase.MMEDIT: - from mmedit.apis import init_model - model = init_model(model_cfg, model_checkpoint, device) - model.forward = model.forward_dummy - else: - raise NotImplementedError(f'Unknown codebase type: {codebase.value}') - - return model.eval() - - -def create_input(codebase: Codebase, - task: Task, - model_cfg: Union[str, mmcv.Config], - imgs: Union[str, np.ndarray], - input_shape: Sequence[int] = None, - device: str = 'cuda:0', - **kwargs): - """Create input for model. - - Args: - codebase (Codebase): Specifying codebase type. - task (Task): Specifying task type. - model_cfg (str | mmcv.Config): Model config file or loaded Config - object. - imgs (str | np.ndarray): Input image(s), accpeted data types are `str`, - `np.ndarray`. - input_shape (list[int]): Input shape of image in (width, height) - format, defaults to `None`. - device (str): A string specifying device type, defaults to 'cuda:0'. - - Returns: - tuple: (data, img), meta information for the input image and input - image tensor. 
- """ - model_cfg = load_config(model_cfg)[0] - - cfg = model_cfg.copy() - if codebase == Codebase.MMCLS: - from mmdeploy.mmcls.export import create_input - return create_input(task, cfg, imgs, input_shape, device, **kwargs) - - elif codebase == Codebase.MMDET: - from mmdeploy.mmdet.export import create_input - return create_input(task, cfg, imgs, input_shape, device, **kwargs) - - elif codebase == Codebase.MMOCR: - from mmdeploy.mmocr.export import create_input - return create_input(task, cfg, imgs, input_shape, device, **kwargs) - - elif codebase == Codebase.MMSEG: - from mmdeploy.mmseg.export import create_input - return create_input(task, cfg, imgs, input_shape, device, **kwargs) - - elif codebase == Codebase.MMEDIT: - from mmdeploy.mmedit.export import create_input - return create_input(task, cfg, imgs, input_shape, device, **kwargs) - else: - raise NotImplementedError(f'Unknown codebase type: {codebase.value}') +from mmdeploy.codebase import BaseTask, get_codebase_class +from mmdeploy.utils import get_codebase, get_task_type -def init_backend_model(model_files: Sequence[str], - model_cfg: Union[str, mmcv.Config], - deploy_cfg: Union[str, mmcv.Config], - device_id: int = 0, - **kwargs): - """Initialize backend model. +def build_task_processor(model_cfg: mmcv.Config, deploy_cfg: mmcv.Config, + device: str) -> BaseTask: + """Build a task processor to manage the deploy pipeline. Args: - model_files (list[str]): Input model files. - model_cfg (str | mmcv.Config): Model config file or - loaded Config object. - deploy_cfg (str | mmcv.Config): Deployment config file or - loaded Config object. - device_id (int): An integer specifying device index. + model_cfg (str | mmcv.Config): Model config file. + deploy_cfg (str | mmcv.Config): Deployment config file. + device (str): A string specifying device type. Returns: - nn.Module: An initialized model. + BaseTask: A task processor. """ - deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg) + codebase_type = get_codebase(deploy_cfg) + codebase = get_codebase_class(codebase_type) + return codebase.build_task_processor(model_cfg, deploy_cfg, device) - codebase = get_codebase(deploy_cfg) - if codebase == Codebase.MMCLS: - from mmdeploy.mmcls.apis import build_classifier - return build_classifier( - model_files, model_cfg, deploy_cfg, device_id=device_id) - - elif codebase == Codebase.MMDET: - from mmdeploy.mmdet.apis import build_detector - return build_detector( - model_files, model_cfg, deploy_cfg, device_id=device_id) - - elif codebase == Codebase.MMSEG: - from mmdeploy.mmseg.apis import build_segmentor - return build_segmentor( - model_files, model_cfg, deploy_cfg, device_id=device_id) - - elif codebase == Codebase.MMOCR: - from mmdeploy.mmocr.apis import build_ocr_processor - return build_ocr_processor( - model_files, model_cfg, deploy_cfg, device_id=device_id) - - elif codebase == Codebase.MMEDIT: - from mmdeploy.mmedit.apis import build_editing_processor - return build_editing_processor(model_files, model_cfg, deploy_cfg, - device_id) - - else: - raise NotImplementedError(f'Unknown codebase type: {codebase.value}') - - -def run_inference(codebase: Codebase, model_inputs: dict, - model: torch.nn.Module): - """Run once inference for a model of nn.Module. - - Args: - codebase (Codebase): Specifying codebase type. - model_inputs (dict): A dict containing model inputs tensor and - meta info. - model (nn.Module): Input model. - - Returns: - list: The predictions of model inference. 
- """ - if codebase == Codebase.MMCLS: - return model(**model_inputs, return_loss=False)[0] - elif codebase == Codebase.MMDET: - return model(**model_inputs, return_loss=False, rescale=True)[0] - elif codebase == Codebase.MMSEG: - return model(**model_inputs, return_loss=False) - elif codebase == Codebase.MMOCR: - return model(**model_inputs, return_loss=False, rescale=True)[0] - elif codebase == Codebase.MMEDIT: - result = model(model_inputs['lq'])[0] - # TODO: (For mmedit codebase) - # The data type of pytorch backend is not consistent - if not isinstance(result, np.ndarray): - result = result.detach().cpu().numpy() - return result - else: - raise NotImplementedError(f'Unknown codebase type: {codebase.value}') - - -def visualize(codebase: Codebase, - image: Union[str, np.ndarray], - result: list, - model: torch.nn.Module, - output_file: str, - backend: Backend, - show_result: bool = False): - """Visualize predictions of a model. - - Args: - codebase (Codebase): Specifying codebase type. - image (str | np.ndarray): Input image to draw predictions on. - result (list): A list of predictions. - model (nn.Module): Input model. - output_file (str): Output file to save drawn image. - backend (Backend): Specifying backend type. - show_result (bool): Whether to show result in windows, defaults - to `False`. - """ - show_img = mmcv.imread(image) if isinstance(image, str) else image - output_file = None if show_result else output_file - - if codebase == Codebase.MMCLS: - from mmdeploy.mmcls.apis import show_result as show_result_mmcls - show_result_mmcls(model, show_img, result, output_file, backend, - show_result) - elif codebase == Codebase.MMDET: - from mmdeploy.mmdet.apis import show_result as show_result_mmdet - show_result_mmdet(model, show_img, result, output_file, backend, - show_result) - elif codebase == Codebase.MMSEG: - from mmdeploy.mmseg.apis import show_result as show_result_mmseg - show_result_mmseg(model, show_img, result, output_file, backend, - show_result) - elif codebase == Codebase.MMOCR: - from mmdeploy.mmocr.apis import show_result as show_result_mmocr - show_result_mmocr(model, show_img, result, output_file, backend, - show_result) - elif codebase == Codebase.MMEDIT: - from mmdeploy.mmedit.apis import show_result as show_result_mmedit - show_result_mmedit(result, output_file, backend, show_result) - - -def get_partition_cfg(codebase: Codebase, partition_type: str): - """Get a certain partition config. +def get_predefined_partition_cfg(deploy_cfg: mmcv.Config, partition_type: str): + """Get the predefined partition config. Notes: Currently only support mmdet codebase. Args: - codebase (Codebase): Specifying codebase type. + deploy_cfg (mmcv.Config): use deploy config to get the codebase and + task type. partition_type (str): A string specifying partition type. Returns: dict: A dictionary of partition config. """ - if codebase == Codebase.MMDET: - from mmdeploy.mmdet.export import get_partition_cfg \ - as get_partition_cfg_mmdet - return get_partition_cfg_mmdet(partition_type) - else: - raise NotImplementedError(f'Unknown codebase type: {codebase.value}') - - -def build_dataset(codebase: Codebase, - dataset_cfg: Union[str, mmcv.Config], - dataset_type: str = 'val', - **kwargs): - """Build dataset for different codebase. - - Args: - codebase (Codebase): Specifying codebase type. - dataset_cfg (str | mmcv.Config): Dataset config file or Config object. - dataset_type (str): Specifying dataset type, e.g.: 'train', 'test', - 'val', defaults to 'val'. 
- - Returns: - Dataset: The built dataset. - """ - if codebase == Codebase.MMCLS: - from mmdeploy.mmcls.export import build_dataset \ - as build_dataset_mmcls - return build_dataset_mmcls(dataset_cfg, dataset_type, **kwargs) - elif codebase == Codebase.MMDET: - from mmdeploy.mmdet.export import build_dataset \ - as build_dataset_mmdet - return build_dataset_mmdet(dataset_cfg, dataset_type, **kwargs) - elif codebase == Codebase.MMSEG: - from mmdeploy.mmseg.export import build_dataset as build_dataset_mmseg - return build_dataset_mmseg(dataset_cfg, dataset_type, **kwargs) - elif codebase == Codebase.MMEDIT: - from mmdeploy.mmedit.export import build_dataset \ - as build_dataset_mmedit - return build_dataset_mmedit(dataset_cfg, **kwargs) - elif codebase == Codebase.MMOCR: - from mmdeploy.mmocr.export import build_dataset as build_dataset_mmocr - return build_dataset_mmocr(dataset_cfg, dataset_type, **kwargs) - else: - raise NotImplementedError(f'Unknown codebase type: {codebase.value}') - - -def build_dataloader(codebase: Codebase, dataset: Dataset, - samples_per_gpu: int, workers_per_gpu: int, **kwargs): - """Build PyTorch dataloader. - - Args: - codebase (Codebase): Specifying codebase type. - dataset (Dataset): A PyTorch dataset. - samples_per_gpu (int): Number of training samples on each GPU, i.e., - batch size of each GPU. - workers_per_gpu (int): How many subprocesses to use for data loading - for each GPU. - - Returns: - DataLoader: A PyTorch dataloader. - """ - if codebase == Codebase.MMCLS: - from mmdeploy.mmcls.export import build_dataloader \ - as build_dataloader_mmcls - return build_dataloader_mmcls(dataset, samples_per_gpu, - workers_per_gpu, **kwargs) - elif codebase == Codebase.MMDET: - from mmdeploy.mmdet.export import build_dataloader \ - as build_dataloader_mmdet - return build_dataloader_mmdet(dataset, samples_per_gpu, - workers_per_gpu, **kwargs) - elif codebase == Codebase.MMSEG: - from mmdeploy.mmseg.export import build_dataloader \ - as build_dataloader_mmseg - return build_dataloader_mmseg(dataset, samples_per_gpu, - workers_per_gpu, **kwargs) - elif codebase == Codebase.MMEDIT: - from mmdeploy.mmedit.export import build_dataloader \ - as build_dataloader_mmedit - return build_dataloader_mmedit(dataset, samples_per_gpu, - workers_per_gpu, **kwargs) - elif codebase == Codebase.MMOCR: - from mmdeploy.mmocr.export import build_dataloader \ - as build_dataloader_mmocr - return build_dataloader_mmocr(dataset, samples_per_gpu, - workers_per_gpu, **kwargs) - else: - raise NotImplementedError(f'Unknown codebase type: {codebase.value}') - - -def get_tensor_from_input(codebase: Codebase, input_data: Dict[str, Any]): - """Get input tensor from input data. - - Args: - codebase (Codebase): Specifying codebase type. - input_data (dict): Input data containing meta info and image tensor. - - Returns: - torch.Tensor: An image in `Tensor`. 
- """ - if codebase == Codebase.MMCLS: - from mmdeploy.mmcls.export import get_tensor_from_input \ - as get_tensor_from_input_mmcls - return get_tensor_from_input_mmcls(input_data) - elif codebase == Codebase.MMDET: - from mmdeploy.mmdet.export import get_tensor_from_input \ - as get_tensor_from_input_mmdet - return get_tensor_from_input_mmdet(input_data) - elif codebase == Codebase.MMSEG: - from mmdeploy.mmseg.export import get_tensor_from_input \ - as get_tensor_from_input_mmseg - return get_tensor_from_input_mmseg(input_data) - elif codebase == Codebase.MMOCR: - from mmdeploy.mmocr.export import get_tensor_from_input \ - as get_tensor_from_input_mmocr - return get_tensor_from_input_mmocr(input_data) - elif codebase == Codebase.MMEDIT: - from mmdeploy.mmedit.export import get_tensor_from_input \ - as get_tensor_from_input_mmedit - return get_tensor_from_input_mmedit(input_data) - else: - raise NotImplementedError(f'Unknown codebase type: {codebase.value}') + codebase_type = get_codebase(deploy_cfg) + task = get_task_type(deploy_cfg) + codebase = get_codebase_class(codebase_type) + task_processor_class = codebase.get_task_class(task) + return task_processor_class.get_partition_cfg(partition_type) diff --git a/mmdeploy/apis/visualize.py b/mmdeploy/apis/visualize.py new file mode 100644 index 0000000000..ce43af3328 --- /dev/null +++ b/mmdeploy/apis/visualize.py @@ -0,0 +1,67 @@ +from typing import Optional, Sequence, Union + +import mmcv +import numpy as np +import torch + +from mmdeploy.codebase import BaseTask +from mmdeploy.utils import Backend, get_backend, get_input_shape, load_config + + +def visualize_model(model_cfg: Union[str, mmcv.Config], + deploy_cfg: Union[str, mmcv.Config], + model: Union[str, Sequence[str], BaseTask], + img: Union[str, np.ndarray], + device: str, + backend: Optional[Backend] = None, + output_file: Optional[str] = None, + show_result: bool = False): + """Run inference with PyTorch or backend model and show results. + + Args: + model_cfg (str | mmcv.Config): Model config file or Config object. + deploy_cfg (str | mmcv.Config): Deployment config file or Config + object. + model (str | list[str], BaseSubtask): Input model or file(s). + img (str | np.ndarray): Input image file or numpy array for inference. + device (str): A string specifying device type. + backend (Backend): Specifying backend type, defaults to `None`. + output_file (str): Output file to save visualized image, defaults to + `None`. Only valid if `show_result` is set to `False`. + show_result (bool): Whether to show plotted image in windows, defaults + to `False`. + """ + deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg) + + from mmdeploy.apis.utils import build_task_processor + task_processor = build_task_processor(model_cfg, deploy_cfg, device) + + input_shape = get_input_shape(deploy_cfg) + if backend is None: + backend = get_backend(deploy_cfg) + + if isinstance(model, str): + model = [model] + + if isinstance(model, (list, tuple)): + assert len(model) > 0, 'Model should have at least one element.' 
+ assert all([isinstance(m, str) for m in model]), 'All elements in the \ + list should be str' + + if backend == Backend.PYTORCH: + model = task_processor.init_pytorch_model(model[0]) + else: + model = task_processor.init_backend_model(model) + + model_inputs, _ = task_processor.create_input(img, input_shape) + + with torch.no_grad(): + result = task_processor.run_inference(model, model_inputs)[0] + + task_processor.visualize( + image=img, + model=model, + result=result, + output_file=output_file, + window_name=backend.value, + show_result=show_result) diff --git a/mmdeploy/backend/__init__.py b/mmdeploy/backend/__init__.py new file mode 100644 index 0000000000..59675fc573 --- /dev/null +++ b/mmdeploy/backend/__init__.py @@ -0,0 +1,22 @@ +from mmdeploy.backend.ncnn import is_available as ncnn_available +from mmdeploy.backend.onnxruntime import is_available as ort_available +from mmdeploy.backend.openvino import is_available as openvino_available +from mmdeploy.backend.ppl import is_available as ppl_available +from mmdeploy.backend.tensorrt import is_available as trt_available + +__all__ = [] +if ncnn_available(): + from .ncnn import NCNNWrapper # noqa: F401,F403 + __all__.append('NCNNWrapper') +if ort_available(): + from .onnxruntime import ORTWrapper # noqa: F401,F403 + __all__.append('ORTWrapper') +if trt_available(): + from .tensorrt import TRTWrapper # noqa: F401,F403 + __all__.append('TRTWrapper') +if ppl_available(): + from .ppl import PPLWrapper # noqa: F401,F403 + __all__.append('PPLWrapper') +if openvino_available(): + from .openvino import OpenVINOWrapper # noqa: F401,F403 + __all__.append('OpenVINOWrapper') diff --git a/mmdeploy/backend/base/__init__.py b/mmdeploy/backend/base/__init__.py new file mode 100644 index 0000000000..9ac96fb31f --- /dev/null +++ b/mmdeploy/backend/base/__init__.py @@ -0,0 +1,8 @@ +from .backend_wrapper_registry import (BACKEND_WRAPPER, get_backend_file_count, + get_backend_wrapper_class) +from .base_wrapper import BaseWrapper + +__all__ = [ + 'BaseWrapper', 'BACKEND_WRAPPER', 'get_backend_wrapper_class', + 'get_backend_file_count' +] diff --git a/mmdeploy/backend/base/backend_wrapper_registry.py b/mmdeploy/backend/base/backend_wrapper_registry.py new file mode 100644 index 0000000000..0ca8798cef --- /dev/null +++ b/mmdeploy/backend/base/backend_wrapper_registry.py @@ -0,0 +1,27 @@ +from mmcv.utils import Registry + +from mmdeploy.utils.config_utils import Backend + + +def __build_backend_wrapper_class(backend: Backend, registry: Registry): + return registry.module_dict[backend.value] + + +BACKEND_WRAPPER = Registry('backend', __build_backend_wrapper_class) + + +def get_backend_wrapper_class(backend: Backend) -> type: + """Get the backend wrapper class from the registry. + + Args: + backend (Backend): The backend enum type. + + Returns: + type: The backend wrapper class + """ + return BACKEND_WRAPPER.build(backend) + + +def get_backend_file_count(backend: Backend): + backend_class = get_backend_wrapper_class(backend) + return backend_class.get_backend_file_count() diff --git a/mmdeploy/backend/base/base_wrapper.py b/mmdeploy/backend/base/base_wrapper.py new file mode 100644 index 0000000000..985315b8a8 --- /dev/null +++ b/mmdeploy/backend/base/base_wrapper.py @@ -0,0 +1,70 @@ +from abc import ABCMeta, abstractmethod +from typing import Dict, List, Sequence + +import torch + + +class BaseWrapper(torch.nn.Module, metaclass=ABCMeta): + """Abstract base class for backend wrappers. 
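# Sketch of how the registry above resolves wrapper classes. It assumes the
# corresponding backend packages are importable, since each wrapper registers
# itself on import.
from mmdeploy.backend.base import (get_backend_file_count,
                                   get_backend_wrapper_class)
from mmdeploy.utils import Backend

wrapper_cls = get_backend_wrapper_class(Backend.ONNXRUNTIME)  # ORTWrapper
num_files = get_backend_file_count(Backend.NCNN)  # 2: a .param and a .bin file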
+
+    Args:
+        output_names (Sequence[str]): Names of model outputs in order, which
+            is useful when converting the output dict to an ordered list or
+            converting the output ordered list to a key-value dict.
+    """
+
+    def __init__(self, output_names: Sequence[str]):
+        super().__init__()
+        self._output_names = output_names
+
+    @staticmethod
+    def get_backend_file_count() -> int:
+        """Return the count of backend file(s).
+
+        Each backend has its own requirement on backend files (e.g., TensorRT
+        requires 1 .engine file and ncnn requires 2 files (.param, .bin)). This
+        interface allows developers to get the count of these required files.
+
+        Returns:
+            int: The count of required backend file(s).
+        """
+        return 1
+
+    @abstractmethod
+    def forward(self, inputs: Dict[str,
+                                   torch.Tensor]) -> Dict[str, torch.Tensor]:
+        """Run forward inference.
+
+        Args:
+            inputs (Dict[str, torch.Tensor]): Key-value pairs of model inputs.
+
+        Returns:
+            Dict[str, torch.Tensor]: Key-value pairs of model outputs.
+        """
+        pass
+
+    @property
+    def output_names(self):
+        """Return the output names."""
+        return self._output_names
+
+    @output_names.setter
+    def output_names(self, value):
+        """Set the output names."""
+        self._output_names = value
+
+    def output_to_list(self, output_dict: Dict[str,
+                                               torch.Tensor]) -> \
+            List[torch.Tensor]:
+        """Convert the output dict of forward() to a tensor list.
+
+        Args:
+            output_dict (Dict[str, torch.Tensor]): Key-value pairs of model
+                outputs.
+
+        Returns:
+            List[torch.Tensor]: An output value list whose order is determined
+                by the output_names list.
+        """
+        outputs = [output_dict[name] for name in self._output_names]
+        return outputs
diff --git a/mmdeploy/backend/ncnn/__init__.py b/mmdeploy/backend/ncnn/__init__.py
new file mode 100644
index 0000000000..ccfa0138b0
--- /dev/null
+++ b/mmdeploy/backend/ncnn/__init__.py
@@ -0,0 +1,26 @@
+import importlib
+import os.path as osp
+
+from .init_plugins import get_ops_path
+
+
+def is_available():
+    """Check whether ncnn with extension is installed.
+
+    Returns:
+        bool: True if ncnn and its extension are installed.
+    """
+    ncnn_ops_path = get_ops_path()
+    if not osp.exists(ncnn_ops_path):
+        return False
+    has_pyncnn = importlib.util.find_spec('ncnn') is not None
+    has_pyncnn_ext = importlib.util.find_spec(
+        'mmdeploy.backend.ncnn.ncnn_ext') is not None
+
+    return has_pyncnn and has_pyncnn_ext
+
+
+if is_available():
+    from .wrapper import NCNNWrapper
+
+    __all__ = ['NCNNWrapper']
diff --git a/mmdeploy/apis/ncnn/init_plugins.py b/mmdeploy/backend/ncnn/init_plugins.py
similarity index 92%
rename from mmdeploy/apis/ncnn/init_plugins.py
rename to mmdeploy/backend/ncnn/init_plugins.py
index d6b2615404..331a83e92f 100644
--- a/mmdeploy/apis/ncnn/init_plugins.py
+++ b/mmdeploy/backend/ncnn/init_plugins.py
@@ -2,7 +2,7 @@
 import os
 
 
-def get_ops_path():
+def get_ops_path() -> str:
     """Get NCNN custom ops library path.
 
     Returns:
@@ -18,7 +18,7 @@ def get_ops_path():
     return lib_path
 
 
-def get_onnx2ncnn_path():
+def get_onnx2ncnn_path() -> str:
     """Get onnx2ncnn path.
 
     Returns:
diff --git a/mmdeploy/backend/ncnn/onnx2ncnn.py b/mmdeploy/backend/ncnn/onnx2ncnn.py
new file mode 100644
index 0000000000..b772a27718
--- /dev/null
+++ b/mmdeploy/backend/ncnn/onnx2ncnn.py
@@ -0,0 +1,40 @@
+from subprocess import call
+from typing import List
+
+from .init_plugins import get_onnx2ncnn_path
+
+
+def get_output_model_file(onnx_path: str, work_dir: str) -> List[str]:
+    """Return the paths to the .param and .bin files of the export result.
+ + Args: + onnx_path (str): The path to the onnx model. + work_dir (str): The path to the directory for saving the results. + + Returns: + List[str]: The path to the files where the export result will be + located. + """ + save_param = onnx_path.replace('.onnx', '.param') + save_bin = onnx_path.replace('.onnx', '.bin') + + return [save_param, save_bin] + + +def onnx2ncnn(onnx_path: str, work_dir: str): + """Convert ONNX to ncnn. + + The inputs of ncnn include a model file and a weight file. We need to use + a executable program to convert the ".onnx" file to a ".param" file and + a ".bin" file. The output files will save to work_dir. + + Args: + onnx_path (str): The path of the onnx model. + work_dir (str): The path to the directory for saving the results. + """ + + onnx2ncnn_path = get_onnx2ncnn_path() + + save_param, save_bin = get_output_model_file(onnx_path, work_dir) + + call([onnx2ncnn_path, onnx_path, save_param, save_bin]) diff --git a/mmdeploy/apis/ncnn/ncnn_utils.py b/mmdeploy/backend/ncnn/wrapper.py similarity index 67% rename from mmdeploy/apis/ncnn/ncnn_utils.py rename to mmdeploy/backend/ncnn/wrapper.py index 4cf701cb00..5622a578bf 100644 --- a/mmdeploy/apis/ncnn/ncnn_utils.py +++ b/mmdeploy/backend/ncnn/wrapper.py @@ -1,24 +1,28 @@ import importlib -from typing import Dict, Iterable, Optional +from typing import Dict, Optional, Sequence import ncnn import numpy as np import torch +from mmdeploy.utils import Backend from mmdeploy.utils.timer import TimeCounter +from ..base import BACKEND_WRAPPER, BaseWrapper -class NCNNWrapper(torch.nn.Module): +@BACKEND_WRAPPER.register_module(Backend.NCNN.value) +class NCNNWrapper(BaseWrapper): """NCNN wrapper class for inference. Args: param_file (str): Path of a parameter file. bin_file (str): Path of a binary file. - output_names (list[str] | tuple[str]): Names to model outputs. Defaults - to `None`. + output_names (Sequence[str] | None): Names of model outputs in order. + Defaults to `None` and the wrapper will load the output names from + ncnn model. Examples: - >>> from mmdeploy.apis.ncnn import NCNNWrapper + >>> from mmdeploy.backend.ncnn import NCNNWrapper >>> import torch >>> >>> param_file = 'model.params' @@ -32,41 +36,36 @@ class NCNNWrapper(torch.nn.Module): def __init__(self, param_file: str, bin_file: str, - output_names: Optional[Iterable[str]] = None, + output_names: Optional[Sequence[str]] = None, **kwargs): - super(NCNNWrapper, self).__init__() net = ncnn.Net() - if importlib.util.find_spec('mmdeploy.apis.ncnn.ncnn_ext'): - from mmdeploy.apis.ncnn import ncnn_ext + if importlib.util.find_spec('mmdeploy.backend.ncnn.ncnn_ext'): + from mmdeploy.backend.ncnn import ncnn_ext ncnn_ext.register_mmdeploy_custom_layers(net) net.load_param(param_file) net.load_model(bin_file) self._net = net - self._output_names = output_names + if output_names is None: + assert hasattr(self._net, 'output_names') + output_names = self._net.output_names() - def set_output_names(self, output_names: Iterable[str]): - """Set names of the model outputs. + super().__init__(output_names) - Args: - output_names (list[str] | tuple[str]): Names to model outputs. - """ - self._output_names = output_names + @staticmethod + def get_backend_file_count() -> int: + """Return the count of backend file(s) - def get_output_names(self): - """Get names of the model outputs. + ncnn needs a .param file and a .bin file. So the count is 2. Returns: - list[str]: Names to model outputs. + int: The count of required backend file(s). 
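# End-to-end sketch of the ncnn path above: query the expected output paths,
# run the converter, then wrap the .param/.bin pair for CPU inference. The
# file paths and the output name 'output' are assumptions for illustration.
import torch

from mmdeploy.backend.ncnn import NCNNWrapper
from mmdeploy.backend.ncnn.onnx2ncnn import get_output_model_file, onnx2ncnn

onnx_path = 'end2end.onnx'
param_file, bin_file = get_output_model_file(onnx_path, work_dir='.')
onnx2ncnn(onnx_path, work_dir='.')

model = NCNNWrapper(param_file, bin_file, output_names=['output'])
outputs = model(dict(input=torch.rand(1, 3, 224, 224)))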
""" - if self._output_names is not None: - return self._output_names - else: - assert hasattr(self._net, 'output_names') - return self._net.output_names() + return 2 - def forward(self, inputs: Dict[str, torch.Tensor]): + def forward(self, inputs: Dict[str, + torch.Tensor]) -> Dict[str, torch.Tensor]: """Run forward inference. Args: @@ -84,7 +83,7 @@ def forward(self, inputs: Dict[str, torch.Tensor]): 'NCNN only supports cpu device' # set output names - output_names = self.get_output_names() + output_names = self._output_names # create output dict outputs = dict([name, [None] * batch_size] for name in output_names) @@ -101,7 +100,8 @@ def forward(self, inputs: Dict[str, torch.Tensor]): ex.input(name, input_mat) # get outputs - result = self.ncnn_execute(extractor=ex, output_names=output_names) + result = self.__ncnn_execute( + extractor=ex, output_names=output_names) for name in output_names: outputs[name][batch_id] = torch.from_numpy( np.array(result[name])) @@ -113,8 +113,8 @@ def forward(self, inputs: Dict[str, torch.Tensor]): return outputs @TimeCounter.count_time() - def ncnn_execute(self, extractor: ncnn.Extractor, - output_names: Iterable[str]): + def __ncnn_execute(self, extractor: ncnn.Extractor, + output_names: Sequence[str]) -> Dict[str, ncnn.Mat]: """Run inference with NCNN. Args: diff --git a/mmdeploy/backend/onnxruntime/__init__.py b/mmdeploy/backend/onnxruntime/__init__.py new file mode 100644 index 0000000000..0ccaf386d5 --- /dev/null +++ b/mmdeploy/backend/onnxruntime/__init__.py @@ -0,0 +1,22 @@ +import importlib +import os.path as osp + +from .init_plugins import get_ops_path + + +def is_available(): + """Check whether onnxruntime and its custom ops are installed. + + Returns: + bool: True if onnxruntime package is installed and its + custom ops are compiled. + """ + onnxruntime_op_path = get_ops_path() + if not osp.exists(onnxruntime_op_path): + return False + return importlib.util.find_spec('onnxruntime') is not None + + +if is_available(): + from .wrapper import ORTWrapper + __all__ = ['ORTWrapper'] diff --git a/mmdeploy/apis/onnxruntime/init_plugins.py b/mmdeploy/backend/onnxruntime/init_plugins.py similarity index 93% rename from mmdeploy/apis/onnxruntime/init_plugins.py rename to mmdeploy/backend/onnxruntime/init_plugins.py index 54c1b72297..7f51e30b21 100644 --- a/mmdeploy/apis/onnxruntime/init_plugins.py +++ b/mmdeploy/backend/onnxruntime/init_plugins.py @@ -2,7 +2,7 @@ import os -def get_ops_path(): +def get_ops_path() -> str: """Get the library path of onnxruntime custom ops. Returns: diff --git a/mmdeploy/apis/onnxruntime/onnxruntime_utils.py b/mmdeploy/backend/onnxruntime/wrapper.py similarity index 59% rename from mmdeploy/apis/onnxruntime/onnxruntime_utils.py rename to mmdeploy/backend/onnxruntime/wrapper.py index 5b45287a79..84174bb569 100644 --- a/mmdeploy/apis/onnxruntime/onnxruntime_utils.py +++ b/mmdeploy/backend/onnxruntime/wrapper.py @@ -1,39 +1,43 @@ import logging import os.path as osp -from typing import Dict, Sequence +from typing import Dict, Optional, Sequence import numpy as np import onnxruntime as ort import torch +from mmdeploy.utils import Backend, parse_device_id from mmdeploy.utils.timer import TimeCounter +from ..base import BACKEND_WRAPPER, BaseWrapper from .init_plugins import get_ops_path -class ORTWrapper(torch.nn.Module): +@BACKEND_WRAPPER.register_module(Backend.ONNXRUNTIME.value) +class ORTWrapper(BaseWrapper): """ONNXRuntime wrapper for inference. - Args: - onnx_file (str): Input onnx model file. 
-        device_id (int): The device id to input model.
-        output_names (list[str] | tuple[str]): Names to model outputs.
-
-    Examples:
-        >>> from mmdeploy.apis.onnxruntime import ORTWrapper
-        >>> import torch
-        >>>
-        >>> onnx_file = 'model.onnx'
-        >>> model = ORTWrapper(onnx_file, -1)
-        >>> inputs = dict(input=torch.randn(1, 3, 224, 224, device='cpu'))
-        >>> outputs = model(inputs)
-        >>> print(outputs)
+    Args:
+        onnx_file (str): Input onnx model file.
+        device (str): A string specifying device type.
+        output_names (Sequence[str] | None): Names of model outputs in order.
+            Defaults to `None` and the wrapper will load the output names from
+            the model.
+
+    Examples:
+        >>> from mmdeploy.backend.onnxruntime import ORTWrapper
+        >>> import torch
+        >>>
+        >>> onnx_file = 'model.onnx'
+        >>> model = ORTWrapper(onnx_file, 'cpu')
+        >>> inputs = dict(input=torch.randn(1, 3, 224, 224, device='cpu'))
+        >>> outputs = model(inputs)
+        >>> print(outputs)
     """

     def __init__(self,
                  onnx_file: str,
-                 device_id: int,
-                 output_names: Sequence[str] = None):
-        super(ORTWrapper, self).__init__()
+                 device: str,
+                 output_names: Optional[Sequence[str]] = None):
         # get the custom op path
         ort_custom_op_path = get_ops_path()
         session_options = ort.SessionOptions()
@@ -41,13 +45,15 @@ def __init__(self,
         if osp.exists(ort_custom_op_path):
             session_options.register_custom_ops_library(ort_custom_op_path)
             logging.info(f'Successfully loaded onnxruntime custom ops from \
-            {ort_custom_op_path}')
+                {ort_custom_op_path}')
         else:
             logging.warning(f'The library of onnxruntime custom ops does \
-            not exist: {ort_custom_op_path}')
+                not exist: {ort_custom_op_path}')

         sess = ort.InferenceSession(onnx_file, session_options)

+        device_id = parse_device_id(device)
+
         providers = ['CPUExecutionProvider']
         options = [{}]
         is_cuda_available = ort.get_device() == 'GPU'
@@ -59,18 +65,21 @@ def __init__(self,
             output_names = [_.name for _ in sess.get_outputs()]
         self.sess = sess
         self.io_binding = sess.io_binding()
-        self.output_names = output_names
         self.device_id = device_id
         self.is_cuda_available = is_cuda_available
         self.device_type = 'cuda' if is_cuda_available else 'cpu'

-    def forward(self, inputs: Dict[str, torch.Tensor]):
+        super().__init__(output_names)
+
+    def forward(self, inputs: Dict[str,
+                                   torch.Tensor]) -> Dict[str, torch.Tensor]:
         """Run forward inference.

         Args:
             inputs (Dict[str, torch.Tensor]): The input name and tensor pairs.
+
         Returns:
-            list[np.ndarray]: A list of output numpy array.
+            Dict[str, torch.Tensor]: The output name and tensor pairs.
         """
         for name, input_tensor in inputs.items():
             # set io binding for inputs/outputs
@@ -84,16 +93,19 @@ def forward(self, inputs: Dict[str, torch.Tensor]):
                 shape=input_tensor.shape,
                 buffer_ptr=input_tensor.data_ptr())

-        for name in self.output_names:
+        for name in self._output_names:
             self.io_binding.bind_output(name)
         # run session to get outputs
-        self.ort_execute(self.io_binding)
-        outputs = self.io_binding.copy_outputs_to_cpu()
+        self.__ort_execute(self.io_binding)
+        output_list = self.io_binding.copy_outputs_to_cpu()
+        outputs = {}
+        for output_name, numpy_tensor in zip(self._output_names, output_list):
+            outputs[output_name] = torch.from_numpy(numpy_tensor)
         return outputs

     @TimeCounter.count_time()
-    def ort_execute(self, io_binding: ort.IOBinding):
+    def __ort_execute(self, io_binding: ort.IOBinding):
         """Run inference with ONNXRuntime session.
Args: diff --git a/mmdeploy/backend/openvino/__init__.py b/mmdeploy/backend/openvino/__init__.py new file mode 100644 index 0000000000..2c9f2c2903 --- /dev/null +++ b/mmdeploy/backend/openvino/__init__.py @@ -0,0 +1,16 @@ +import importlib + + +def is_available() -> bool: + """Checking if OpenVINO is installed. + + Returns: + bool: True if OpenVINO is installed. + """ + return importlib.util.find_spec('openvino') is not None + + +if is_available(): + from .wrapper import OpenVINOWrapper + from .onnx2openvino import get_output_model_file + __all__ = ['OpenVINOWrapper', 'get_output_model_file'] diff --git a/mmdeploy/apis/openvino/onnx2openvino.py b/mmdeploy/backend/openvino/onnx2openvino.py similarity index 100% rename from mmdeploy/apis/openvino/onnx2openvino.py rename to mmdeploy/backend/openvino/onnx2openvino.py diff --git a/mmdeploy/apis/openvino/openvino_utils.py b/mmdeploy/backend/openvino/wrapper.py similarity index 75% rename from mmdeploy/apis/openvino/openvino_utils.py rename to mmdeploy/backend/openvino/wrapper.py index 310ef25156..223c2ec9d9 100644 --- a/mmdeploy/apis/openvino/openvino_utils.py +++ b/mmdeploy/backend/openvino/wrapper.py @@ -1,40 +1,26 @@ import os.path as osp -from typing import Dict, List +from typing import Dict, Optional, Sequence -import mmcv import numpy as np import torch +from mmdeploy.utils import Backend from mmdeploy.utils.timer import TimeCounter +from ..base import BACKEND_WRAPPER, BaseWrapper -def get_input_shape_from_cfg(config: mmcv.Config) -> List[int]: - """Get the input shape from the model config for OpenVINO Model Optimizer. - - Args: - config (mmcv.Config): Model config. - Returns: - List[int]: The input shape in [1, 3, H, W] format from config - or [1, 3, 800, 1344]. - """ - shape = [] - test_pipeline = config.get('test_pipeline', None) - if test_pipeline is not None: - img_scale = test_pipeline[1]['img_scale'] - shape = [1, 3, img_scale[1], img_scale[0]] - else: - shape = [1, 3, 800, 1344] - return shape - - -class OpenVINOWrapper(torch.nn.Module): +@BACKEND_WRAPPER.register_module(Backend.OPENVINO.value) +class OpenVINOWrapper(BaseWrapper): """OpenVINO wrapper for inference in CPU. Args: ir_model_file (str): Input OpenVINO IR model file. + output_names (Sequence[str] | None): Names of model outputs in order. + Defaults to `None` and the wrapper will load the output names from + model. 
Examples: - >>> from mmdeploy.apis.openvino import OpenVINOWrapper + >>> from mmdeploy.backend.openvino import OpenVINOWrapper >>> import torch >>> >>> ir_model_file = 'model.xml' @@ -44,8 +30,11 @@ class OpenVINOWrapper(torch.nn.Module): >>> print(outputs) """ - def __init__(self, ir_model_file: str): - super(OpenVINOWrapper, self).__init__() + def __init__(self, + ir_model_file: str, + output_names: Optional[Sequence[str]] = None, + **kwargs): + from openvino.inference_engine import IECore self.ie = IECore() bin_path = osp.splitext(ir_model_file)[0] + '.bin' @@ -57,6 +46,12 @@ def __init__(self, ir_model_file: str): self.sess = self.ie.load_network( network=self.net, device_name=self.device.upper(), num_requests=1) + # TODO: Check if output_names can be read + if output_names is None: + output_names = [name for name in self.net.outputs] + + super().__init__(output_names) + def __update_device( self, inputs: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]: """Updates the device type to 'self.device' (cpu) for the input @@ -107,11 +102,13 @@ def forward(self, inputs: Dict[str, """ inputs = self.__update_device(inputs) self.__reshape(inputs) - outputs = self.openvino_execute(inputs) + outputs = self.__openvino_execute(inputs) + for output_name, numpy_tensor in outputs.items(): + outputs[output_name] = torch.from_numpy(numpy_tensor) return outputs @TimeCounter.count_time() - def openvino_execute( + def __openvino_execute( self, inputs: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]: """Run inference with OpenVINO IE. @@ -119,7 +116,7 @@ def openvino_execute( inputs (Dict[str, torch.Tensor]): The input name and tensor pairs. Returns: - Dict[str, torch.Tensor]: The output name and tensor pairs. + Dict[str, numpy.ndarray]: The output name and tensor pairs. """ outputs = self.sess.infer(inputs) return outputs diff --git a/mmdeploy/backend/ppl/__init__.py b/mmdeploy/backend/ppl/__init__.py new file mode 100644 index 0000000000..ddc0dab59b --- /dev/null +++ b/mmdeploy/backend/ppl/__init__.py @@ -0,0 +1,16 @@ +import importlib + + +def is_available(): + """Check whether ppl is installed. + + Returns: + bool: True if ppl package is installed. + """ + return importlib.util.find_spec('pyppl') is not None + + +if is_available(): + from .onnx2ppl import onnx2ppl + from .wrapper import PPLWrapper, register_engines + __all__ = ['onnx2ppl', 'register_engines', 'PPLWrapper'] diff --git a/mmdeploy/apis/ppl/onnx2ppl.py b/mmdeploy/backend/ppl/onnx2ppl.py similarity index 79% rename from mmdeploy/apis/ppl/onnx2ppl.py rename to mmdeploy/backend/ppl/onnx2ppl.py index 331f62cb22..5409bce995 100644 --- a/mmdeploy/apis/ppl/onnx2ppl.py +++ b/mmdeploy/backend/ppl/onnx2ppl.py @@ -1,25 +1,9 @@ from typing import Optional, Sequence -import torch from pyppl import nn as pplnn -from mmdeploy.apis.ppl import register_engines - - -def parse_cuda_device_id(device: str) -> int: - """Parse cuda device index from a string. - - Args: - device (str): The typical style of string specifying cuda device, - e.g.: 'cuda:0'. - - Returns: - int: The parsed device id, defaults to `0`. 
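# Quick sketch of the refactored OpenVINO wrapper above: outputs now come back
# as torch tensors keyed by name. 'model.xml' (with its sibling 'model.bin')
# is a placeholder IR produced by the OpenVINO Model Optimizer.
import torch

from mmdeploy.backend.openvino import OpenVINOWrapper

model = OpenVINOWrapper('model.xml')
outputs = model(dict(input=torch.rand(1, 3, 224, 224)))
for name, tensor in outputs.items():
    print(name, tensor.dtype, tuple(tensor.shape))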
- """ - device_id = 0 - if len(device) >= 6: - device_id = torch.device(device).index - return device_id +from mmdeploy.utils.device import parse_cuda_device_id +from .wrapper import register_engines def onnx2ppl(algo_file: str, diff --git a/mmdeploy/apis/ppl/ppl_utils.py b/mmdeploy/backend/ppl/wrapper.py similarity index 76% rename from mmdeploy/apis/ppl/ppl_utils.py rename to mmdeploy/backend/ppl/wrapper.py index 9ad8fcfa8b..2b5317c909 100644 --- a/mmdeploy/apis/ppl/ppl_utils.py +++ b/mmdeploy/backend/ppl/wrapper.py @@ -1,13 +1,16 @@ import logging import sys -from typing import Dict, Sequence +from typing import Dict, List, Optional, Sequence import numpy as np +import onnx +import pyppl.common as pplcommon +import pyppl.nn as pplnn import torch -from pyppl import common as pplcommon -from pyppl import nn as pplnn +from mmdeploy.utils import Backend, parse_device_id from mmdeploy.utils.timer import TimeCounter +from ..base import BACKEND_WRAPPER, BaseWrapper def register_engines(device_id: int, @@ -15,7 +18,7 @@ def register_engines(device_id: int, quick_select: bool = False, input_shapes: Sequence[Sequence[int]] = None, export_algo_file: str = None, - import_algo_file: str = None): + import_algo_file: str = None) -> List[pplnn.Engine]: """Register engines for ppl runtime. Args: @@ -97,7 +100,8 @@ def register_engines(device_id: int, return engines -class PPLWrapper(torch.nn.Module): +@BACKEND_WRAPPER.register_module(Backend.PPL.value) +class PPLWrapper(BaseWrapper): """PPL wrapper for inference. Args: @@ -106,7 +110,7 @@ class PPLWrapper(torch.nn.Module): device_id (int): Device id to put model. Examples: - >>> from mmdeploy.apis.ppl import PPLWrapper + >>> from mmdeploy.backend.ppl import PPLWrapper >>> import torch >>> >>> onnx_file = 'model.onnx' @@ -116,8 +120,19 @@ class PPLWrapper(torch.nn.Module): >>> print(outputs) """ - def __init__(self, onnx_file: str, algo_file: str, device_id: int): - super(PPLWrapper, self).__init__() + def __init__(self, + onnx_file: str, + algo_file: str, + device: str, + output_names: Optional[Sequence[str]] = None, + **kwargs): + + # enable quick select by default to speed up pipeline + # TODO: open it to users after ppl supports saving serialized models + + # TODO: assert device is gpu + device_id = parse_device_id(device) + # enable quick select by default to speed up pipeline # TODO: disable_avx512 will be removed or open to users in config engines = register_engines( @@ -139,36 +154,39 @@ def __init__(self, onnx_file: str, algo_file: str, device_id: int): for i in range(runtime.GetInputCount()) } - def forward(self, inputs: Dict[str, torch.Tensor]): + if output_names is None: + model = onnx.load(onnx_file) + output_names = [node.name for node in model.graph.output] + + super().__init__(output_names) + + def forward(self, inputs: Dict[str, + torch.Tensor]) -> Dict[str, torch.Tensor]: """Run forward inference. Args: inputs (Dict[str, torch.Tensor]): Input name and tensor pairs. Return: - list[np.ndarray]: A list of output numpy array. + Dict[str, torch.Tensor]: The output name and tensor pairs. 
""" for name, input_tensor in inputs.items(): input_tensor = input_tensor.contiguous() self.inputs[name].ConvertFromHost(input_tensor.cpu().numpy()) - self.ppl_execute() - outputs = [] + self.__ppl_execute() + outputs = {} for i in range(self.runtime.GetOutputCount()): out_tensor = self.runtime.GetOutputTensor(i).ConvertToHost() - if out_tensor: - outputs.append(np.array(out_tensor, copy=False)) - else: - out_shape = self.runtime.GetOutputTensor( - i).GetShape().GetDims() - outputs.append(np.random.rand(*out_shape)) + name = self.output_names[i] + outputs[name] = torch.from_numpy(np.array(out_tensor, copy=False)) return outputs @TimeCounter.count_time() - def ppl_execute(self): + def __ppl_execute(self): """Run inference with PPL.""" status = self.runtime.Run() - assert status == pplcommon.RC_SUCCESS, 'Run() '\ - 'failed: ' + pplcommon.GetRetCodeStr(status) + assert status == pplcommon.RC_SUCCESS, 'Run() failed: ' + \ + pplcommon.GetRetCodeStr(status) status = self.runtime.Sync() - assert status == pplcommon.RC_SUCCESS, 'Sync() '\ - 'failed: ' + pplcommon.GetRetCodeStr(status) + assert status == pplcommon.RC_SUCCESS, 'Sync() failed: ' + \ + pplcommon.GetRetCodeStr(status) diff --git a/mmdeploy/backend/tensorrt/__init__.py b/mmdeploy/backend/tensorrt/__init__.py new file mode 100644 index 0000000000..e03048d729 --- /dev/null +++ b/mmdeploy/backend/tensorrt/__init__.py @@ -0,0 +1,30 @@ +# flake8: noqa +import importlib +import os.path as osp + +from .init_plugins import get_ops_path, load_tensorrt_plugin + + +def is_available(): + """Check whether TensorRT and plugins are installed. + + Returns: + bool: True if TensorRT and plugins are installed. + """ + tensorrt_op_path = get_ops_path() + if not osp.exists(tensorrt_op_path): + return False + + return importlib.util.find_spec('tensorrt') is not None + + +if is_available(): + from .utils import create_trt_engine, load_trt_engine, save_trt_engine + from .wrapper import TRTWrapper + + # load tensorrt plugin lib + load_tensorrt_plugin() + + __all__ = [ + 'create_trt_engine', 'save_trt_engine', 'load_trt_engine', 'TRTWrapper' + ] diff --git a/mmdeploy/apis/tensorrt/calib_utils.py b/mmdeploy/backend/tensorrt/calib_utils.py similarity index 96% rename from mmdeploy/apis/tensorrt/calib_utils.py rename to mmdeploy/backend/tensorrt/calib_utils.py index e2fad16028..3122730b93 100644 --- a/mmdeploy/apis/tensorrt/calib_utils.py +++ b/mmdeploy/backend/tensorrt/calib_utils.py @@ -59,7 +59,7 @@ def __del__(self): if hasattr(self, 'calib_file'): self.calib_file.close() - def get_batch(self, names: Sequence[str], **kwargs): + def get_batch(self, names: Sequence[str], **kwargs) -> list: """Get batch data.""" if self.count < self.dataset_length: @@ -95,7 +95,7 @@ def get_batch(self, names: Sequence[str], **kwargs): else: return None - def get_algorithm(self): + def get_algorithm(self) -> trt.CalibrationAlgoType: """Get Calibration algo type. Returns: @@ -103,7 +103,7 @@ def get_algorithm(self): """ return self.algorithm - def get_batch_size(self): + def get_batch_size(self) -> int: """Get batch size. 
Returns: diff --git a/mmdeploy/apis/tensorrt/init_plugins.py b/mmdeploy/backend/tensorrt/init_plugins.py similarity index 93% rename from mmdeploy/apis/tensorrt/init_plugins.py rename to mmdeploy/backend/tensorrt/init_plugins.py index fd06597d63..97b8efa599 100644 --- a/mmdeploy/apis/tensorrt/init_plugins.py +++ b/mmdeploy/backend/tensorrt/init_plugins.py @@ -4,7 +4,7 @@ import os -def get_ops_path(): +def get_ops_path() -> str: """Get path of the TensorRT plugin library. Returns: @@ -20,7 +20,7 @@ def get_ops_path(): return lib_path -def load_tensorrt_plugin(): +def load_tensorrt_plugin() -> bool: """Load TensorRT plugins library. Returns: diff --git a/mmdeploy/apis/tensorrt/onnx2tensorrt.py b/mmdeploy/backend/tensorrt/onnx2tensorrt.py similarity index 81% rename from mmdeploy/apis/tensorrt/onnx2tensorrt.py rename to mmdeploy/backend/tensorrt/onnx2tensorrt.py index 123150ffc2..b96e96cea9 100644 --- a/mmdeploy/apis/tensorrt/onnx2tensorrt.py +++ b/mmdeploy/backend/tensorrt/onnx2tensorrt.py @@ -6,24 +6,8 @@ import tensorrt as trt from mmdeploy.utils import (get_calib_filename, get_common_config, - get_model_inputs, load_config) -from .tensorrt_utils import create_trt_engine, save_trt_engine - - -def parse_device_id(device: str) -> int: - """Parse cuda device index from a string. - - Args: - device (str): The typical style of string specifying cuda device, - e.g.: 'cuda:0'. - - Returns: - int: The parsed device id, defaults to `0`. - """ - device_id = 0 - if len(device) >= 6: - device_id = int(device[5:]) - return device_id + get_model_inputs, load_config, parse_device_id) +from .utils import create_trt_engine, save_trt_engine def onnx2tensorrt(work_dir: str, diff --git a/mmdeploy/apis/tensorrt/tensorrt_utils.py b/mmdeploy/backend/tensorrt/utils.py similarity index 57% rename from mmdeploy/apis/tensorrt/tensorrt_utils.py rename to mmdeploy/backend/tensorrt/utils.py index dd4fa53970..4cd546460d 100644 --- a/mmdeploy/apis/tensorrt/tensorrt_utils.py +++ b/mmdeploy/backend/tensorrt/utils.py @@ -1,11 +1,10 @@ -from typing import Any, Dict, Sequence, Union +from typing import Dict, Sequence, Union import onnx import tensorrt as trt import torch from packaging import version -from mmdeploy.utils.timer import TimeCounter from .calib_utils import HDF5Calibrator @@ -17,7 +16,7 @@ def create_trt_engine(onnx_model: Union[str, onnx.ModelProto], int8_param: dict = None, max_workspace_size: int = 0, device_id: int = 0, - **kwargs): + **kwargs) -> trt.ICudaEngine: """Create a tensorrt engine from ONNX. 
Args: @@ -87,7 +86,8 @@ def create_trt_engine(onnx_model: Union[str, onnx.ModelProto], config.add_optimization_profile(profile) if fp16_mode: - builder.fp16_mode = fp16_mode + if version.parse(trt.__version__) < version.parse('8'): + builder.fp16_mode = fp16_mode config.set_flag(trt.BuilderFlag.FP16) if int8_mode: @@ -100,9 +100,9 @@ def create_trt_engine(onnx_model: Union[str, onnx.ModelProto], device_id=device_id, algorithm=int8_param.get( 'algorithm', trt.CalibrationAlgoType.ENTROPY_CALIBRATION_2)) - - builder.int8_mode = int8_mode - builder.int8_calibrator = config.int8_calibrator + if version.parse(trt.__version__) < version.parse('8'): + builder.int8_mode = int8_mode + builder.int8_calibrator = config.int8_calibrator # create engine with torch.cuda.device(device): @@ -112,7 +112,7 @@ def create_trt_engine(onnx_model: Union[str, onnx.ModelProto], return engine -def save_trt_engine(engine: trt.ICudaEngine, path: str): +def save_trt_engine(engine: trt.ICudaEngine, path: str) -> None: """Serialize TensorRT engine to disk. Args: @@ -123,7 +123,7 @@ def save_trt_engine(engine: trt.ICudaEngine, path: str): f.write(bytearray(engine.serialize())) -def load_trt_engine(path: str): +def load_trt_engine(path: str) -> trt.ICudaEngine: """Deserialize TensorRT engine from disk. Args: @@ -139,7 +139,7 @@ def load_trt_engine(path: str): return engine -def torch_dtype_from_trt(dtype: trt.DataType): +def torch_dtype_from_trt(dtype: trt.DataType) -> torch.dtype: """Convert pytorch dtype to TensorRT dtype. Args: @@ -168,7 +168,6 @@ def torch_device_from_trt(device: trt.TensorLocation): Args: device (trt.TensorLocation): The device in tensorrt. - Returns: torch.device: The corresponding device in torch. """ @@ -178,108 +177,3 @@ def torch_device_from_trt(device: trt.TensorLocation): return torch.device('cpu') else: return TypeError(f'{device} is not supported by torch') - - -class TRTWrapper(torch.nn.Module): - """TensorRT engine wrapper for inference. - - Args: - engine (tensorrt.ICudaEngine): TensorRT engine to wrap. - - Note: - If the engine is converted from onnx model. The input_names and - output_names should be the same as onnx model. - - Examples: - >>> from mmdeploy.apis.tensorrt import TRTWrapper - >>> engine_file = 'resnet.engine' - >>> model = TRTWrapper(engine_file) - >>> inputs = dict(input=torch.randn(1, 3, 224, 224)) - >>> outputs = model(inputs) - >>> print(outputs) - """ - - def __init__(self, engine: Union[str, trt.ICudaEngine]): - super(TRTWrapper, self).__init__() - self.engine = engine - if isinstance(self.engine, str): - self.engine = load_trt_engine(engine) - - if not isinstance(self.engine, trt.ICudaEngine): - raise TypeError(f'`engine` should be str or trt.ICudaEngine, \ - but given: {type(self.engine)}') - - self._register_state_dict_hook(TRTWrapper._on_state_dict) - self.context = self.engine.create_execution_context() - - self._load_io_names() - - def _load_io_names(self): - """Load input/output names from engine.""" - names = [_ for _ in self.engine] - input_names = list(filter(self.engine.binding_is_input, names)) - output_names = list(set(names) - set(input_names)) - self.input_names = input_names - self.output_names = output_names - - def _on_state_dict(self, state_dict: Dict[str, Any], prefix: str): - """State dict hook - Args: - state_dict (Dict[str, Any]): A dict to save state information - such as the serialized engine, input/output names. - prefix (str): A string to be prefixed at the key of the - state dict. 
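# Sketch of building and serializing an engine with the helpers above. The
# `input_shapes` layout with min/opt/max profile fields is an assumption about
# this helper's interface, as are the file names; treat both accordingly.
from mmdeploy.backend.tensorrt import create_trt_engine, save_trt_engine

engine = create_trt_engine(
    'end2end.onnx',
    input_shapes=dict(
        input=dict(
            min_shape=[1, 3, 224, 224],
            opt_shape=[1, 3, 224, 224],
            max_shape=[1, 3, 224, 224])),
    fp16_mode=False,
    max_workspace_size=1 << 30,
    device_id=0)
save_trt_engine(engine, 'end2end.engine')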
- """ - state_dict[prefix + 'engine'] = bytearray(self.engine.serialize()) - state_dict[prefix + 'input_names'] = self.input_names - state_dict[prefix + 'output_names'] = self.output_names - - def forward(self, inputs: Dict[str, torch.Tensor]): - """Run forward inference. - - Args: - inputs (Dict[str, torch.Tensor]): The input name and tensor pairs. - - Return: - Dict[str, torch.Tensor]: The output name and tensor pairs. - """ - assert self.input_names is not None - assert self.output_names is not None - bindings = [None] * (len(self.input_names) + len(self.output_names)) - - for input_name, input_tensor in inputs.items(): - idx = self.engine.get_binding_index(input_name) - - # All input tensors must be gpu variables - assert 'cuda' in input_tensor.device.type - - if input_tensor.dtype == torch.long: - input_tensor = input_tensor.int() - self.context.set_binding_shape(idx, tuple(input_tensor.shape)) - bindings[idx] = input_tensor.contiguous().data_ptr() - - # create output tensors - outputs = {} - for i, output_name in enumerate(self.output_names): - idx = self.engine.get_binding_index(output_name) - dtype = torch_dtype_from_trt(self.engine.get_binding_dtype(idx)) - shape = tuple(self.context.get_binding_shape(idx)) - - device = torch_device_from_trt(self.engine.get_location(idx)) - output = torch.empty(size=shape, dtype=dtype, device=device) - outputs[output_name] = output - bindings[idx] = output.data_ptr() - - self.trt_execute(bindings=bindings) - - return outputs - - @TimeCounter.count_time() - def trt_execute(self, bindings: Sequence[int]): - """Run inference with TensorRT. - - Args: - bindings (list[int]): A list of integer binding the input/output. - """ - self.context.execute_async_v2(bindings, - torch.cuda.current_stream().cuda_stream) diff --git a/mmdeploy/backend/tensorrt/wrapper.py b/mmdeploy/backend/tensorrt/wrapper.py new file mode 100644 index 0000000000..324ffa8f29 --- /dev/null +++ b/mmdeploy/backend/tensorrt/wrapper.py @@ -0,0 +1,123 @@ +from typing import Any, Dict, Optional, Sequence, Union + +import tensorrt as trt +import torch + +from mmdeploy.utils import Backend +from mmdeploy.utils.timer import TimeCounter +from ..base import BACKEND_WRAPPER, BaseWrapper +from .utils import load_trt_engine, torch_device_from_trt, torch_dtype_from_trt + + +@BACKEND_WRAPPER.register_module(Backend.TENSORRT.value) +class TRTWrapper(BaseWrapper): + """TensorRT engine wrapper for inference. + + Args: + engine (tensorrt.ICudaEngine): TensorRT engine to wrap. + output_names (Sequence[str] | None): Names of model outputs in order. + Defaults to `None` and the wrapper will load the output names from + model. + + Note: + If the engine is converted from onnx model. The input_names and + output_names should be the same as onnx model. 
+ + Examples: + >>> from mmdeploy.backend.tensorrt import TRTWrapper + >>> engine_file = 'resnet.engine' + >>> model = TRTWrapper(engine_file) + >>> inputs = dict(input=torch.randn(1, 3, 224, 224)) + >>> outputs = model(inputs) + >>> print(outputs) + """ + + def __init__(self, + engine: Union[str, trt.ICudaEngine], + output_names: Optional[Sequence[str]] = None): + super().__init__(output_names) + self.engine = engine + if isinstance(self.engine, str): + self.engine = load_trt_engine(engine) + + if not isinstance(self.engine, trt.ICudaEngine): + raise TypeError(f'`engine` should be str or trt.ICudaEngine, \ + but given: {type(self.engine)}') + + self._register_state_dict_hook(TRTWrapper.__on_state_dict) + self.context = self.engine.create_execution_context() + + self.__load_io_names() + + def __load_io_names(self): + """Load input/output names from engine.""" + names = [_ for _ in self.engine] + input_names = list(filter(self.engine.binding_is_input, names)) + self._input_names = input_names + + if self._output_names is None: + output_names = list(set(names) - set(input_names)) + self._output_names = output_names + + def __on_state_dict(self, state_dict: Dict[str, Any], prefix: str): + """State dict hook + Args: + state_dict (Dict[str, Any]): A dict to save state information + such as the serialized engine, input/output names. + prefix (str): A string to be prefixed at the key of the + state dict. + """ + state_dict[prefix + 'engine'] = bytearray(self.engine.serialize()) + state_dict[prefix + 'input_names'] = self._input_names + state_dict[prefix + 'output_names'] = self._output_names + + def forward(self, inputs: Dict[str, + torch.Tensor]) -> Dict[str, torch.Tensor]: + """Run forward inference. + + Args: + inputs (Dict[str, torch.Tensor]): The input name and tensor pairs. + + Return: + Dict[str, torch.Tensor]: The output name and tensor pairs. + """ + assert self._input_names is not None + assert self._output_names is not None + bindings = [None] * (len(self._input_names) + len(self._output_names)) + + for input_name, input_tensor in inputs.items(): + idx = self.engine.get_binding_index(input_name) + + # All input tensors must be gpu variables + assert 'cuda' in input_tensor.device.type + + if input_tensor.dtype == torch.long: + input_tensor = input_tensor.int() + self.context.set_binding_shape(idx, tuple(input_tensor.shape)) + bindings[idx] = input_tensor.contiguous().data_ptr() + + # create output tensors + outputs = {} + for output_name in self._output_names: + idx = self.engine.get_binding_index(output_name) + dtype = torch_dtype_from_trt(self.engine.get_binding_dtype(idx)) + shape = tuple(self.context.get_binding_shape(idx)) + + device = torch_device_from_trt(self.engine.get_location(idx)) + output = torch.empty(size=shape, dtype=dtype, device=device) + outputs[output_name] = output + bindings[idx] = output.data_ptr() + + self.__trt_execute(bindings=bindings) + + return outputs + + @TimeCounter.count_time() + def __trt_execute(self, bindings: Sequence[int]): + """Run inference with TensorRT. + + Args: + bindings (list[int]): A list of integer binding the input/output. 
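# Usage sketch for the wrapper above: input tensors must already live on a
# CUDA device (forward() asserts this), and torch.long inputs are cast to int
# before binding. The engine path and output names are placeholders.
import torch

from mmdeploy.backend.tensorrt import TRTWrapper

model = TRTWrapper('end2end.engine', output_names=['dets', 'labels'])
inputs = dict(input=torch.rand(1, 3, 224, 224).cuda())
with torch.no_grad():
    outputs = model(inputs)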
+        """
+        self.context.execute_async_v2(bindings,
+                                      torch.cuda.current_stream().cuda_stream)
diff --git a/mmdeploy/codebase/__init__.py b/mmdeploy/codebase/__init__.py
new file mode 100644
index 0000000000..f5710c8c70
--- /dev/null
+++ b/mmdeploy/codebase/__init__.py
@@ -0,0 +1,31 @@
+import importlib
+import logging
+
+from .base import BaseTask, MMCodebase, get_codebase_class
+
+if importlib.util.find_spec('mmcls'):
+    importlib.import_module('mmdeploy.codebase.mmcls')
+else:
+    logging.debug('mmcls is not installed.')
+
+if importlib.util.find_spec('mmdet'):
+    importlib.import_module('mmdeploy.codebase.mmdet')
+else:
+    logging.debug('mmdet is not installed.')
+
+if importlib.util.find_spec('mmseg'):
+    importlib.import_module('mmdeploy.codebase.mmseg')
+else:
+    logging.debug('mmseg is not installed.')
+
+if importlib.util.find_spec('mmocr'):
+    importlib.import_module('mmdeploy.codebase.mmocr')
+else:
+    logging.debug('mmocr is not installed.')
+
+if importlib.util.find_spec('mmedit'):
+    importlib.import_module('mmdeploy.codebase.mmedit')
+else:
+    logging.debug('mmedit is not installed.')
+
+__all__ = ['MMCodebase', 'BaseTask', 'get_codebase_class']
diff --git a/mmdeploy/codebase/base/__init__.py b/mmdeploy/codebase/base/__init__.py
new file mode 100644
index 0000000000..f5bda10b70
--- /dev/null
+++ b/mmdeploy/codebase/base/__init__.py
@@ -0,0 +1,8 @@
+from .backend_model import BaseBackendModel
+from .mmcodebase import CODEBASE, MMCodebase, get_codebase_class
+from .task import BaseTask
+
+__all__ = [
+    'BaseBackendModel', 'BaseTask', 'MMCodebase', 'get_codebase_class',
+    'CODEBASE'
+]
diff --git a/mmdeploy/codebase/base/backend_model.py b/mmdeploy/codebase/base/backend_model.py
new file mode 100644
index 0000000000..fd93239480
--- /dev/null
+++ b/mmdeploy/codebase/base/backend_model.py
@@ -0,0 +1,78 @@
+from abc import ABCMeta, abstractmethod
+from typing import Optional, Sequence
+
+import torch
+
+from mmdeploy.utils.constants import Backend
+
+
+class BaseBackendModel(torch.nn.Module, metaclass=ABCMeta):
+    """A backend model wraps the details to initialize and run a backend
+    engine."""
+
+    def __init__(self):
+        super().__init__()
+
+    @staticmethod
+    def _build_wrapper(backend: Backend,
+                       backend_files: Sequence[str],
+                       device: str,
+                       output_names: Optional[Sequence[str]] = None):
+        """The default method to build backend wrappers.
+
+        Args:
+            backend (Backend): The backend enum type.
+            backend_files (Sequence[str]): Paths to all required backend files
+                (e.g. '.onnx' for ONNX Runtime, '.param' and '.bin' for ncnn).
+            device (str): A string specifying device type.
+            output_names (Sequence[str] | None): Names of model outputs in
+                order. Defaults to `None` and the wrapper will load the output
+                names from the model.
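# Illustrative subclass of BaseBackendModel (not part of this patch) showing
# how a codebase-side model is expected to use _build_wrapper:
from typing import Sequence

import torch

from mmdeploy.codebase.base import BaseBackendModel
from mmdeploy.utils import Backend


class ToyBackendModel(BaseBackendModel):

    def __init__(self, backend: Backend, backend_files: Sequence[str],
                 device: str):
        super().__init__()
        # the output name 'output' is an assumption for this sketch
        self.wrapper = self._build_wrapper(
            backend, backend_files, device, output_names=['output'])

    def forward(self, img: torch.Tensor):
        outputs = self.wrapper(dict(input=img))
        return self.wrapper.output_to_list(outputs)

    def show_result(self, *args, **kwargs):
        raise NotImplementedError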
+        """
+        if backend == Backend.ONNXRUNTIME:
+            from mmdeploy.backend.onnxruntime import ORTWrapper
+            return ORTWrapper(
+                onnx_file=backend_files[0],
+                device=device,
+                output_names=output_names)
+        elif backend == Backend.TENSORRT:
+            from mmdeploy.backend.tensorrt import TRTWrapper
+            return TRTWrapper(
+                engine=backend_files[0], output_names=output_names)
+        elif backend == Backend.PPL:
+            from mmdeploy.backend.ppl import PPLWrapper
+            return PPLWrapper(
+                onnx_file=backend_files[0],
+                algo_file=backend_files[1],
+                device=device,
+                output_names=output_names)
+        elif backend == Backend.NCNN:
+            from mmdeploy.backend.ncnn import NCNNWrapper
+            return NCNNWrapper(
+                param_file=backend_files[0],
+                bin_file=backend_files[1],
+                output_names=output_names)
+        elif backend == Backend.OPENVINO:
+            from mmdeploy.backend.openvino import OpenVINOWrapper
+            return OpenVINOWrapper(
+                ir_model_file=backend_files[0], output_names=output_names)
+        else:
+            raise NotImplementedError(f'Unknown backend type: {backend.value}')
+
+    @abstractmethod
+    def forward(self, *args, **kwargs):
+        """The forward interface that must be implemented.
+
+        The arguments should align to forward() of the corresponding model of
+        OpenMMLab codebases.
+        """
+        pass
+
+    @abstractmethod
+    def show_result(self, *args, **kwargs):
+        """The visualize interface that must be implemented.
+
+        The arguments should align to show_result() of the corresponding model
+        of OpenMMLab codebases.
+        """
+        pass
diff --git a/mmdeploy/codebase/base/mmcodebase.py b/mmdeploy/codebase/base/mmcodebase.py
new file mode 100644
index 0000000000..552a12b6af
--- /dev/null
+++ b/mmdeploy/codebase/base/mmcodebase.py
@@ -0,0 +1,122 @@
+from abc import ABCMeta, abstractmethod
+from typing import Optional, Union
+
+import mmcv
+import torch
+from mmcv.utils.registry import Registry
+from torch.utils.data import DataLoader, Dataset
+
+from mmdeploy.utils import Codebase, Task
+
+
+class MMCodebase(metaclass=ABCMeta):
+    """Wrap the APIs of an OpenMMLab codebase."""
+
+    task_registry: Registry = None
+
+    def __init__(self) -> None:
+        pass
+
+    @classmethod
+    def get_task_class(cls, task: Task) -> type:
+        """Get the task processor class according to the task type.
+
+        Args:
+            task (Task): The task enumeration.
+
+        Returns:
+            type: The task processor class.
+        """
+        return cls.task_registry.module_dict[task.value]
+
+    @staticmethod
+    @abstractmethod
+    def build_task_processor(model_cfg: mmcv.Config, deploy_cfg: mmcv.Config,
+                             device: str):
+        """The interface to build the task processors of the codebase.
+
+        Args:
+            model_cfg (str | mmcv.Config): Model config file.
+            deploy_cfg (str | mmcv.Config): Deployment config file.
+            device (str): A string specifying device type.
+
+        Returns:
+            BaseTask: A task processor.
+        """
+        pass
+
+    @staticmethod
+    @abstractmethod
+    def build_dataset(dataset_cfg: Union[str, mmcv.Config],
+                      dataset_type: str = 'val',
+                      **kwargs) -> Dataset:
+        """Build dataset for the codebase.
+
+        Args:
+            dataset_cfg (str | mmcv.Config): Dataset config file or Config
+                object.
+            dataset_type (str): Specifying dataset type, e.g.: 'train', 'test',
+                'val', defaults to 'val'.
+
+        Returns:
+            Dataset: The built dataset.
+        """
+        pass
+
+    @staticmethod
+    @abstractmethod
+    def build_dataloader(dataset: Dataset, samples_per_gpu: int,
+                         workers_per_gpu: int, **kwargs) -> DataLoader:
+        """Build PyTorch dataloader.
+
+        Args:
+            dataset (Dataset): A PyTorch dataset.
+            samples_per_gpu (int): Number of training samples on each GPU,
+                i.e., batch size of each GPU.
+ workers_per_gpu (int): How many subprocesses to use for data + loading for each GPU. + + Returns: + DataLoader: A PyTorch dataloader. + """ + pass + + @staticmethod + @abstractmethod + def single_gpu_test(model: torch.nn.Module, + data_loader: DataLoader, + show: bool = False, + out_dir: Optional[str] = None, + **kwargs): + """Run test with single gpu. + + Args: + model (torch.nn.Module): Input model from nn.Module. + data_loader (DataLoader): PyTorch data loader. + show (bool): Specifying whether to show plotted results. Defaults + to `False`. + out_dir (str): A directory to save results, defaults to `None`. + + Returns: + list: The prediction results. + """ + pass + + +def __build_codebase_class(codebase: Codebase, registry: Registry): + return registry.module_dict[codebase.value] + + +CODEBASE = Registry('Codebases', build_func=__build_codebase_class) + + +def get_codebase_class(codebase: Codebase) -> type: + """Get the codebase class from the registry. + + Args: + codebase (Codebase): The codebase enum type. + + Returns: + type: The codebase class + """ + return CODEBASE.build(codebase) diff --git a/mmdeploy/codebase/base/task.py b/mmdeploy/codebase/base/task.py new file mode 100644 index 0000000000..373dfca653 --- /dev/null +++ b/mmdeploy/codebase/base/task.py @@ -0,0 +1,238 @@ +from abc import ABCMeta, abstractmethod +from typing import Any, Dict, Optional, Sequence, Tuple, Union + +import mmcv +import numpy as np +import torch +from torch.utils.data import DataLoader, Dataset + +from mmdeploy.utils import get_codebase + + +class BaseTask(metaclass=ABCMeta): + """Wrap the processing functions of a Computer Vision task. + + Args: + model_cfg (str | mmcv.Config): Model config file. + deploy_cfg (str | mmcv.Config): Deployment config file. + device (str): A string specifying device type. + """ + + def __init__(self, model_cfg: mmcv.Config, deploy_cfg: mmcv.Config, + device: str): + + self.model_cfg = model_cfg + self.deploy_cfg = deploy_cfg + self.device = device + + codebase = get_codebase(deploy_cfg) + + from .mmcodebase import get_codebase_class + self.codebase_class = get_codebase_class(codebase) + + @abstractmethod + def init_backend_model(self, + model_files: Sequence[str] = None, + **kwargs) -> torch.nn.Module: + """Initialize backend model. + + Args: + model_files (Sequence[str]): Input model files. + + Returns: + nn.Module: An initialized backend model. + """ + pass + + @abstractmethod + def init_pytorch_model(self, + model_checkpoint: Optional[str] = None, + cfg_options: Optional[Dict] = None, + **kwargs) -> torch.nn.Module: + """Initialize torch model. + + Args: + model_checkpoint (str): The checkpoint file of torch model, + defaults to `None`. + cfg_options (dict): Optional config key-pair parameters. + + Returns: + nn.Module: An initialized torch model generated by other OpenMMLab + codebases. + """ + pass + + def build_dataset(self, + dataset_cfg: Union[str, mmcv.Config], + dataset_type: str = 'val', + **kwargs) -> Dataset: + """Build dataset for different codebase. + + Args: + dataset_cfg (str | mmcv.Config): Dataset config file or Config + object. + dataset_type (str): Specifying dataset type, e.g.: 'train', 'test', + 'val', defaults to 'val'. + + Returns: + Dataset: The built dataset. + """ + return self.codebase_class.build_dataset(dataset_cfg, dataset_type, + **kwargs) + + def build_dataloader(self, dataset: Dataset, samples_per_gpu: int, + workers_per_gpu: int, **kwargs) -> DataLoader: + """Build PyTorch dataloader. + + Args: + dataset (Dataset): A PyTorch dataset. 
+            samples_per_gpu (int): Number of training samples on each GPU,
+                i.e., batch size of each GPU.
+            workers_per_gpu (int): How many subprocesses to use for data
+                loading for each GPU.
+
+        Returns:
+            DataLoader: A PyTorch dataloader.
+        """
+        return self.codebase_class.build_dataloader(dataset, samples_per_gpu,
+                                                    workers_per_gpu, **kwargs)
+
+    def single_gpu_test(self,
+                        model: torch.nn.Module,
+                        data_loader: DataLoader,
+                        show: bool = False,
+                        out_dir: Optional[str] = None,
+                        **kwargs):
+        """Run test with single gpu.
+
+        Args:
+            model (torch.nn.Module): Input model from nn.Module.
+            data_loader (DataLoader): PyTorch data loader.
+            show (bool): Specifying whether to show plotted results. Defaults
+                to `False`.
+            out_dir (str): A directory to save results, defaults to `None`.
+
+        Returns:
+            list: The prediction results.
+        """
+        return self.codebase_class.single_gpu_test(model, data_loader, show,
+                                                   out_dir, **kwargs)
+
+    @abstractmethod
+    def create_input(self,
+                     imgs: Union[str, np.ndarray],
+                     input_shape: Sequence[int] = None,
+                     **kwargs) -> Tuple[Dict, torch.Tensor]:
+        """Create input for model.
+
+        Args:
+            imgs (str | np.ndarray): Input image(s), accepted data types are
+                `str`, `np.ndarray`.
+            input_shape (list[int]): Input shape of image in (width, height)
+                format, defaults to `None`.
+
+        Returns:
+            tuple: (data, img), meta information for the input image and input
+                image tensor.
+        """
+        pass
+
+    @abstractmethod
+    def visualize(self,
+                  model: torch.nn.Module,
+                  image: Union[str, np.ndarray],
+                  result: list,
+                  output_file: str,
+                  window_name: str = '',
+                  show_result: bool = False,
+                  **kwargs):
+        """Visualize predictions of a model.
+
+        Args:
+            model (nn.Module): Input model.
+            image (str | np.ndarray): Input image to draw predictions on.
+            result (list): A list of predictions.
+            output_file (str): Output file to save drawn image.
+            window_name (str): The name of visualization window. Defaults to
+                an empty string.
+            show_result (bool): Whether to show result in windows, defaults
+                to `False`.
+        """
+        pass
+
+    @staticmethod
+    @abstractmethod
+    def run_inference(model, model_inputs: Dict[str, torch.Tensor]):
+        """Run inference once for a model of an OpenMMLab codebase.
+
+        Args:
+            model (nn.Module): Input model.
+            model_inputs (dict): A dict containing model inputs tensor and
+                meta info.
+
+        Returns:
+            list: The predictions of model inference.
+        """
+        pass
+
+    @staticmethod
+    @abstractmethod
+    def get_partition_cfg(partition_type: str, **kwargs) -> Dict:
+        """Get a certain partition config.
+
+        Args:
+            partition_type (str): A string specifying partition type.
+
+        Returns:
+            dict: A dictionary of partition config.
+        """
+        pass
+
+    @staticmethod
+    @abstractmethod
+    def get_tensor_from_input(input_data: Dict[str, Any],
+                              **kwargs) -> torch.Tensor:
+        """Get input tensor from input data.
+
+        Args:
+            input_data (dict): Input data containing meta info and image
+                tensor.
+
+        Returns:
+            torch.Tensor: An image in `Tensor`.
+        """
+        pass
+
+    @staticmethod
+    @abstractmethod
+    def evaluate_outputs(model_cfg,
+                         outputs: Sequence,
+                         dataset: Dataset,
+                         metrics: Optional[str] = None,
+                         out: Optional[str] = None,
+                         metric_options: Optional[dict] = None,
+                         format_only: bool = False,
+                         **kwargs):
+        """Perform post-processing to predictions of model.
+
+        Args:
+            outputs (list): A list of predictions of model inference.
+            dataset (Dataset): Input dataset to run test.
+            model_cfg (mmcv.Config): The model config.
+            metrics (str): Evaluation metrics, which depends on
+                the codebase and the dataset, e.g., "bbox", "segm", "proposal"
+                for COCO, and "mAP", "recall" for PASCAL VOC in mmdet;
+                "accuracy", "precision", "recall", "f1_score", "support"
+                for single label dataset, and "mAP", "CP", "CR", "CF1",
+                "OP", "OR", "OF1" for multi-label dataset in mmcls.
+                Defaults to `None`.
+            out (str): Output result file in pickle format, defaults to `None`.
+            metric_options (dict): Custom options for evaluation, will be
+                kwargs for dataset.evaluate() function. Defaults to `None`.
+            format_only (bool): Format the output results without performing
+                evaluation. It is useful when you want to format the result
+                to a specific format and submit it to the test server. Defaults
+                to `False`.
+        """
+        pass
diff --git a/mmdeploy/mmcls/__init__.py b/mmdeploy/codebase/mmcls/__init__.py
similarity index 50%
rename from mmdeploy/mmcls/__init__.py
rename to mmdeploy/codebase/mmcls/__init__.py
index d2b62e1cb6..33b69c74df 100644
--- a/mmdeploy/mmcls/__init__.py
+++ b/mmdeploy/codebase/mmcls/__init__.py
@@ -1,2 +1,2 @@
-from .export import * # noqa: F401,F403
+from .deploy import * # noqa: F401,F403
 from .models import * # noqa: F401,F403
diff --git a/mmdeploy/codebase/mmcls/deploy/__init__.py b/mmdeploy/codebase/mmcls/deploy/__init__.py
new file mode 100644
index 0000000000..42688e026e
--- /dev/null
+++ b/mmdeploy/codebase/mmcls/deploy/__init__.py
@@ -0,0 +1,4 @@
+from .classification import Classification
+from .mmclassification import MMClassification
+
+__all__ = ['MMClassification', 'Classification']
diff --git a/mmdeploy/codebase/mmcls/deploy/classification.py b/mmdeploy/codebase/mmcls/deploy/classification.py
new file mode 100644
index 0000000000..0a3dba3a91
--- /dev/null
+++ b/mmdeploy/codebase/mmcls/deploy/classification.py
@@ -0,0 +1,234 @@
+import logging
+from typing import Any, Dict, Optional, Sequence, Tuple, Union
+
+import mmcv
+import numpy as np
+import torch
+from torch.utils.data import Dataset
+
+from mmdeploy.codebase.base import BaseTask
+from mmdeploy.utils import Task
+from .mmclassification import MMCLS_TASK
+
+
+@MMCLS_TASK.register_module(Task.CLASSIFICATION.value)
+class Classification(BaseTask):
+    """Classification task class.
+
+    Args:
+        model_cfg (mmcv.Config): Original PyTorch model config file.
+        deploy_cfg (mmcv.Config): Deployment config file or loaded Config
+            object.
+        device (str): A string represents device type.
+    """
+
+    def __init__(self, model_cfg: mmcv.Config, deploy_cfg: mmcv.Config,
+                 device: str):
+        super(Classification, self).__init__(model_cfg, deploy_cfg, device)
+
+    def init_backend_model(self,
+                           model_files: Sequence[str] = None,
+                           **kwargs) -> torch.nn.Module:
+        """Initialize backend model.
+
+        Args:
+            model_files (Sequence[str]): Input model files.
+
+        Returns:
+            nn.Module: An initialized backend model.
+        """
+        from .classification_model import build_classification_model
+
+        model = build_classification_model(
+            model_files, self.model_cfg, self.deploy_cfg, device=self.device)
+
+        return model.eval()
+
+    def init_pytorch_model(self,
+                           model_checkpoint: Optional[str] = None,
+                           cfg_options: Optional[Dict] = None,
+                           **kwargs) -> torch.nn.Module:
+        """Initialize torch model.
+
+        Args:
+            model_checkpoint (str): The checkpoint file of torch model.
+                Default: None.
+            cfg_options (dict): Optional config key-pair parameters.
+
+        Returns:
+            nn.Module: An initialized torch model generated by OpenMMLab
+                codebases.
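+
+        Example:
+            A minimal, illustrative sketch; `model_cfg` and `deploy_cfg` are
+            assumed to be loaded mmcv Configs and the checkpoint path is a
+            placeholder.
+
+            >>> task = Classification(model_cfg, deploy_cfg, device='cpu')
+            >>> model = task.init_pytorch_model('resnet18.pth')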
+        """
+        from mmcls.apis import init_model
+        model = init_model(self.model_cfg, model_checkpoint, self.device,
+                           cfg_options)
+
+        return model.eval()
+
+    def create_input(self,
+                     imgs: Union[str, np.ndarray],
+                     input_shape: Optional[Sequence[int]] = None) \
+            -> Tuple[Dict, torch.Tensor]:
+        """Create input for classifier.
+
+        Args:
+            imgs (str | np.ndarray): Input image(s), accepted data types are
+                `str` and `np.ndarray`.
+            input_shape (list[int]): A list of two integers in (width, height)
+                format specifying input shape. Default: None.
+
+        Returns:
+            tuple: (data, img), meta information for the input image and input.
+        """
+        from mmcls.datasets.pipelines import Compose
+        from mmcv.parallel import collate, scatter
+
+        cfg = self.model_cfg.copy()
+        if isinstance(imgs, str):
+            if cfg.data.test.pipeline[0]['type'] != 'LoadImageFromFile':
+                cfg.data.test.pipeline.insert(0,
+                                              dict(type='LoadImageFromFile'))
+            data = dict(img_info=dict(filename=imgs), img_prefix=None)
+        else:
+            if cfg.data.test.pipeline[0]['type'] == 'LoadImageFromFile':
+                cfg.data.test.pipeline.pop(0)
+            data = dict(img=imgs)
+        # check whether input_shape is valid
+        if input_shape is not None:
+            if 'crop_size' in cfg.data.test.pipeline[2]:
+                crop_size = cfg.data.test.pipeline[2]['crop_size']
+                if tuple(input_shape) != (crop_size, crop_size):
+                    logging.warning(
+                        f'`input_shape` should be equal to `crop_size`: '
+                        f'{crop_size}, but given: {input_shape}')
+        test_pipeline = Compose(cfg.data.test.pipeline)
+        data = test_pipeline(data)
+        data = collate([data], samples_per_gpu=1)
+        data['img'] = [data['img']]
+        if self.device != 'cpu':
+            data = scatter(data, [self.device])[0]
+        return data, data['img']
+
+    def visualize(self,
+                  model: torch.nn.Module,
+                  image: Union[str, np.ndarray],
+                  result: list,
+                  output_file: str,
+                  window_name: str = '',
+                  show_result: bool = False):
+        """Visualize predictions of a model.
+
+        Args:
+            model (nn.Module): Input model.
+            image (str | np.ndarray): Input image to draw predictions on.
+            result (list): A list of predictions.
+            output_file (str): Output file to save drawn image.
+            window_name (str): The name of visualization window. Defaults to
+                an empty string.
+            show_result (bool): Whether to show result in windows.
+                Default: False.
+        """
+        show_img = mmcv.imread(image) if isinstance(image, str) else image
+        output_file = None if show_result else output_file
+        pred_score = np.max(result)
+        pred_label = np.argmax(result)
+        result = {'pred_label': pred_label, 'pred_score': float(pred_score)}
+        result['pred_class'] = model.CLASSES[result['pred_label']]
+        return model.show_result(
+            show_img,
+            result,
+            show=show_result,
+            win_name=window_name,
+            out_file=output_file)
+
+    @staticmethod
+    def run_inference(model: torch.nn.Module,
+                      model_inputs: Dict[str, torch.Tensor]) -> list:
+        """Run inference once for a classification model of mmcls.
+
+        Args:
+            model (nn.Module): Input model.
+            model_inputs (dict): A dict containing model inputs tensor and
+                meta info.
+
+        Returns:
+            list: The predictions of model inference.
+        """
+        return model(**model_inputs, return_loss=False)
+
+    @staticmethod
+    def get_partition_cfg(partition_type: str) -> Dict:
+        """Get a certain partition config.
+
+        Args:
+            partition_type (str): A string specifying partition type.
+
+        Returns:
+            dict: A dictionary of partition config.
+        """
+        raise NotImplementedError('Not supported yet.')
+
+    @staticmethod
+    def get_tensor_from_input(input_data: Dict[str, Any]) -> torch.Tensor:
+        """Get input tensor from input data.
+
+        Args:
+            input_data (dict): Input data containing meta info and image
+                tensor.
+
+        Returns:
+            torch.Tensor: An image in `Tensor`.
+        """
+        return input_data['img']
+
+    @staticmethod
+    def evaluate_outputs(model_cfg: mmcv.Config,
+                         outputs: list,
+                         dataset: Dataset,
+                         metrics: Optional[str] = None,
+                         out: Optional[str] = None,
+                         metric_options: Optional[dict] = None,
+                         format_only: bool = False) -> None:
+        """Perform post-processing on predictions of a model.
+
+        Args:
+            model_cfg (mmcv.Config): The model config.
+            outputs (list): A list of predictions of model inference.
+            dataset (Dataset): Input dataset to run test.
+            metrics (str): Evaluation metrics, which depends on
+                the codebase and the dataset, e.g., "mAP" in mmcls.
+            out (str): Output result file in pickle format. Default: None.
+            metric_options (dict): Custom options for evaluation, will be
+                kwargs for dataset.evaluate() function. Default: None.
+            format_only (bool): Format the output results without performing
+                evaluation. It is useful when you want to format the result
+                to a specific format and submit it to the test server.
+                Default: False.
+        """
+        import warnings
+
+        if metrics:
+            results = dataset.evaluate(outputs, metrics, metric_options)
+            for k, v in results.items():
+                logging.info(f'\n{k} : {v:.2f}')
+        else:
+            warnings.warn('Evaluation metrics are not specified.')
+            scores = np.vstack(outputs)
+            pred_score = np.max(scores, axis=1)
+            pred_label = np.argmax(scores, axis=1)
+            pred_class = [dataset.CLASSES[lb] for lb in pred_label]
+            results = {
+                'pred_score': pred_score,
+                'pred_label': pred_label,
+                'pred_class': pred_class
+            }
+            if not out:
+                logging.info(
+                    '\nthe predicted result for the first element is '
+                    f'pred_score = {pred_score[0]:.2f}, '
+                    f'pred_label = {pred_label[0]} '
+                    f'and pred_class = {pred_class[0]}. '
+                    'Specify --out to save all results to files.')
+        if out:
+            logging.info(f'\nwriting results to {out}')
+            mmcv.dump(results, out)
diff --git a/mmdeploy/codebase/mmcls/deploy/classification_model.py b/mmdeploy/codebase/mmcls/deploy/classification_model.py
new file mode 100644
index 0000000000..062eac2f02
--- /dev/null
+++ b/mmdeploy/codebase/mmcls/deploy/classification_model.py
@@ -0,0 +1,160 @@
+from typing import List, Sequence, Union
+
+import mmcv
+import numpy as np
+import torch
+from mmcls.datasets import DATASETS
+from mmcls.models.classifiers.base import BaseClassifier
+
+from mmdeploy.codebase.base import BaseBackendModel
+from mmdeploy.utils import Backend, get_backend, get_onnx_config, load_config
+
+
+class End2EndModel(BaseBackendModel):
+    """End to end model for inference of classification.
+
+    Args:
+        backend (Backend): The backend enum, specifying backend type.
+        backend_files (Sequence[str]): Paths to all required backend files
+            (e.g. '.onnx' for ONNX Runtime, '.param' and '.bin' for ncnn).
+        device (str): A string represents device type.
+        class_names (Sequence[str]): A list of string specifying class names.
+        deploy_cfg (str | mmcv.Config): Deployment config file or loaded
+            Config object.
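+
+    Example:
+        An illustrative sketch; the backend file name and class names are
+        placeholders, and `deploy_cfg` is assumed to be a loaded Config.
+
+        >>> model = End2EndModel(Backend.ONNXRUNTIME, ['end2end.onnx'],
+        ...                      'cpu', ['cat', 'dog'], deploy_cfg)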
+ """ + + def __init__( + self, + backend: Backend, + backend_files: Sequence[str], + device: str, + class_names: Sequence[str], + deploy_cfg: Union[str, mmcv.Config] = None, + ): + super(End2EndModel, self).__init__() + self.CLASSES = class_names + self.deploy_cfg = deploy_cfg + self._init_wrapper( + backend=backend, backend_files=backend_files, device=device) + + def _init_wrapper(self, backend: Backend, backend_files: Sequence[str], + device: str): + onnx_config = get_onnx_config(self.deploy_cfg) + output_names = onnx_config['output_names'] + self.wrapper = BaseBackendModel._build_wrapper( + backend=backend, + backend_files=backend_files, + device=device, + output_names=output_names) + + def forward(self, img: List[torch.Tensor], *args, **kwargs) -> list: + """Run forward inference. + + Args: + img (List[torch.Tensor]): A list contains input image(s) + in [N x C x H x W] format. + *args: Other arguments. + **kwargs: Other key-pair arguments. + + Returns: + list: A list contains predictions. + """ + + input_img = img[0].contiguous() + outputs = self.forward_test(input_img, *args, **kwargs) + + return list(outputs) + + def forward_test(self, imgs: torch.Tensor, *args, **kwargs) -> \ + List[np.ndarray]: + """The interface for forward test. + + Args: + imgs (torch.Tensor): Input image(s) in [N x C x H x W] format. + + Returns: + List[np.ndarray]: A list of classification prediction. + """ + outputs = self.wrapper({'input': imgs}) + outputs = self.wrapper.output_to_list(outputs) + outputs = [out.detach().cpu().numpy() for out in outputs] + return outputs + + def show_result(self, + img: np.ndarray, + result: list, + win_name: str, + show: bool = True, + out_file: str = None): + """Show predictions of classification. + Args: + img: (np.ndarray): Input image to draw predictions. + result (list): A list of predictions. + win_name (str): The name of visualization window. + show (bool): Whether to show plotted image in windows. Defaults to + `True`. + out_file (str): Output image file to save drawn predictions. + + Returns: + np.ndarray: Drawn image, only if not `show` or `out_file`. + """ + return BaseClassifier.show_result( + self, img, result, show=show, win_name=win_name, out_file=out_file) + + +def get_classes_from_config(model_cfg: Union[str, mmcv.Config]): + """Get class name from config. + + Args: + model_cfg (str | mmcv.Config): Input model config file or + Config object. + + Returns: + list[str]: A list of string specifying names of different class. + """ + model_cfg = load_config(model_cfg)[0] + module_dict = DATASETS.module_dict + data_cfg = model_cfg.data + + if 'train' in data_cfg: + module = module_dict[data_cfg.train.type] + elif 'val' in data_cfg: + module = module_dict[data_cfg.val.type] + elif 'test' in data_cfg: + module = module_dict[data_cfg.test.type] + else: + raise RuntimeError(f'No dataset config found in: {model_cfg}') + + return module.CLASSES + + +def build_classification_model(model_files: Sequence[str], + model_cfg: Union[str, mmcv.Config], + deploy_cfg: Union[str, mmcv.Config], + device: str, **kwargs): + """Build classification model for different backend. + + Args: + model_files (Sequence[str]): Input model file(s). + model_cfg (str | mmcv.Config): Input model config file or Config + object. + deploy_cfg (str | mmcv.Config): Input deployment config file or + Config object. + device (str): Device to input model. + + Returns: + BaseBackendModel: Classifier for a configured backend. 
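+
+    Example:
+        An illustrative sketch; the file names below are placeholders.
+
+        >>> model = build_classification_model(
+        ...     ['end2end.onnx'], 'mmcls_model_cfg.py', 'deploy_cfg.py',
+        ...     device='cpu')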
+    """
+    # load cfg if necessary
+    deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)
+
+    backend = get_backend(deploy_cfg)
+    class_names = get_classes_from_config(model_cfg)
+    backend_classifier = End2EndModel(
+        backend,
+        model_files,
+        device,
+        class_names,
+        deploy_cfg=deploy_cfg,
+        **kwargs)
+    return backend_classifier
diff --git a/mmdeploy/codebase/mmcls/deploy/mmclassification.py b/mmdeploy/codebase/mmcls/deploy/mmclassification.py
new file mode 100644
index 0000000000..8d55a01b12
--- /dev/null
+++ b/mmdeploy/codebase/mmcls/deploy/mmclassification.py
@@ -0,0 +1,140 @@
+from typing import List, Optional, Union
+
+import mmcv
+import torch
+from mmcv.utils import Registry
+from torch.utils.data import DataLoader, Dataset
+
+from mmdeploy.codebase.base import CODEBASE, BaseTask, MMCodebase
+from mmdeploy.utils import Codebase, get_task_type
+
+
+def __build_mmcls_task(model_cfg: mmcv.Config, deploy_cfg: mmcv.Config,
+                       device: str, registry: Registry) -> BaseTask:
+    task = get_task_type(deploy_cfg)
+    return registry.module_dict[task.value](model_cfg, deploy_cfg, device)
+
+
+MMCLS_TASK = Registry('mmcls_tasks', build_func=__build_mmcls_task)
+
+
+@CODEBASE.register_module(Codebase.MMCLS.value)
+class MMClassification(MMCodebase):
+    """mmclassification codebase class."""
+
+    task_registry = MMCLS_TASK
+
+    def __init__(self):
+        super(MMClassification, self).__init__()
+
+    @staticmethod
+    def build_task_processor(model_cfg: mmcv.Config, deploy_cfg: mmcv.Config,
+                             device: str) -> BaseTask:
+        """The interface to build the task processors of mmcls.
+
+        Args:
+            model_cfg (mmcv.Config): Model config file.
+            deploy_cfg (mmcv.Config): Deployment config file.
+            device (str): A string specifying device type.
+
+        Returns:
+            BaseTask: A task processor.
+        """
+        return MMCLS_TASK.build(model_cfg, deploy_cfg, device)
+
+    @staticmethod
+    def build_dataset(dataset_cfg: Union[str, mmcv.Config],
+                      dataset_type: str = 'val',
+                      **kwargs) -> Dataset:
+        """Build dataset for classification.
+
+        Args:
+            dataset_cfg (str | mmcv.Config): The input dataset config.
+            dataset_type (str): A string represents dataset type, e.g.
+                'train', 'test', 'val'. Default: 'val'.
+
+        Returns:
+            Dataset: A PyTorch dataset.
+        """
+
+        from mmcls.datasets import build_dataset as build_dataset_mmcls
+
+        from mmdeploy.utils import load_config
+
+        dataset_cfg = load_config(dataset_cfg)[0]
+        data = dataset_cfg.data
+        assert dataset_type in data
+
+        dataset = build_dataset_mmcls(data[dataset_type])
+
+        return dataset
+
+    @staticmethod
+    def build_dataloader(dataset: Dataset,
+                         samples_per_gpu: int,
+                         workers_per_gpu: int,
+                         num_gpus: int = 1,
+                         dist: bool = False,
+                         shuffle: bool = False,
+                         round_up: bool = True,
+                         seed: Optional[int] = None,
+                         pin_memory: bool = True,
+                         persistent_workers: bool = True,
+                         **kwargs) -> DataLoader:
+        """Build dataloader for classifier.
+
+        Args:
+            dataset (Dataset): Input dataset.
+            samples_per_gpu (int): Number of training samples on each GPU,
+                i.e., batch size of each GPU.
+            workers_per_gpu (int): How many subprocesses to use for data
+                loading for each GPU.
+            num_gpus (int): Number of GPUs. Only used in non-distributed
+                training.
+            dist (bool): Distributed training/test or not. Default: False.
+            shuffle (bool): Whether to shuffle the data at every epoch.
+                Default: False.
+            round_up (bool): Whether to round up the length of dataset by
+                adding extra samples to make it evenly divisible.
+                Default: True.
+            seed (int): An integer set to be seed. Default: None.
+ pin_memory (bool): Whether to use pin_memory in DataLoader. + Default: True. + persistent_workers (bool): If `True`, the data loader will not + shutdown the worker processes after a dataset has been + consumed once. This allows to maintain the workers Dataset + instances alive. The argument also has effect in + PyTorch>=1.7.0. Default: True. + kwargs: Any other keyword argument to be used to initialize + DataLoader. + + Returns: + DataLoader: A PyTorch dataloader. + """ + from mmcls.datasets import build_dataloader as build_dataloader_mmcls + return build_dataloader_mmcls(dataset, samples_per_gpu, + workers_per_gpu, num_gpus, dist, shuffle, + round_up, seed, pin_memory, + persistent_workers, **kwargs) + + @staticmethod + def single_gpu_test(model: torch.nn.Module, + data_loader: DataLoader, + show: bool = False, + out_dir: Optional[str] = None, + **kwargs) -> List: + """Run test with single gpu. + + Args: + model (torch.nn.Module): Input model from nn.Module. + data_loader (DataLoader): PyTorch data loader. + show (bool): Specifying whether to show plotted results. + Default: False. + out_dir (str): A directory to save results, Default: None. + + Returns: + list: The prediction results. + """ + from mmcls.apis import single_gpu_test + outputs = single_gpu_test(model, data_loader, show, out_dir, **kwargs) + return outputs diff --git a/mmdeploy/mmcls/models/__init__.py b/mmdeploy/codebase/mmcls/models/__init__.py similarity index 100% rename from mmdeploy/mmcls/models/__init__.py rename to mmdeploy/codebase/mmcls/models/__init__.py diff --git a/mmdeploy/mmcls/models/backbones/__init__.py b/mmdeploy/codebase/mmcls/models/backbones/__init__.py similarity index 100% rename from mmdeploy/mmcls/models/backbones/__init__.py rename to mmdeploy/codebase/mmcls/models/backbones/__init__.py diff --git a/mmdeploy/mmcls/models/backbones/shufflenet_v2.py b/mmdeploy/codebase/mmcls/models/backbones/shufflenet_v2.py similarity index 100% rename from mmdeploy/mmcls/models/backbones/shufflenet_v2.py rename to mmdeploy/codebase/mmcls/models/backbones/shufflenet_v2.py diff --git a/mmdeploy/mmcls/models/classifiers/__init__.py b/mmdeploy/codebase/mmcls/models/classifiers/__init__.py similarity index 100% rename from mmdeploy/mmcls/models/classifiers/__init__.py rename to mmdeploy/codebase/mmcls/models/classifiers/__init__.py diff --git a/mmdeploy/mmcls/models/classifiers/base.py b/mmdeploy/codebase/mmcls/models/classifiers/base.py similarity index 100% rename from mmdeploy/mmcls/models/classifiers/base.py rename to mmdeploy/codebase/mmcls/models/classifiers/base.py diff --git a/mmdeploy/mmcls/models/heads/__init__.py b/mmdeploy/codebase/mmcls/models/heads/__init__.py similarity index 100% rename from mmdeploy/mmcls/models/heads/__init__.py rename to mmdeploy/codebase/mmcls/models/heads/__init__.py diff --git a/mmdeploy/mmcls/models/heads/cls_head.py b/mmdeploy/codebase/mmcls/models/heads/cls_head.py similarity index 100% rename from mmdeploy/mmcls/models/heads/cls_head.py rename to mmdeploy/codebase/mmcls/models/heads/cls_head.py diff --git a/mmdeploy/mmcls/models/heads/multi_label_head.py b/mmdeploy/codebase/mmcls/models/heads/multi_label_head.py similarity index 100% rename from mmdeploy/mmcls/models/heads/multi_label_head.py rename to mmdeploy/codebase/mmcls/models/heads/multi_label_head.py diff --git a/mmdeploy/codebase/mmdet/__init__.py b/mmdeploy/codebase/mmdet/__init__.py new file mode 100644 index 0000000000..66d489e720 --- /dev/null +++ b/mmdeploy/codebase/mmdet/__init__.py @@ -0,0 +1,9 @@ +from .core 
import * # noqa: F401,F403 +from .deploy import (MMDetection, ObjectDetection, clip_bboxes, + get_post_processing_params, pad_with_value) +from .models import * # noqa: F401,F403 + +__all__ = [ + 'get_post_processing_params', 'clip_bboxes', 'pad_with_value', + 'MMDetection', 'ObjectDetection' +] diff --git a/mmdeploy/mmdet/core/__init__.py b/mmdeploy/codebase/mmdet/core/__init__.py similarity index 100% rename from mmdeploy/mmdet/core/__init__.py rename to mmdeploy/codebase/mmdet/core/__init__.py diff --git a/mmdeploy/codebase/mmdet/core/bbox/__init__.py b/mmdeploy/codebase/mmdet/core/bbox/__init__.py new file mode 100644 index 0000000000..1d9a90bde6 --- /dev/null +++ b/mmdeploy/codebase/mmdet/core/bbox/__init__.py @@ -0,0 +1,3 @@ +from .delta_xywh_bbox_coder import * # noqa: F401,F403 +from .tblr_bbox_coder import * # noqa: F401,F403 +from .transforms import * # noqa: F401,F403 diff --git a/mmdeploy/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py b/mmdeploy/codebase/mmdet/core/bbox/delta_xywh_bbox_coder.py similarity index 94% rename from mmdeploy/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py rename to mmdeploy/codebase/mmdet/core/bbox/delta_xywh_bbox_coder.py index 541d5a9a5c..0d989d6ad9 100644 --- a/mmdeploy/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py +++ b/mmdeploy/codebase/mmdet/core/bbox/delta_xywh_bbox_coder.py @@ -95,7 +95,7 @@ def delta2bbox(ctx, y2 = gy + gh * 0.5 if clip_border and max_shape is not None: - from mmdeploy.mmdet.export import clip_bboxes + from mmdeploy.codebase.mmdet.deploy import clip_bboxes x1, y1, x2, y2 = clip_bboxes(x1, y1, x2, y2, max_shape) bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view(deltas.size()) @@ -105,16 +105,16 @@ def delta2bbox(ctx, @FUNCTION_REWRITER.register_rewriter( func_name='mmdet.core.bbox.coder.delta_xywh_bbox_coder.delta2bbox', # noqa backend='ncnn') -def delta2bbox_ncnn(ctx, - rois, - deltas, - means=(0., 0., 0., 0.), - stds=(1., 1., 1., 1.), - max_shape=None, - wh_ratio_clip=16 / 1000, - clip_border=True, - add_ctr_clamp=False, - ctr_clamp=32): +def delta2bbox__ncnn(ctx, + rois, + deltas, + means=(0., 0., 0., 0.), + stds=(1., 1., 1., 1.), + max_shape=None, + wh_ratio_clip=16 / 1000, + clip_border=True, + add_ctr_clamp=False, + ctr_clamp=32): """Rewrite `delta2bbox` for ncnn backend. Batch dimension is not supported by ncnn, but supported by pytorch. 
@@ -216,7 +216,7 @@ def delta2bbox_ncnn(ctx, y2 = gy + gh * 0.5 if clip_border and max_shape is not None: - from mmdeploy.mmdet.export import clip_bboxes + from mmdeploy.codebase.mmdet.deploy import clip_bboxes x1, y1, x2, y2 = clip_bboxes(x1, y1, x2, y2, max_shape) bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view(deltas.size()) diff --git a/mmdeploy/mmdet/core/bbox/coder/tblr_bbox_coder.py b/mmdeploy/codebase/mmdet/core/bbox/tblr_bbox_coder.py similarity index 93% rename from mmdeploy/mmdet/core/bbox/coder/tblr_bbox_coder.py rename to mmdeploy/codebase/mmdet/core/bbox/tblr_bbox_coder.py index 7c5ff5c71f..a568ce8ef2 100644 --- a/mmdeploy/mmdet/core/bbox/coder/tblr_bbox_coder.py +++ b/mmdeploy/codebase/mmdet/core/bbox/tblr_bbox_coder.py @@ -65,7 +65,7 @@ def tblr2bboxes(ctx, ymax = prior_centers[..., 1].unsqueeze(-1) + bottom if clip_border and max_shape is not None: - from mmdeploy.mmdet.export import clip_bboxes + from mmdeploy.codebase.mmdet.deploy import clip_bboxes xmin, ymin, xmax, ymax = clip_bboxes(xmin, ymin, xmax, ymax, max_shape) bboxes = torch.cat([xmin, ymin, xmax, ymax], dim=-1).view(priors.size()) @@ -75,13 +75,13 @@ def tblr2bboxes(ctx, @FUNCTION_REWRITER.register_rewriter( func_name='mmdet.core.bbox.coder.tblr_bbox_coder.tblr2bboxes', backend='ncnn') -def tblr2bboxes_ncnn(ctx, - priors, - tblr, - normalizer=4.0, - normalize_by_wh=True, - max_shape=None, - clip_border=True): +def tblr2bboxes__ncnn(ctx, + priors, + tblr, + normalizer=4.0, + normalize_by_wh=True, + max_shape=None, + clip_border=True): """Rewrite `tblr2bboxes` for ncnn backend. Batch dimension is not supported by ncnn, but supported by pytorch. @@ -137,7 +137,7 @@ def tblr2bboxes_ncnn(ctx, ymax = prior_centers[..., 1].unsqueeze(-1) + bottom if clip_border and max_shape is not None: - from mmdeploy.mmdet.export import clip_bboxes + from mmdeploy.codebase.mmdet.deploy import clip_bboxes xmin, ymin, xmax, ymax = clip_bboxes(xmin, ymin, xmax, ymax, max_shape) bboxes = torch.cat([xmin, ymin, xmax, ymax], dim=-1).view(priors.size()) diff --git a/mmdeploy/mmdet/core/bbox/transforms.py b/mmdeploy/codebase/mmdet/core/bbox/transforms.py similarity index 87% rename from mmdeploy/mmdet/core/bbox/transforms.py rename to mmdeploy/codebase/mmdet/core/bbox/transforms.py index a4713b31b0..7f0a0d016f 100644 --- a/mmdeploy/mmdet/core/bbox/transforms.py +++ b/mmdeploy/codebase/mmdet/core/bbox/transforms.py @@ -1,10 +1,12 @@ import torch -from mmdeploy.mmdet.export import clip_bboxes +from mmdeploy.codebase.mmdet.deploy import clip_bboxes def distance2bbox(points, distance, max_shape=None): - """Decode distance prediction to bounding box. + """Rewrite `mmdet.core.bbox.transforms.distance2bbox` + + Decode distance prediction to bounding box. Args: points (Tensor): Shape (B, N, 2) or (N, 2). 
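The renamed rewriters above (`delta2bbox__ncnn`, `tblr2bboxes__ncnn`) all follow the same registration pattern: a function decorated with `FUNCTION_REWRITER.register_rewriter` replaces the original implementation during export, and the injected `ctx` argument carries the deploy config. A minimal sketch of the pattern; the target function is real, but the rewrite body here is illustrative only:

    from mmdeploy.core import FUNCTION_REWRITER

    @FUNCTION_REWRITER.register_rewriter(
        func_name='mmdet.core.bbox.transforms.distance2bbox',
        backend='ncnn')  # applied only when exporting for this backend
    def distance2bbox__ncnn(ctx, points, distance, max_shape=None):
        # `ctx.cfg` exposes the deploy config to the rewritten body,
        # which returns a backend-friendly computation in place of the
        # original implementation.
        ...
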
diff --git a/mmdeploy/mmdet/core/post_processing/__init__.py b/mmdeploy/codebase/mmdet/core/post_processing/__init__.py similarity index 100% rename from mmdeploy/mmdet/core/post_processing/__init__.py rename to mmdeploy/codebase/mmdet/core/post_processing/__init__.py diff --git a/mmdeploy/mmdet/core/post_processing/bbox_nms.py b/mmdeploy/codebase/mmdet/core/post_processing/bbox_nms.py similarity index 97% rename from mmdeploy/mmdet/core/post_processing/bbox_nms.py rename to mmdeploy/codebase/mmdet/core/post_processing/bbox_nms.py index 8f4b7be375..70aa1afd92 100644 --- a/mmdeploy/mmdet/core/post_processing/bbox_nms.py +++ b/mmdeploy/codebase/mmdet/core/post_processing/bbox_nms.py @@ -129,7 +129,7 @@ def _multiclass_nms(boxes: Tensor, @FUNCTION_REWRITER.register_rewriter( - func_name='mmdeploy.mmdet.core.post_processing._multiclass_nms', + func_name='mmdeploy.codebase.mmdet.core.post_processing._multiclass_nms', backend='tensorrt') def multiclass_nms_static(ctx, boxes: Tensor, @@ -173,4 +173,5 @@ def multiclass_nms_static(ctx, @mark('multiclass_nms', inputs=['boxes', 'scores'], outputs=['dets', 'labels']) def multiclass_nms(*args, **kwargs): """Wrapper function for `_multiclass_nms`.""" - return mmdeploy.mmdet.core.post_processing._multiclass_nms(*args, **kwargs) + return mmdeploy.codebase.mmdet.core.post_processing._multiclass_nms( + *args, **kwargs) diff --git a/mmdeploy/codebase/mmdet/deploy/__init__.py b/mmdeploy/codebase/mmdet/deploy/__init__.py new file mode 100644 index 0000000000..c1563b7c3f --- /dev/null +++ b/mmdeploy/codebase/mmdet/deploy/__init__.py @@ -0,0 +1,8 @@ +from .mmdetection import MMDetection +from .object_detection import ObjectDetection +from .utils import clip_bboxes, get_post_processing_params, pad_with_value + +__all__ = [ + 'get_post_processing_params', 'clip_bboxes', 'pad_with_value', + 'MMDetection', 'ObjectDetection' +] diff --git a/mmdeploy/codebase/mmdet/deploy/mmdetection.py b/mmdeploy/codebase/mmdet/deploy/mmdetection.py new file mode 100644 index 0000000000..7d063dcc4e --- /dev/null +++ b/mmdeploy/codebase/mmdet/deploy/mmdetection.py @@ -0,0 +1,142 @@ +from typing import Optional, Union + +import mmcv +import torch +from mmcv.utils import Registry +from mmdet.datasets import replace_ImageToTensor +from torch.utils.data import DataLoader, Dataset + +from mmdeploy.utils import Codebase, get_task_type +from ...base import CODEBASE, BaseTask, MMCodebase + + +def __build_mmdet_task(model_cfg: mmcv.Config, deploy_cfg: mmcv.Config, + device: str, registry: Registry) -> BaseTask: + task = get_task_type(deploy_cfg) + return registry.module_dict[task.value](model_cfg, deploy_cfg, device) + + +MMDET_TASK = Registry('mmdet_tasks', build_func=__build_mmdet_task) + + +@CODEBASE.register_module(Codebase.MMDET.value) +class MMDetection(MMCodebase): + + task_registry = MMDET_TASK + + def __init__(self) -> None: + super().__init__() + + @staticmethod + def build_task_processor(model_cfg: mmcv.Config, deploy_cfg: mmcv.Config, + device: str): + """The interface to build the task processors of mmdet. + + Args: + model_cfg (str | mmcv.Config): Model config file. + deploy_cfg (str | mmcv.Config): Deployment config file. + device (str): A string specifying device type. + + Returns: + BaseTask: A task processor. + """ + return MMDET_TASK.build(model_cfg, deploy_cfg, device) + + @staticmethod + def build_dataset(dataset_cfg: Union[str, mmcv.Config], + dataset_type: str = 'val', + **kwargs) -> Dataset: + """Build dataset for detection. 
+
+        Args:
+            dataset_cfg (str | mmcv.Config): The input dataset config.
+            dataset_type (str): A string represents dataset type, e.g.
+                'train', 'test', 'val'. Defaults to 'val'.
+
+        Returns:
+            Dataset: A PyTorch dataset.
+        """
+        from mmdet.datasets import build_dataset as build_dataset_mmdet
+
+        assert dataset_type in dataset_cfg.data
+        data_cfg = dataset_cfg.data[dataset_type]
+        # in case the dataset is concatenated
+        if isinstance(data_cfg, dict):
+            data_cfg.test_mode = True
+            samples_per_gpu = data_cfg.get('samples_per_gpu', 1)
+            if samples_per_gpu > 1:
+                # Replace 'ImageToTensor' to 'DefaultFormatBundle'
+                data_cfg.pipeline = replace_ImageToTensor(data_cfg.pipeline)
+        elif isinstance(data_cfg, list):
+            for ds_cfg in data_cfg:
+                ds_cfg.test_mode = True
+            samples_per_gpu = max(
+                [ds_cfg.get('samples_per_gpu', 1) for ds_cfg in data_cfg])
+            if samples_per_gpu > 1:
+                for ds_cfg in data_cfg:
+                    ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)
+        dataset = build_dataset_mmdet(data_cfg)
+
+        return dataset
+
+    @staticmethod
+    def build_dataloader(dataset: Dataset,
+                         samples_per_gpu: int,
+                         workers_per_gpu: int,
+                         num_gpus: int = 1,
+                         dist: bool = False,
+                         shuffle: bool = False,
+                         seed: Optional[int] = None,
+                         **kwargs) -> DataLoader:
+        """Build dataloader for detection.
+
+        Args:
+            dataset (Dataset): Input dataset.
+            samples_per_gpu (int): Number of training samples on each GPU,
+                i.e., batch size of each GPU.
+            workers_per_gpu (int): How many subprocesses to use for data
+                loading for each GPU.
+            num_gpus (int): Number of GPUs. Only used in non-distributed
+                training.
+            dist (bool): Distributed training/test or not. Defaults to
+                `False`.
+            shuffle (bool): Whether to shuffle the data at every epoch.
+                Defaults to `False`.
+            seed (int): An integer set to be seed. Defaults to `None`.
+            kwargs: Any other keyword argument to be used to initialize
+                DataLoader.
+
+        Returns:
+            DataLoader: A PyTorch dataloader.
+        """
+        from mmdet.datasets import build_dataloader as build_dataloader_mmdet
+        return build_dataloader_mmdet(
+            dataset,
+            samples_per_gpu,
+            workers_per_gpu,
+            num_gpus=num_gpus,
+            dist=dist,
+            shuffle=shuffle,
+            seed=seed,
+            **kwargs)
+
+    @staticmethod
+    def single_gpu_test(model: torch.nn.Module,
+                        data_loader: DataLoader,
+                        show: bool = False,
+                        out_dir: Optional[str] = None,
+                        **kwargs):
+        """Run test with single gpu.
+
+        Args:
+            model (torch.nn.Module): Input model from nn.Module.
+            data_loader (DataLoader): PyTorch data loader.
+            show (bool): Specifying whether to show plotted results. Defaults
+                to `False`.
+            out_dir (str): A directory to save results, defaults to `None`.
+
+        Returns:
+            list: The prediction results.
+        """
+        from mmdet.apis import single_gpu_test
+        outputs = single_gpu_test(model, data_loader, show, out_dir, **kwargs)
+        return outputs
diff --git a/mmdeploy/mmdet/export/model_partition.py b/mmdeploy/codebase/mmdet/deploy/model_partition_cfg.py
similarity index 80%
rename from mmdeploy/mmdet/export/model_partition.py
rename to mmdeploy/codebase/mmdet/deploy/model_partition_cfg.py
index d53522d4cc..76b11d5bae 100644
--- a/mmdeploy/mmdet/export/model_partition.py
+++ b/mmdeploy/codebase/mmdet/deploy/model_partition_cfg.py
@@ -59,17 +59,3 @@
         },
     )
 ])
-
-
-def get_partition_cfg(partition_type: str):
-    """Get a certain partition config for mmdet.
-
-    Args:
-        partition_type (str): A string specifying partition type.
-
-    Returns:
-        dict: A dictionary of partition config.
-    """
-    assert (partition_type
-            in MMDET_PARTITION_CFG), f'Unknown partition_type {partition_type}'
-    return MMDET_PARTITION_CFG[partition_type]
diff --git a/mmdeploy/codebase/mmdet/deploy/object_detection.py b/mmdeploy/codebase/mmdet/deploy/object_detection.py
new file mode 100644
index 0000000000..e47e89e409
--- /dev/null
+++ b/mmdeploy/codebase/mmdet/deploy/object_detection.py
@@ -0,0 +1,235 @@
+import logging
+from typing import Any, Dict, Optional, Sequence, Tuple, Union
+
+import mmcv
+import numpy as np
+import torch
+from torch.utils.data import Dataset
+
+from mmdeploy.utils import Task
+from ...base import BaseTask
+from .mmdetection import MMDET_TASK
+
+
+@MMDET_TASK.register_module(Task.OBJECT_DETECTION.value)
+class ObjectDetection(BaseTask):
+    """Object detection task class.
+
+    Args:
+        model_cfg (mmcv.Config): Original PyTorch model config file.
+        deploy_cfg (mmcv.Config): Deployment config file or loaded Config
+            object.
+        device (str): A string represents device type.
+    """
+
+    def __init__(self, model_cfg: mmcv.Config, deploy_cfg: mmcv.Config,
+                 device: str) -> None:
+        super().__init__(model_cfg, deploy_cfg, device)
+
+    def init_backend_model(self,
+                           model_files: Sequence[str] = None,
+                           **kwargs) -> torch.nn.Module:
+        """Initialize backend model.
+
+        Args:
+            model_files (Sequence[str]): Input model files.
+
+        Returns:
+            nn.Module: An initialized backend model.
+        """
+        from .object_detection_model import build_object_detection_model
+        model = build_object_detection_model(
+            model_files, self.model_cfg, self.deploy_cfg, device=self.device)
+        return model.eval()
+
+    def init_pytorch_model(self,
+                           model_checkpoint: Optional[str] = None,
+                           cfg_options: Optional[Dict] = None,
+                           **kwargs) -> torch.nn.Module:
+        """Initialize torch model.
+
+        Args:
+            model_checkpoint (str): The checkpoint file of torch model,
+                defaults to `None`.
+            cfg_options (dict): Optional config key-pair parameters.
+
+        Returns:
+            nn.Module: An initialized torch model generated by an OpenMMLab
+                codebase.
+        """
+        from mmdet.apis import init_detector
+        model = init_detector(self.model_cfg, model_checkpoint, self.device,
+                              cfg_options)
+        return model.eval()
+
+    def create_input(self,
+                     imgs: Union[str, np.ndarray],
+                     input_shape: Sequence[int] = None) \
+            -> Tuple[Dict, torch.Tensor]:
+        """Create input for detector.
+
+        Args:
+            imgs (str | np.ndarray): Input image(s), accepted data types are
+                `str` and `np.ndarray`.
+            input_shape (list[int]): A list of two integers in (width, height)
+                format specifying input shape. Defaults to `None`.
+
+        Returns:
+            tuple: (data, img), meta information for the input image and input.
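+
+        Example:
+            An illustrative sketch; the image path is a placeholder and
+            `task` is a processor built elsewhere.
+
+            >>> data, img = task.create_input('demo.jpg',
+            ...                               input_shape=[1344, 800])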
+        """
+        from mmcv.parallel import collate, scatter
+        from mmdet.datasets import replace_ImageToTensor
+        from mmdet.datasets.pipelines import Compose
+
+        cfg = self.model_cfg.copy()
+
+        if not isinstance(imgs, (list, tuple)):
+            imgs = [imgs]
+
+        if isinstance(imgs[0], np.ndarray):
+            cfg = cfg.copy()
+            # set loading pipeline type
+            cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'
+        # for static exporting
+        if input_shape is not None:
+            cfg.data.test.pipeline[1]['img_scale'] = tuple(input_shape)
+            transforms = cfg.data.test.pipeline[1]['transforms']
+            for trans in transforms:
+                trans_type = trans['type']
+                if trans_type == 'Resize':
+                    trans['keep_ratio'] = False
+                elif trans_type == 'Pad':
+                    trans['size_divisor'] = 1
+
+        cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
+        test_pipeline = Compose(cfg.data.test.pipeline)
+        data_list = []
+        for img in imgs:
+            # prepare data
+            if isinstance(img, np.ndarray):
+                # directly add img
+                data = dict(img=img)
+            else:
+                # add information into dict
+                data = dict(img_info=dict(filename=img), img_prefix=None)
+            # build the data pipeline
+            data = test_pipeline(data)
+            data_list.append(data)
+
+        data = collate(data_list, samples_per_gpu=len(imgs))
+
+        data['img_metas'] = [
+            img_metas.data[0] for img_metas in data['img_metas']
+        ]
+        data['img'] = [img.data[0] for img in data['img']]
+        if self.device != 'cpu':
+            data = scatter(data, [self.device])[0]
+
+        return data, data['img']
+
+    def visualize(self,
+                  model: torch.nn.Module,
+                  image: Union[str, np.ndarray],
+                  result: list,
+                  output_file: str,
+                  window_name: str = '',
+                  show_result: bool = False,
+                  score_thr: float = 0.3):
+        """Visualize predictions of a model.
+
+        Args:
+            model (nn.Module): Input model.
+            image (str | np.ndarray): Input image to draw predictions on.
+            result (list): A list of predictions.
+            output_file (str): Output file to save drawn image.
+            window_name (str): The name of visualization window. Defaults to
+                an empty string.
+            show_result (bool): Whether to show result in windows, defaults
+                to `False`.
+            score_thr (float): The score threshold to display the bbox.
+                Defaults to 0.3.
+        """
+        show_img = mmcv.imread(image) if isinstance(image, str) else image
+        output_file = None if show_result else output_file
+        model.show_result(
+            show_img,
+            result=result,
+            win_name=window_name,
+            show=show_result,
+            out_file=output_file,
+            score_thr=score_thr)
+
+    @staticmethod
+    def run_inference(model, model_inputs: Dict[str, torch.Tensor]):
+        """Run inference once for an object detection model of mmdet.
+
+        Args:
+            model (nn.Module): Input model.
+            model_inputs (dict): A dict containing model inputs tensor and
+                meta info.
+
+        Returns:
+            list: The predictions of model inference.
+        """
+        return model(**model_inputs, return_loss=False, rescale=True)
+
+    @staticmethod
+    def get_partition_cfg(partition_type: str) -> Dict:
+        """Get a certain partition config for mmdet.
+
+        Args:
+            partition_type (str): A string specifying partition type.
+
+        Returns:
+            dict: A dictionary of partition config.
+        """
+        from .model_partition_cfg import MMDET_PARTITION_CFG
+        assert (partition_type in MMDET_PARTITION_CFG), \
+            f'Unknown partition_type {partition_type}'
+        return MMDET_PARTITION_CFG[partition_type]
+
+    @staticmethod
+    def get_tensor_from_input(input_data: Dict[str, Any]) -> torch.Tensor:
+        """Get input tensor from input data.
+
+        Args:
+            input_data (dict): Input data containing meta info and image
+                tensor.
+
+        Returns:
+            torch.Tensor: An image in `Tensor`.
+        """
+        return input_data['img'][0]
+
+    @staticmethod
+    def evaluate_outputs(model_cfg,
+                         outputs: Sequence,
+                         dataset: Dataset,
+                         metrics: Optional[str] = None,
+                         out: Optional[str] = None,
+                         metric_options: Optional[dict] = None,
+                         format_only: bool = False):
+        """Perform post-processing on predictions of a model.
+
+        Args:
+            model_cfg (mmcv.Config): The model config.
+            outputs (list): A list of predictions of model inference.
+            dataset (Dataset): Input dataset to run test.
+            metrics (str): Evaluation metrics, which depends on
+                the codebase and the dataset, e.g., "bbox", "segm", "proposal"
+                for COCO, and "mAP", "recall" for PASCAL VOC in mmdet.
+            out (str): Output result file in pickle format, defaults to `None`.
+            metric_options (dict): Custom options for evaluation, will be
+                kwargs for dataset.evaluate() function. Defaults to `None`.
+            format_only (bool): Format the output results without performing
+                evaluation. It is useful when you want to format the result
+                to a specific format and submit it to the test server. Defaults
+                to `False`.
+        """
+        if out:
+            logging.info(f'\nwriting results to {out}')
+            mmcv.dump(outputs, out)
+        kwargs = {} if metric_options is None else metric_options
+        if format_only:
+            dataset.format_results(outputs, **kwargs)
+        if metrics:
+            eval_kwargs = model_cfg.get('evaluation', {}).copy()
+            # hard-code way to remove EvalHook args
+            for key in [
+                    'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
+                    'rule'
+            ]:
+                eval_kwargs.pop(key, None)
+            eval_kwargs.update(dict(metric=metrics, **kwargs))
+            print(dataset.evaluate(outputs, **eval_kwargs))
diff --git a/mmdeploy/codebase/mmdet/deploy/object_detection_model.py b/mmdeploy/codebase/mmdet/deploy/object_detection_model.py
new file mode 100644
index 0000000000..236d4a104f
--- /dev/null
+++ b/mmdeploy/codebase/mmdet/deploy/object_detection_model.py
@@ -0,0 +1,579 @@
+from functools import partial
+from typing import List, Sequence, Tuple, Union
+
+import mmcv
+import numpy as np
+import torch
+import torch.nn.functional as F
+from mmcv.utils import Registry
+from mmdet.core import bbox2result
+from mmdet.datasets import DATASETS
+from mmdet.models import BaseDetector
+
+from mmdeploy.backend.base import get_backend_file_count
+from mmdeploy.codebase.base import BaseBackendModel
+from mmdeploy.codebase.mmdet import get_post_processing_params, multiclass_nms
+from mmdeploy.utils import (Backend, get_backend, get_onnx_config,
+                            get_partition_config, load_config)
+
+
+def __build_backend_model(partition_name: str, backend: Backend,
+                          backend_files: Sequence[str], device: str,
+                          class_names: Sequence[str],
+                          model_cfg: Union[str, mmcv.Config],
+                          deploy_cfg: Union[str, mmcv.Config],
+                          registry: Registry, **kwargs):
+    return registry.module_dict[partition_name](
+        backend=backend,
+        backend_files=backend_files,
+        class_names=class_names,
+        device=device,
+        model_cfg=model_cfg,
+        deploy_cfg=deploy_cfg,
+        **kwargs)
+
+
+# Use registry to store models with different partition methods
+# If a model doesn't need partitioning, we don't need this registry
+__BACKEND_MODEl = mmcv.utils.Registry(
+    'backend_detectors', build_func=__build_backend_model)
+
+
+@__BACKEND_MODEl.register_module('end2end')
+class End2EndModel(BaseBackendModel):
+    """End to end model for inference of detection.
+
+    Args:
+        backend (Backend): The backend enum, specifying backend type.
+        backend_files (Sequence[str]): Paths to all required backend files
+            (e.g. '.onnx' for ONNX Runtime, '.param' and '.bin' for ncnn).
+        device (str): A string represents device type.
+        class_names (Sequence[str]): A list of string specifying class names.
+        deploy_cfg (str | mmcv.Config): Deployment config file or loaded
+            Config object.
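+
+    Example:
+        An illustrative sketch; the engine file name is a placeholder, and
+        `class_names` and `deploy_cfg` are assumed to come from the configs.
+
+        >>> model = End2EndModel(Backend.TENSORRT, ['end2end.engine'],
+        ...                      'cuda:0', class_names, deploy_cfg)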
+    """
+
+    def __init__(self, backend: Backend, backend_files: Sequence[str],
+                 device: str, class_names: Sequence[str],
+                 deploy_cfg: Union[str, mmcv.Config], **kwargs):
+        super().__init__()
+        self.CLASSES = class_names
+        self.deploy_cfg = deploy_cfg
+        self._init_wrapper(
+            backend=backend, backend_files=backend_files, device=device)
+
+    def _init_wrapper(self, backend, backend_files, device):
+        onnx_config = get_onnx_config(self.deploy_cfg)
+        output_names = onnx_config['output_names']
+        self.wrapper = BaseBackendModel._build_wrapper(
+            backend=backend,
+            backend_files=backend_files,
+            device=device,
+            output_names=output_names)
+
+    @staticmethod
+    def __clear_outputs(
+            test_outputs: List[Union[torch.Tensor, np.ndarray]]
+    ) -> List[Union[List[torch.Tensor], List[np.ndarray]]]:
+        """Remove additional outputs and detections with zero or negative
+        score.
+
+        Args:
+            test_outputs (List[Union[torch.Tensor, np.ndarray]]):
+                Outputs of forward_test.
+
+        Returns:
+            List[Union[List[torch.Tensor], List[np.ndarray]]]:
+                Outputs without zero-score objects.
+        """
+        batch_size = len(test_outputs[0])
+
+        num_outputs = len(test_outputs)
+        outputs = [[None for _ in range(batch_size)]
+                   for _ in range(num_outputs)]
+
+        for i in range(batch_size):
+            inds = test_outputs[0][i, :, 4] > 0.0
+            for output_id in range(num_outputs):
+                outputs[output_id][i] = test_outputs[output_id][i, inds, ...]
+        return outputs
+
+    @staticmethod
+    def __postprocessing_masks(det_bboxes: np.ndarray,
+                               det_masks: np.ndarray,
+                               img_w: int,
+                               img_h: int,
+                               mask_thr_binary: float = 0.5) -> np.ndarray:
+        """Additional processing of masks. Resizes masks from [num_det, 28, 28]
+        to [num_det, img_h, img_w]. Analog of the 'mmdeploy.codebase.mmdet.
+        models.roi_heads.fcn_mask_head._do_paste_mask' function.
+
+        Args:
+            det_bboxes (np.ndarray): Bboxes of shape [num_det, 5].
+            det_masks (np.ndarray): Masks of shape [num_det, 28, 28].
+            img_w (int): Width of the original image.
+            img_h (int): Height of the original image.
+            mask_thr_binary (float): The threshold for the mask.
+
+        Returns:
+            np.ndarray: Masks of shape [num_det, img_h, img_w].
+        """
+        masks = det_masks
+        bboxes = det_bboxes
+
+        if isinstance(masks, np.ndarray):
+            masks = torch.tensor(masks)
+            bboxes = torch.tensor(bboxes)
+
+        result_masks = []
+        for bbox, mask in zip(bboxes, masks):
+
+            x0_int, y0_int = 0, 0
+            x1_int, y1_int = img_w, img_h
+
+            img_y = torch.arange(y0_int, y1_int, dtype=torch.float32) + 0.5
+            img_x = torch.arange(x0_int, x1_int, dtype=torch.float32) + 0.5
+            x0, y0, x1, y1 = bbox
+
+            img_y = (img_y - y0) / (y1 - y0) * 2 - 1
+            img_x = (img_x - x0) / (x1 - x0) * 2 - 1
+            if torch.isinf(img_x).any():
+                inds = torch.where(torch.isinf(img_x))
+                img_x[inds] = 0
+            if torch.isinf(img_y).any():
+                inds = torch.where(torch.isinf(img_y))
+                img_y[inds] = 0
+
+            gx = img_x[None, :].expand(img_y.size(0), img_x.size(0))
+            gy = img_y[:, None].expand(img_y.size(0), img_x.size(0))
+            grid = torch.stack([gx, gy], dim=2)
+
+            img_masks = F.grid_sample(
+                mask.to(dtype=torch.float32)[None, None, :, :],
+                grid[None, :, :, :],
+                align_corners=False)
+
+            mask = img_masks
+            mask = (mask >= mask_thr_binary).to(dtype=torch.bool)
+            result_masks.append(mask.numpy())
+        result_masks = np.concatenate(result_masks, axis=1)
+        return result_masks.squeeze(0)
+
+    def forward(self, img: Sequence[torch.Tensor], img_metas: Sequence[dict],
+                *args, **kwargs):
+        """Run forward inference.
+
+        Args:
+            img (Sequence[torch.Tensor]): A list contains input image(s)
+                in [N x C x H x W] format.
+ img_metas (Sequence[dict]): A list of meta info for image(s). + *args: Other arguments. + **kwargs: Other key-pair arguments. + + Returns: + list: A list contains predictions. + """ + input_img = img[0].contiguous() + outputs = self.forward_test(input_img, img_metas, *args, **kwargs) + outputs = End2EndModel.__clear_outputs(outputs) + batch_dets, batch_labels = outputs[:2] + batch_masks = outputs[2] if len(outputs) == 3 else None + batch_size = input_img.shape[0] + img_metas = img_metas[0] + results = [] + rescale = kwargs.get('rescale', True) + for i in range(batch_size): + dets, labels = batch_dets[i], batch_labels[i] + if rescale: + scale_factor = img_metas[i]['scale_factor'] + + if isinstance(scale_factor, (list, tuple, np.ndarray)): + assert len(scale_factor) == 4 + scale_factor = np.array(scale_factor)[None, :] # [1,4] + dets[:, :4] /= scale_factor + + if 'border' in img_metas[i]: + # offset pixel of the top-left corners between original image + # and padded/enlarged image, 'border' is used when exporting + # CornerNet and CentripetalNet to onnx + x_off = img_metas[i]['border'][2] + y_off = img_metas[i]['border'][0] + dets[:, [0, 2]] -= x_off + dets[:, [1, 3]] -= y_off + dets[:, :4] *= (dets[:, :4] > 0).astype(dets.dtype) + + dets_results = bbox2result(dets, labels, len(self.CLASSES)) + + if batch_masks is not None: + masks = batch_masks[i] + img_h, img_w = img_metas[i]['img_shape'][:2] + ori_h, ori_w = img_metas[i]['ori_shape'][:2] + export_postprocess_mask = True + if self.deploy_cfg is not None: + + mmdet_deploy_cfg = get_post_processing_params( + self.deploy_cfg) + # this flag enable postprocess when export. + export_postprocess_mask = mmdet_deploy_cfg.get( + 'export_postprocess_mask', True) + if not export_postprocess_mask: + masks = End2EndModel.__postprocessing_masks( + dets[:, :4], masks, ori_w, ori_h) + else: + masks = masks[:, :img_h, :img_w] + # avoid to resize masks with zero dim + if rescale and masks.shape[0] != 0: + masks = masks.astype(np.float32) + masks = torch.from_numpy(masks) + masks = torch.nn.functional.interpolate( + masks.unsqueeze(0), size=(ori_h, ori_w)) + masks = masks.squeeze(0).detach().numpy() + if masks.dtype != np.bool: + masks = masks >= 0.5 + segms_results = [[] for _ in range(len(self.CLASSES))] + for j in range(len(dets)): + segms_results[labels[j]].append(masks[j]) + results.append((dets_results, segms_results)) + else: + results.append(dets_results) + return results + + def forward_test(self, imgs: torch.Tensor, *args, **kwargs) -> \ + Tuple[np.ndarray, np.ndarray]: + """The interface for forward test. + + Args: + imgs (torch.Tensor): Input image(s) in [N x C x H x W] format. + + Returns: + tuple[np.ndarray, np.ndarray]: dets of shape [N, num_det, 5] + and class labels of shape [N, num_det]. + """ + outputs = self.wrapper({'input': imgs}) + outputs = self.wrapper.output_to_list(outputs) + outputs = [out.detach().cpu().numpy() for out in outputs] + return outputs + + def show_result(self, + img: np.ndarray, + result: list, + win_name: str, + show: bool = True, + score_thr: float = 0.3, + out_file=None): + return BaseDetector.show_result( + self, + img=img, + result=result, + score_thr=score_thr, + show=show, + win_name=win_name, + out_file=out_file) + + +@__BACKEND_MODEl.register_module('single_stage') +class PartitionSingleStageModel(End2EndModel): + """Partitioned single stage detection model. + + Args: + model_file (str): The path of input model file. + class_names (Sequence[str]): A list of string specifying class names. 
+ model_cfg: (str | mmcv.Config): Input model config. + deploy_cfg: (str | mmcv.Config): Input deployment config. + device_id (int): An integer represents device index. + """ + + def __init__(self, backend: Backend, backend_files: Sequence[str], + device: str, class_names: Sequence[str], + model_cfg: Union[str, mmcv.Config], + deploy_cfg: Union[str, mmcv.Config], **kwargs): + super().__init__(backend, backend_files, device, class_names, + deploy_cfg, **kwargs) + # load cfg if necessary + model_cfg = load_config(model_cfg)[0] + self.model_cfg = model_cfg + + def _init_wrapper(self, backend, backend_files, device): + self.wrapper = BaseBackendModel._build_wrapper( + backend=backend, + backend_files=backend_files, + device=device, + output_names=['scores', 'boxes']) + + def partition0_postprocess(self, scores: torch.Tensor, + bboxes: torch.Tensor): + """Perform post-processing for partition 0. + + Args: + scores (Tensor): The detection scores of shape + [N, num_boxes, num_classes]. + bboxes (Tensor): The bounding boxes of shape [N, num_boxes, 4]. + + Returns: + tuple[np.ndarray, np.ndarray]: dets of shape [N, num_det, 5] and + class labels of shape [N, num_det]. + """ + cfg = self.model_cfg.model.test_cfg + deploy_cfg = self.deploy_cfg + + post_params = get_post_processing_params(deploy_cfg) + max_output_boxes_per_class = post_params.max_output_boxes_per_class + iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold) + score_threshold = cfg.get('score_thr', post_params.score_threshold) + pre_top_k = -1 if post_params.pre_top_k >= bboxes.shape[1] \ + else post_params.pre_top_k + keep_top_k = cfg.get('max_per_img', post_params.keep_top_k) + ret = multiclass_nms( + bboxes, + scores, + max_output_boxes_per_class, + iou_threshold=iou_threshold, + score_threshold=score_threshold, + pre_top_k=pre_top_k, + keep_top_k=keep_top_k) + ret = [r.cpu() for r in ret] + return ret + + def forward_test(self, imgs: torch.Tensor, *args, **kwargs): + """Implement forward test. + + Args: + imgs (torch.Tensor): Input image(s) in [N x C x H x W] format. + + Returns: + list[np.ndarray, np.ndarray]: dets of shape [N, num_det, 5] and + class labels of shape [N, num_det]. + """ + outputs = self.wrapper({'input': imgs}) + outputs = self.wrapper.output_to_list(outputs) + scores, bboxes = outputs[:2] + return self.partition0_postprocess(scores, bboxes) + + +@__BACKEND_MODEl.register_module('two_stage') +class PartitionTwoStageModel(End2EndModel): + """Partitioned two stage detection model. + + Args: + class_names (Sequence[str]): A list of string specifying class names. + model_cfg: (str | mmcv.Config): Input model config. + deploy_cfg: (str | mmcv.Config): Input deployment config. + device_id (int): An integer represents device index. 
+ """ + + def __init__(self, backend: Backend, backend_files: Sequence[str], + device: str, class_names: Sequence[str], + model_cfg: Union[str, mmcv.Config], + deploy_cfg: Union[str, mmcv.Config], **kwargs): + + # load cfg if necessary + model_cfg = load_config(model_cfg)[0] + + self.model_cfg = model_cfg + + super().__init__(backend, backend_files, device, class_names, + deploy_cfg, **kwargs) + from mmdet.models.builder import build_head, build_roi_extractor + from ..models.roi_heads.bbox_head import bbox_head__get_bboxes + + self.bbox_roi_extractor = build_roi_extractor( + model_cfg.model.roi_head.bbox_roi_extractor) + self.bbox_head = build_head(model_cfg.model.roi_head.bbox_head) + + class Context: + pass + + ctx = Context() + ctx.cfg = self.deploy_cfg + self.bbox_head__get_bboxes = partial(bbox_head__get_bboxes, ctx) + + def _init_wrapper(self, backend, backend_files, device): + n = get_backend_file_count(backend) + num_feat = self.model_cfg['model']['neck']['num_outs'] + partition0_output_names = [ + 'feat/{}'.format(i) for i in range(num_feat) + ] + ['scores', 'boxes'] + + self.first_wrapper = BaseBackendModel._build_wrapper( + backend, backend_files[0:n], device, partition0_output_names) + + self.second_wrapper = BaseBackendModel._build_wrapper( + backend, backend_files[n:2 * n], device, + ['cls_score', 'bbox_pred']) + + def partition0_postprocess(self, x: Sequence[torch.Tensor], + scores: torch.Tensor, bboxes: torch.Tensor): + """Perform post-processing for partition 0. + + Args: + x (tuple[Tensor]): Feature maps of all scale levels. + scores (Tensor): The detection scores of shape + [N, num_boxes, num_classes]. + bboxes (Tensor): The bounding boxes of shape [N, num_boxes, 4]. + + Returns: + tuple(Tensor, Tensor): rois and bbox_feats. + """ + # rpn-nms + roi-extractor + cfg = self.model_cfg.model.test_cfg.rpn + deploy_cfg = self.deploy_cfg + + post_params = get_post_processing_params(deploy_cfg) + iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold) + score_threshold = cfg.get('score_thr', post_params.score_threshold) + pre_top_k = -1 if post_params.pre_top_k >= bboxes.shape[1] \ + else post_params.pre_top_k + keep_top_k = cfg.get('max_per_img', post_params.keep_top_k) + # only one class in rpn + max_output_boxes_per_class = keep_top_k + proposals, _ = multiclass_nms( + bboxes, + scores, + max_output_boxes_per_class, + iou_threshold=iou_threshold, + score_threshold=score_threshold, + pre_top_k=pre_top_k, + keep_top_k=keep_top_k) + + rois = proposals + batch_index = torch.arange( + rois.shape[0], device=rois.device).float().view(-1, 1, 1).expand( + rois.size(0), rois.size(1), 1) + rois = torch.cat([batch_index, rois[..., :4]], dim=-1) + batch_size = rois.shape[0] + num_proposals_per_img = rois.shape[1] + + # Eliminate the batch dimension + rois = rois.view(-1, 5) + bbox_feats = self.bbox_roi_extractor( + x[:self.bbox_roi_extractor.num_inputs], rois) + + rois = rois.reshape(batch_size, num_proposals_per_img, rois.size(-1)) + return rois, bbox_feats + + def partition1_postprocess(self, rois: torch.Tensor, + cls_score: torch.Tensor, + bbox_pred: torch.Tensor, + img_metas: Sequence[dict]): + """Perform post-processing for partition 1. + Args: + rois (torch.Tensor): Input tensor of roi. + cls_score (torch.Tensor): Scores of all classes. + bbox_pred (torch.Tensor): Bounding box proposals. + img_metas (Sequence[dict]): A list of image(s) meta information. + + Returns: + tuple[Tensor, Tensor]: dets of shape [N, num_det, 5] and class + labels of shape [N, num_det]. 
+ """ + batch_size = rois.shape[0] + num_proposals_per_img = rois.shape[1] + + cls_score = cls_score.reshape(batch_size, num_proposals_per_img, + cls_score.size(-1)) + + bbox_pred = bbox_pred.reshape(batch_size, num_proposals_per_img, + bbox_pred.size(-1)) + + rcnn_test_cfg = self.model_cfg.model.test_cfg.rcnn + return self.bbox_head__get_bboxes(self.bbox_head, rois, cls_score, + bbox_pred, + img_metas[0][0]['img_shape'], + rcnn_test_cfg) + + def forward_test(self, imgs: torch.Tensor, img_metas: Sequence[dict], + *args, **kwargs): + """Implement forward test. + + Args: + imgs (torch.Tensor): Input image(s) in [N x C x H x W] format. + img_metas (Sequence[dict]): A list of image(s) meta information. + + Returns: + tuple[np.ndarray, np.ndarray]: dets of shape [N, num_det, 5] and + class labels of shape [N, num_det]. + """ + outputs = self.first_wrapper({'input': imgs}) + outputs = self.first_wrapper.output_to_list(outputs) + feats = outputs[:-2] + scores, bboxes = outputs[-2:] + + # partition0_postprocess + rois, bbox_feats = self.partition0_postprocess(feats, scores, bboxes) + + # partition1 forward + bbox_feats = bbox_feats.contiguous() + outputs = self.second_wrapper({'bbox_feats': bbox_feats}) + outputs = self.second_wrapper.output_to_list(outputs) + cls_score, bbox_pred = outputs[:2] + + # partition1_postprocess + outputs = self.partition1_postprocess(rois, cls_score, bbox_pred, + img_metas) + outputs = [out.detach().cpu() for out in outputs] + return outputs + + +def get_classes_from_config(model_cfg: Union[str, mmcv.Config], **kwargs): + """Get class name from config. + + Args: + model_cfg (str | mmcv.Config): Input model config file or + Config object. + + Returns: + list[str]: A list of string specifying names of different class. + """ + # load cfg if necessary + model_cfg = load_config(model_cfg)[0] + module_dict = DATASETS.module_dict + data_cfg = model_cfg.data + + if 'test' in data_cfg: + module = module_dict[data_cfg.test.type] + elif 'val' in data_cfg: + module = module_dict[data_cfg.val.type] + elif 'train' in data_cfg: + module = module_dict[data_cfg.train.type] + else: + raise RuntimeError(f'No dataset config found in: {model_cfg}') + + return module.CLASSES + + +def build_object_detection_model(model_files: Sequence[str], + model_cfg: Union[str, mmcv.Config], + deploy_cfg: Union[str, mmcv.Config], + device: str, **kwargs): + """Build object detection model for different backends. + + Args: + model_files (Sequence[str]): Input model file(s). + model_cfg (str | mmcv.Config): Input model config file or Config + object. + deploy_cfg (str | mmcv.Config): Input deployment config file or + Config object. + device (str): Device to input model + + Returns: + DeployBaseDetector: Detector for a configured backend. 
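+
+    Example:
+        An illustrative sketch; the file names below are placeholders.
+
+        >>> detector = build_object_detection_model(
+        ...     ['end2end.onnx'], 'mmdet_model_cfg.py', 'deploy_cfg.py',
+        ...     device='cuda:0')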
+ """ + # load cfg if necessary + deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg) + + backend = get_backend(deploy_cfg) + class_names = get_classes_from_config(model_cfg) + + # Default Config is 'end2end' + partition_type = 'end2end' + partition_config = get_partition_config(deploy_cfg) + if partition_config is not None: + partition_type = partition_config.get('type', None) + + backend_detector = __BACKEND_MODEl.build( + partition_type, + backend=backend, + backend_files=model_files, + class_names=class_names, + device=device, + model_cfg=model_cfg, + deploy_cfg=deploy_cfg, + **kwargs) + + return backend_detector diff --git a/mmdeploy/codebase/mmdet/deploy/utils.py b/mmdeploy/codebase/mmdet/deploy/utils.py new file mode 100644 index 0000000000..d8981a3cf6 --- /dev/null +++ b/mmdeploy/codebase/mmdet/deploy/utils.py @@ -0,0 +1,99 @@ +from typing import Any, Optional, Sequence, Union + +import mmcv +import torch +from torch import Tensor + +from mmdeploy.utils import load_config + + +def get_post_processing_params(deploy_cfg: Union[str, mmcv.Config]): + """Get mmdet post-processing parameters from config. + + Args: + deploy_cfg (str | mmcv.Config): The path or content of config. + + Returns: + dict: A dict of parameters for mmdet. + """ + deploy_cfg = load_config(deploy_cfg)[0] + codebase_key = 'codebase_config' + assert codebase_key in deploy_cfg + codebase_config = deploy_cfg[codebase_key] + post_params = codebase_config.get('post_processing', None) + assert post_params is not None, 'Failed to get `post_processing`.' + return post_params + + +def clip_bboxes(x1: Tensor, y1: Tensor, x2: Tensor, y2: Tensor, + max_shape: Union[Tensor, Sequence[int]]): + """Clip bboxes for onnx. + + Since torch.clamp cannot have dynamic `min` and `max`, we scale the + boxes by 1/max_shape and clamp in the range [0, 1] if necessary. + + Args: + x1 (Tensor): The x1 for bounding boxes. + y1 (Tensor): The y1 for bounding boxes. + x2 (Tensor): The x2 for bounding boxes. + y2 (Tensor): The y2 for bounding boxes. + max_shape (Tensor | Sequence[int]): The (H,W) of original image. + Returns: + tuple(Tensor): The clipped x1, y1, x2, y2. + """ + assert len(max_shape) == 2, '`max_shape` should be [h, w]' + if isinstance(max_shape, torch.Tensor): + # scale by 1/max_shape + x1 = x1 / max_shape[1] + y1 = y1 / max_shape[0] + x2 = x2 / max_shape[1] + y2 = y2 / max_shape[0] + + # clamp [0, 1] + x1 = torch.clamp(x1, 0, 1) + y1 = torch.clamp(y1, 0, 1) + x2 = torch.clamp(x2, 0, 1) + y2 = torch.clamp(y2, 0, 1) + + # scale back + x1 = x1 * max_shape[1] + y1 = y1 * max_shape[0] + x2 = x2 * max_shape[1] + y2 = y2 * max_shape[0] + else: + x1 = torch.clamp(x1, 0, max_shape[1]) + y1 = torch.clamp(y1, 0, max_shape[0]) + x2 = torch.clamp(x2, 0, max_shape[1]) + y2 = torch.clamp(y2, 0, max_shape[0]) + return x1, y1, x2, y2 + + +def pad_with_value(x: Tensor, + pad_dim: int, + pad_size: int, + pad_value: Optional[Any] = None): + """Pad a tensor with a value along some dim. + + Args: + x (Tensor): Input tensor. + pad_dim (int): Along which dim to pad. + pad_size (int): To which size to pad. + pad_value (Any): Filled value for padding. Defaults to `None`. + + Returns: + Tensor: Padded tensor. 
+ """ + num_dims = len(x.shape) + pad_slice = (slice(None, None, None), ) * num_dims + pad_slice = pad_slice[:pad_dim] + (slice(0, 1, + 1), ) + pad_slice[pad_dim + 1:] + repeat_size = [1] * num_dims + repeat_size[pad_dim] = pad_size + + x_pad = x.__getitem__(pad_slice) + if pad_value is not None: + x_pad = x_pad * 0 + pad_value + + x_pad = x_pad.repeat(*repeat_size) + x = torch.cat([x, x_pad], dim=pad_dim) + return x diff --git a/mmdeploy/mmdet/models/__init__.py b/mmdeploy/codebase/mmdet/models/__init__.py similarity index 100% rename from mmdeploy/mmdet/models/__init__.py rename to mmdeploy/codebase/mmdet/models/__init__.py diff --git a/mmdeploy/codebase/mmdet/models/dense_heads/__init__.py b/mmdeploy/codebase/mmdet/models/dense_heads/__init__.py new file mode 100644 index 0000000000..eb6c5937ee --- /dev/null +++ b/mmdeploy/codebase/mmdet/models/dense_heads/__init__.py @@ -0,0 +1,17 @@ +from .anchor_head import anchor_head__get_bboxes, anchor_head__get_bboxes__ncnn +from .atss_head import atss_head__get_bboxes +from .fcos_head import fcos_head__get_bboxes, fcos_head__get_bboxes__ncnn +from .fovea_head import fovea_head__get_bboxes +from .rpn_head import rpn_head__get_bboxes +from .vfnet_head import vfnet_head__get_bboxes +from .yolo_head import yolov3_head__get_bboxes, yolov3_head__get_bboxes__ncnn +from .yolox_head import yolox_head__get_bboxes + +__all__ = [ + 'anchor_head__get_bboxes', 'anchor_head__get_bboxes__ncnn', + 'atss_head__get_bboxes', 'fcos_head__get_bboxes', + 'fcos_head__get_bboxes__ncnn', 'fovea_head__get_bboxes', + 'rpn_head__get_bboxes', 'vfnet_head__get_bboxes', + 'yolov3_head__get_bboxes', 'yolov3_head__get_bboxes__ncnn', + 'yolox_head__get_bboxes' +] diff --git a/mmdeploy/mmdet/models/dense_heads/anchor_head.py b/mmdeploy/codebase/mmdet/models/dense_heads/anchor_head.py similarity index 95% rename from mmdeploy/mmdet/models/dense_heads/anchor_head.py rename to mmdeploy/codebase/mmdet/models/dense_heads/anchor_head.py index fdd33599d2..e394d4b50d 100644 --- a/mmdeploy/mmdet/models/dense_heads/anchor_head.py +++ b/mmdeploy/codebase/mmdet/models/dense_heads/anchor_head.py @@ -1,14 +1,13 @@ import torch +from mmdeploy.codebase.mmdet import (get_post_processing_params, + multiclass_nms, pad_with_value) from mmdeploy.core import FUNCTION_REWRITER -from mmdeploy.mmdet.core import multiclass_nms -from mmdeploy.mmdet.export import pad_with_value -from mmdeploy.utils import (Backend, get_backend, get_mmdet_params, - is_dynamic_shape) +from mmdeploy.utils import Backend, get_backend, is_dynamic_shape @FUNCTION_REWRITER.register_rewriter( - func_name='mmdet.models.AnchorHead.get_bboxes') + func_name='mmdet.models.dense_heads.AnchorHead.get_bboxes') def anchor_head__get_bboxes(ctx, self, cls_scores, @@ -129,7 +128,7 @@ def anchor_head__get_bboxes(ctx, if not with_nms: return batch_mlvl_bboxes, batch_mlvl_scores - post_params = get_mmdet_params(deploy_cfg) + post_params = get_post_processing_params(deploy_cfg) max_output_boxes_per_class = post_params.max_output_boxes_per_class iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold) score_threshold = cfg.get('score_thr', post_params.score_threshold) @@ -146,7 +145,7 @@ def anchor_head__get_bboxes(ctx, @FUNCTION_REWRITER.register_rewriter( - func_name='mmdet.models.AnchorHead.get_bboxes', backend='ncnn') + func_name='mmdet.models.dense_heads.AnchorHead.get_bboxes', backend='ncnn') def anchor_head__get_bboxes__ncnn(ctx, self, cls_scores, @@ -257,7 +256,7 @@ def anchor_head__get_bboxes__ncnn(ctx, if not with_nms: 
return batch_mlvl_bboxes, batch_mlvl_scores - post_params = get_mmdet_params(deploy_cfg) + post_params = get_post_processing_params(deploy_cfg) max_output_boxes_per_class = post_params.max_output_boxes_per_class iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold) score_threshold = cfg.get('score_thr', post_params.score_threshold) diff --git a/mmdeploy/mmdet/models/dense_heads/atss_head.py b/mmdeploy/codebase/mmdet/models/dense_heads/atss_head.py similarity index 93% rename from mmdeploy/mmdet/models/dense_heads/atss_head.py rename to mmdeploy/codebase/mmdet/models/dense_heads/atss_head.py index fea597c86c..ea2d4f5a7c 100644 --- a/mmdeploy/mmdet/models/dense_heads/atss_head.py +++ b/mmdeploy/codebase/mmdet/models/dense_heads/atss_head.py @@ -1,11 +1,11 @@ import torch +from mmdeploy.codebase.mmdet import get_post_processing_params, multiclass_nms from mmdeploy.core import FUNCTION_REWRITER -from mmdeploy.mmdet.core import multiclass_nms -from mmdeploy.utils import get_mmdet_params -@FUNCTION_REWRITER.register_rewriter('mmdet.models.ATSSHead.get_bboxes') +@FUNCTION_REWRITER.register_rewriter( + 'mmdet.models.dense_heads.ATSSHead.get_bboxes') def atss_head__get_bboxes(ctx, self, cls_scores, @@ -15,7 +15,7 @@ def atss_head__get_bboxes(ctx, cfg=None, rescale=False, with_nms=True): - """Rewrite `get_bboxes` from ATSSHead for default backend. + """Rewrite `get_bboxes` of `ATSSHead` for default backend. Rewrite this function to deploy model, transform network output for a batch into bbox predictions. @@ -96,7 +96,7 @@ def atss_head__get_bboxes(ctx, batch_mlvl_scores.shape) batch_mlvl_scores = batch_mlvl_scores * batch_mlvl_centerness deploy_cfg = ctx.cfg - post_params = get_mmdet_params(deploy_cfg) + post_params = get_post_processing_params(deploy_cfg) max_output_boxes_per_class = post_params.max_output_boxes_per_class iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold) score_threshold = cfg.get('score_thr', post_params.score_threshold) diff --git a/mmdeploy/mmdet/models/dense_heads/fcos_head.py b/mmdeploy/codebase/mmdet/models/dense_heads/fcos_head.py similarity index 94% rename from mmdeploy/mmdet/models/dense_heads/fcos_head.py rename to mmdeploy/codebase/mmdet/models/dense_heads/fcos_head.py index c6584c8369..6f27e0df0e 100644 --- a/mmdeploy/mmdet/models/dense_heads/fcos_head.py +++ b/mmdeploy/codebase/mmdet/models/dense_heads/fcos_head.py @@ -1,14 +1,13 @@ import torch +from mmdeploy.codebase.mmdet import (distance2bbox, get_post_processing_params, + multiclass_nms, pad_with_value) from mmdeploy.core import FUNCTION_REWRITER -from mmdeploy.mmdet.core import distance2bbox, multiclass_nms -from mmdeploy.mmdet.export import pad_with_value -from mmdeploy.utils import (Backend, get_backend, get_mmdet_params, - is_dynamic_shape) +from mmdeploy.utils import Backend, get_backend, is_dynamic_shape @FUNCTION_REWRITER.register_rewriter( - func_name='mmdet.models.FCOSHead.get_bboxes') + func_name='mmdet.models.dense_heads.FCOSHead.get_bboxes') def fcos_head__get_bboxes(ctx, self, cls_scores, @@ -18,7 +17,7 @@ def fcos_head__get_bboxes(ctx, with_nms=True, cfg=None, **kwargs): - """Rewrite `get_bboxes` of FCOSHead for default backend. + """Rewrite `get_bboxes` of `FCOSHead` for default backend. Rewrite this function to support deployment of default backend and dynamic shape export. 
Transform network output for a batch into @@ -130,7 +129,7 @@ def fcos_head__get_bboxes(ctx, return batch_mlvl_bboxes, batch_mlvl_scores, batch_mlvl_centerness batch_mlvl_scores = batch_mlvl_scores * batch_mlvl_centerness - post_params = get_mmdet_params(deploy_cfg) + post_params = get_post_processing_params(deploy_cfg) max_output_boxes_per_class = post_params.max_output_boxes_per_class iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold) score_threshold = cfg.get('score_thr', post_params.score_threshold) @@ -142,7 +141,7 @@ def fcos_head__get_bboxes(ctx, @FUNCTION_REWRITER.register_rewriter( - func_name='mmdet.models.FCOSHead.get_bboxes', backend='ncnn') + func_name='mmdet.models.dense_heads.FCOSHead.get_bboxes', backend='ncnn') def fcos_head__get_bboxes__ncnn(ctx, self, cls_scores, @@ -152,7 +151,7 @@ def fcos_head__get_bboxes__ncnn(ctx, with_nms=True, cfg=None, **kwargs): - """Rewrite `get_bboxes` of FCOSHead for ncnn backend. + """Rewrite `get_bboxes` of `FCOSHead` for ncnn backend. 1. Shape node and batch inference is not supported by ncnn. This function transform dynamic shape to constant shape and remove batch inference. @@ -257,7 +256,7 @@ def fcos_head__get_bboxes__ncnn(ctx, batch_mlvl_scores = (_batch_mlvl_scores * _batch_mlvl_centerness). \ reshape(batch_mlvl_scores.shape) batch_mlvl_bboxes = batch_mlvl_bboxes.reshape(batch_size, -1, 4) - post_params = get_mmdet_params(deploy_cfg) + post_params = get_post_processing_params(deploy_cfg) max_output_boxes_per_class = post_params.max_output_boxes_per_class iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold) score_threshold = cfg.get('score_thr', post_params.score_threshold) diff --git a/mmdeploy/mmdet/models/dense_heads/fovea_head.py b/mmdeploy/codebase/mmdet/models/dense_heads/fovea_head.py similarity index 92% rename from mmdeploy/mmdet/models/dense_heads/fovea_head.py rename to mmdeploy/codebase/mmdet/models/dense_heads/fovea_head.py index ea1d8c16ce..81f4add4c6 100644 --- a/mmdeploy/mmdet/models/dense_heads/fovea_head.py +++ b/mmdeploy/codebase/mmdet/models/dense_heads/fovea_head.py @@ -1,11 +1,11 @@ import torch +from mmdeploy.codebase.mmdet import get_post_processing_params, multiclass_nms from mmdeploy.core import FUNCTION_REWRITER -from mmdeploy.mmdet.core import multiclass_nms -from mmdeploy.utils import get_mmdet_params -@FUNCTION_REWRITER.register_rewriter('mmdet.models.FoveaHead.get_bboxes') +@FUNCTION_REWRITER.register_rewriter( + 'mmdet.models.dense_heads.FoveaHead.get_bboxes') def fovea_head__get_bboxes(ctx, self, cls_scores, @@ -13,7 +13,7 @@ def fovea_head__get_bboxes(ctx, img_metas, cfg=None, rescale=None): - """Rewrite `get_bboxes` from FoveaHead for default backend. + """Rewrite `get_bboxes` of `FoveaHead` for default backend. Rewrite this function to deploy model, transform network output for a batch into bbox predictions. 
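For reference, the `post_params` read by these head rewrites come from the `post_processing` section of `codebase_config` in the deploy config (see `get_post_processing_params` earlier in this patch). A minimal illustrative fragment; the values are examples, not defaults mandated here:

# Illustrative deploy_cfg fragment; the keys mirror what the rewrites read.
codebase_config = dict(
    type='mmdet',
    post_processing=dict(
        score_threshold=0.05,            # fallback for cfg.score_thr
        iou_threshold=0.5,               # fallback for cfg.nms.iou_threshold
        max_output_boxes_per_class=200,
        pre_top_k=-1,                    # -1 keeps all boxes before NMS
        keep_top_k=100))                 # fallback for cfg.max_per_img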
@@ -78,7 +78,7 @@ def fovea_head__get_bboxes(ctx, det_scores = torch.cat(det_scores, dim=1) deploy_cfg = ctx.cfg - post_params = get_mmdet_params(deploy_cfg) + post_params = get_post_processing_params(deploy_cfg) max_output_boxes_per_class = post_params.max_output_boxes_per_class iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold) score_threshold = cfg.get('score_thr', post_params.score_threshold) diff --git a/mmdeploy/mmdet/models/dense_heads/rpn_head.py b/mmdeploy/codebase/mmdet/models/dense_heads/rpn_head.py similarity index 94% rename from mmdeploy/mmdet/models/dense_heads/rpn_head.py rename to mmdeploy/codebase/mmdet/models/dense_heads/rpn_head.py index 5604e83b90..1921d9a5fd 100644 --- a/mmdeploy/mmdet/models/dense_heads/rpn_head.py +++ b/mmdeploy/codebase/mmdet/models/dense_heads/rpn_head.py @@ -1,13 +1,13 @@ import torch +from mmdeploy.codebase.mmdet import (get_post_processing_params, + multiclass_nms, pad_with_value) from mmdeploy.core import FUNCTION_REWRITER -from mmdeploy.mmdet.core import multiclass_nms -from mmdeploy.mmdet.export import pad_with_value -from mmdeploy.utils import (Backend, get_backend, get_mmdet_params, - is_dynamic_shape) +from mmdeploy.utils import Backend, get_backend, is_dynamic_shape -@FUNCTION_REWRITER.register_rewriter('mmdet.models.RPNHead.get_bboxes') +@FUNCTION_REWRITER.register_rewriter( + 'mmdet.models.dense_heads.RPNHead.get_bboxes') def rpn_head__get_bboxes(ctx, self, cls_scores, @@ -16,7 +16,7 @@ def rpn_head__get_bboxes(ctx, with_nms=True, cfg=None, **kwargs): - """Rewrite `get_bboxes` of RPNHead for default backend. + """Rewrite `get_bboxes` of `RPNHead` for default backend. Rewrite this function to deploy model, transform network output for a batch into bbox predictions. @@ -120,7 +120,7 @@ def rpn_head__get_bboxes(ctx, if not with_nms: return batch_mlvl_bboxes, batch_mlvl_scores - post_params = get_mmdet_params(deploy_cfg) + post_params = get_post_processing_params(deploy_cfg) iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold) score_threshold = cfg.get('score_thr', post_params.score_threshold) pre_top_k = post_params.pre_top_k @@ -138,7 +138,7 @@ def rpn_head__get_bboxes(ctx, @FUNCTION_REWRITER.register_rewriter( - 'mmdet.models.RPNHead.get_bboxes', backend='ncnn') + 'mmdet.models.dense_heads.RPNHead.get_bboxes', backend='ncnn') def rpn_head__get_bboxes__ncnn(ctx, self, cls_scores, @@ -147,7 +147,7 @@ def rpn_head__get_bboxes__ncnn(ctx, with_nms=True, cfg=None, **kwargs): - """Rewrite `get_bboxes` of RPNHead for ncnn backend. + """Rewrite `get_bboxes` of `RPNHead` for NCNN backend. Shape node and batch inference is not supported by ncnn. This function transform dynamic shape to constant shape and remove batch inference. 
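The paired registrations above illustrate the general dispatch rule: a rewriter registered without a backend is the default, and one registered with `backend='ncnn'` takes precedence when deploying to ncnn. A toy sketch of the pattern; `MyHead` is invented for illustration:

# Toy rewriter pair following the pattern above ('MyHead' is hypothetical).
from mmdeploy.core import FUNCTION_REWRITER


@FUNCTION_REWRITER.register_rewriter(
    func_name='mmdet.models.dense_heads.MyHead.get_bboxes')
def my_head__get_bboxes(ctx, self, *args, **kwargs):
    """Default rewrite; ctx.cfg carries the deploy config."""
    ...


@FUNCTION_REWRITER.register_rewriter(
    func_name='mmdet.models.dense_heads.MyHead.get_bboxes', backend='ncnn')
def my_head__get_bboxes__ncnn(ctx, self, *args, **kwargs):
    """ncnn-specific rewrite, chosen over the default for ncnn exports."""
    ...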
@@ -242,7 +242,7 @@ def rpn_head__get_bboxes__ncnn(ctx, if not with_nms: return batch_mlvl_bboxes, batch_mlvl_scores - post_params = get_mmdet_params(deploy_cfg) + post_params = get_post_processing_params(deploy_cfg) iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold) score_threshold = cfg.get('score_thr', post_params.score_threshold) pre_top_k = post_params.pre_top_k diff --git a/mmdeploy/mmdet/models/dense_heads/vfnet_head.py b/mmdeploy/codebase/mmdet/models/dense_heads/vfnet_head.py similarity index 93% rename from mmdeploy/mmdet/models/dense_heads/vfnet_head.py rename to mmdeploy/codebase/mmdet/models/dense_heads/vfnet_head.py index 8816231d62..2378727608 100644 --- a/mmdeploy/mmdet/models/dense_heads/vfnet_head.py +++ b/mmdeploy/codebase/mmdet/models/dense_heads/vfnet_head.py @@ -1,12 +1,12 @@ import torch +from mmdeploy.codebase.mmdet import (distance2bbox, get_post_processing_params, + multiclass_nms) from mmdeploy.core import FUNCTION_REWRITER -from mmdeploy.mmdet.core import distance2bbox, multiclass_nms -from mmdeploy.utils import get_mmdet_params @FUNCTION_REWRITER.register_rewriter( - func_name='mmdet.models.VFNetHead.get_bboxes') + 'mmdet.models.dense_heads.VFNetHead.get_bboxes') def vfnet_head__get_bboxes(ctx, self, cls_scores, @@ -16,7 +16,7 @@ def vfnet_head__get_bboxes(ctx, cfg=None, rescale=None, with_nms=True): - """Rewrite `get_bboxes` of VFNetHead for default backend. + """Rewrite `get_bboxes` of `VFNetHead` for default backend. Rewrite this function to deploy model, transform network output for a batch into bbox predictions. @@ -102,7 +102,7 @@ def vfnet_head__get_bboxes(ctx, return batch_mlvl_bboxes, batch_mlvl_scores deploy_cfg = ctx.cfg - post_params = get_mmdet_params(deploy_cfg) + post_params = get_post_processing_params(deploy_cfg) max_output_boxes_per_class = post_params.max_output_boxes_per_class iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold) score_threshold = cfg.get('score_thr', post_params.score_threshold) diff --git a/mmdeploy/mmdet/models/dense_heads/yolo_head.py b/mmdeploy/codebase/mmdet/models/dense_heads/yolo_head.py similarity index 96% rename from mmdeploy/mmdet/models/dense_heads/yolo_head.py rename to mmdeploy/codebase/mmdet/models/dense_heads/yolo_head.py index 3983fa5828..24869be2b1 100644 --- a/mmdeploy/mmdet/models/dense_heads/yolo_head.py +++ b/mmdeploy/codebase/mmdet/models/dense_heads/yolo_head.py @@ -1,21 +1,20 @@ import torch +from mmdeploy.codebase.mmdet import (get_post_processing_params, + multiclass_nms, pad_with_value) from mmdeploy.core import FUNCTION_REWRITER -from mmdeploy.mmdet.core import multiclass_nms -from mmdeploy.mmdet.export import pad_with_value -from mmdeploy.utils import (Backend, get_backend, get_mmdet_params, - is_dynamic_shape) +from mmdeploy.utils import Backend, get_backend, is_dynamic_shape @FUNCTION_REWRITER.register_rewriter( - func_name='mmdet.models.YOLOV3Head.get_bboxes') + func_name='mmdet.models.dense_heads.YOLOV3Head.get_bboxes') def yolov3_head__get_bboxes(ctx, self, pred_maps, with_nms=True, cfg=None, **kwargs): - """Rewrite `get_bboxes` of YOLOV3Head for default backend. + """Rewrite `get_bboxes` of `YOLOV3Head` for default backend. Rewrite this function to deploy model, transform network output for a batch into bbox predictions. @@ -83,7 +82,6 @@ def yolov3_head__get_bboxes(ctx, conf_pred = torch.sigmoid(pred_map[..., 4]) cls_pred = torch.sigmoid(pred_map[..., 5:]).view( batch_size, -1, self.num_classes) # Cls pred one-hot. 
- backend = get_backend(ctx.cfg) # topk in tensorrt does not support shape BaseTask: + task = get_task_type(deploy_cfg) + return registry.module_dict[task.value](model_cfg, deploy_cfg, device) + + +MMEDIT_TASK = Registry('mmedit_tasks', build_func=__build_mmedit_task) + + +@CODEBASE.register_module(Codebase.MMEDIT.value) +class MMEditing(MMCodebase): + """mmediting codebase class.""" + + task_registry = MMEDIT_TASK + + def __init__(self): + super().__init__() + + @staticmethod + def build_task_processor(model_cfg: mmcv.Config, deploy_cfg: mmcv.Config, + device: str) -> BaseTask: + """The interface to build the task processors of mmedit. + + Args: + model_cfg (mmcv.Config): Model config file. + deploy_cfg (mmcv.Config): Deployment config file. + device (str): A string specifying device type. + + Returns: + BaseTask: A task processor. + """ + return MMEDIT_TASK.build(model_cfg, deploy_cfg, device) + + @staticmethod + def build_dataset(dataset_cfg: Union[str, mmcv.Config], *args, + **kwargs) -> Dataset: + """Build dataset for processor. + + Args: + dataset_cfg (str | mmcv.Config): The input dataset config. + + Returns: + Dataset: A PyTorch dataset. + """ + from mmedit.datasets import build_dataset as build_dataset_mmedit + from mmdeploy.utils import load_config + dataset_cfg = load_config(dataset_cfg)[0] + data = dataset_cfg.data + + dataset = build_dataset_mmedit(data.test) + return dataset + + @staticmethod + def build_dataloader(dataset: Dataset, + samples_per_gpu: int, + workers_per_gpu: int, + num_gpus: int = 1, + dist: bool = False, + shuffle: bool = False, + seed: Optional[int] = None, + drop_last: bool = False, + pin_memory: bool = True, + persistent_workers: bool = True, + **kwargs) -> DataLoader: + """Build PyTorch DataLoader. + + In distributed training, each GPU/process has a dataloader. + In non-distributed training, there is only one dataloader for all GPUs. + + Args: + dataset (:obj:`Dataset`): A PyTorch dataset. + samples_per_gpu (int): Number of samples on each GPU, i.e., + batch size of each GPU. + workers_per_gpu (int): How many subprocesses to use for data + loading for each GPU. + num_gpus (int): Number of GPUs. Only used in non-distributed + training. Default: 1. + dist (bool): Distributed training/test or not. Default: True. + shuffle (bool): Whether to shuffle the data at every epoch. + Default: True. + seed (int | None): Seed to be used. Default: None. + drop_last (bool): Whether to drop the last incomplete batch + in epoch. Default: False. + pin_memory (bool): Whether to use pin_memory in DataLoader. + Default: True. + persistent_workers (bool): If True, the data loader will not + shutdown the worker processes after a dataset has been + consumed once. + This allows to maintain the workers Dataset instances alive. + The argument also has effect in PyTorch>=1.7.0. + Default: True. + kwargs (dict, optional): Any keyword argument to be used to + initialize DataLoader. + + Returns: + DataLoader: A PyTorch dataloader. + """ + from mmedit.datasets import build_dataloader as build_dataloader_mmedit + return build_dataloader_mmedit(dataset, samples_per_gpu, + workers_per_gpu, num_gpus, dist, + shuffle, seed, drop_last, pin_memory, + persistent_workers, **kwargs) + + @staticmethod + def single_gpu_test(model: torch.nn.Module, + data_loader: DataLoader, + save_image: bool = False, + save_path: Optional[str] = None, + iteration: int = None) -> list: + """Run test with single gpu. + + Args: + model (torch.nn.Module): Input model from nn.Module. 
+ data_loader (DataLoader): PyTorch data loader. + save_image (bool): Whether save image. Default: False. + save_path (str): The path to save image. Default: None. + iteration (int): Iteration number. It is used for the save + image name. Default: None. + + Returns: + list: The prediction results. + """ + from mmedit.apis import single_gpu_test + outputs = single_gpu_test(model, data_loader, save_image, save_path, + iteration) + return outputs diff --git a/mmdeploy/codebase/mmedit/deploy/super_resolution.py b/mmdeploy/codebase/mmedit/deploy/super_resolution.py new file mode 100644 index 0000000000..2a7cfe1d70 --- /dev/null +++ b/mmdeploy/codebase/mmedit/deploy/super_resolution.py @@ -0,0 +1,280 @@ +import logging +import warnings +from typing import Any, Dict, Optional, Sequence, Tuple, Union + +import mmcv +import numpy as np +import torch +from mmcv.parallel import collate, scatter +from torch.utils.data import Dataset + +from mmdeploy.codebase.base import BaseTask +from mmdeploy.codebase.mmedit.deploy.mmediting import MMEDIT_TASK +from mmdeploy.utils import Task, load_config + + +@MMEDIT_TASK.register_module(Task.SUPER_RESOLUTION.value) +class SuperResolution(BaseTask): + """BaseTask class of super resolution task. + + Args: + model_cfg (mmcv.Config): Model config file. + deploy_cfg (mmcv.Config): Deployment config file. + device (str): A string specifying device type. + """ + + def __init__(self, model_cfg: mmcv.Config, deploy_cfg: mmcv.Config, + device: str): + super().__init__(model_cfg, deploy_cfg, device) + + def init_backend_model(self, + model_files: Sequence[str] = None, + **kwargs) -> torch.nn.Module: + """Initialize backend model. + + Args: + model_files (Sequence[str]): Input model files. Default is None. + + Returns: + nn.Module: An initialized backend model. + """ + from .super_resolution_model import build_super_resolution_model + model = build_super_resolution_model( + model_files, self.model_cfg, self.deploy_cfg, device=self.device) + return model + + def init_pytorch_model(self, + model_checkpoint: Optional[str] = None, + **kwargs) -> torch.nn.Module: + """Initialize torch model. + + Args: + model_checkpoint (str): The checkpoint file of torch model, + defaults to `None`. + + Returns: + nn.Module: An initialized torch model generated by other OpenMMLab + codebases. + """ + from mmedit.apis import init_model + model = init_model(self.model_cfg, model_checkpoint, self.device) + model.forward = model.forward_dummy + return model.eval() + + def create_input(self, + imgs: Union[str, np.ndarray], + input_shape: Optional[Sequence[int]] = None, + **kwargs) -> Tuple[Dict, torch.Tensor]: + """Create input for editing processor. + + Args: + imgs (str | np.ndarray): Input image(s). + input_shape (Sequence[int] | None): A list of two integer in + (width, height) format specifying input shape. Defaults to `None`. + + Returns: + tuple: (data, img), meta information for the input image and input. 
+ """ + from mmedit.datasets.pipelines import Compose + + if isinstance(imgs, (list, tuple)): + if not isinstance(imgs[0], (np.ndarray, str)): + raise AssertionError('imgs must be strings or numpy arrays') + elif isinstance(imgs, (np.ndarray, str)): + imgs = [imgs] + else: + raise AssertionError('imgs must be strings or numpy arrays') + + cfg = load_config(self.model_cfg)[0].copy() + + self._preprocess_cfg( + cfg, + load_from_file=isinstance(imgs[0], str), + is_static_cfg=input_shape is not None, + input_shape=input_shape) + + test_pipeline = Compose(cfg.test_pipeline) + + data_arr = [] + for img in imgs: + if isinstance(img, np.ndarray): + data = dict(lq=img) + else: + data = dict(lq_path=img) + + data = test_pipeline(data) + data_arr.append(data) + + data = collate(data_arr, samples_per_gpu=len(imgs)) + + data['img'] = data['lq'] + + if self.device != 'cpu': + data = scatter(data, [self.device])[0] + + return data, data['img'] + + def visualize(self, + model: torch.nn.Module, + image: Union[str, np.ndarray], + result: Union[list, np.ndarray], + output_file: str, + window_name: str = '', + show_result: bool = False, + **kwargs) -> np.ndarray: + """Visualize result of a model. + + Args: + model (nn.Module): Input model. + image (str | np.ndarray): Input image to draw predictions on. + result (list | np.ndarray): A list of result. + output_file (str): Output file to save drawn image. + window_name (str): The name of visualization window. Defaults to + an empty string. + show_result (bool): Whether to show result in windows, defaults + to `False`. + """ + if len(result.shape) == 4: + result = result[0] + + with torch.no_grad(): + result = result.transpose(1, 2, 0) + result = np.clip(result, 0, 1)[:, :, ::-1] + result = (result * 255.0).round() + + output_file = None if show_result else output_file + + if show_result: + int_result = result.astype(np.uint8) + mmcv.imshow(int_result, window_name, 0) + if output_file is not None: + mmcv.imwrite(result, output_file) + + if not (show_result or output_file): + warnings.warn( + 'show_result==False and output_file is not specified, only ' + 'result image will be returned') + return result + + @staticmethod + def run_inference(model: torch.nn.Module, + model_inputs: Dict[str, torch.Tensor]) -> list: + """Run inference once for a super resolution model of mmedit. + + Args: + model (nn.Module): Input model. + model_inputs (dict): A dict containing model inputs tensor and + meta info. + + Returns: + list: The predictions of model inference. + """ + result = model(model_inputs['lq']) + if not isinstance(result[0], np.ndarray): + result = [result[0].detach().cpu().numpy()] + return result + + @staticmethod + def get_partition_cfg(partition_type: str, **kwargs) -> Dict: + """Get a certain partition config for mmedit. + + Args: + partition_type (str): A string specifying partition type. + + Returns: + dict: A dictionary of partition config. + """ + raise NotImplementedError + + @staticmethod + def get_tensor_from_input(input_data: Dict[str, Any]) -> torch.Tensor: + """Get input tensor from input data. + + Args: + input_data (dict): Input data containing meta info + and image tensor. + Returns: + torch.Tensor: An image in `Tensor`. + """ + return input_data['lq'] + + @staticmethod + def evaluate_outputs(model_cfg, + outputs: list, + dataset: Dataset, + metrics: Optional[str] = None, + out: Optional[str] = None, + metric_options: Optional[dict] = None, + format_only: bool = False, + **kwargs) -> None: + """Evaluation function implemented in mmedit. 
+
+        Args:
+            model_cfg (mmcv.Config): The model config.
+            outputs (list): A list of result of model inference.
+            dataset (Dataset): Input dataset to run test.
+            metrics (str): Evaluation metrics, which depend on
+                the codebase and the dataset, e.g., "PSNR", "SSIM" in mmedit.
+            out (str): Output result file in pickle format, defaults to `None`.
+            metric_options (dict): Custom options for evaluation, will be
+                kwargs for dataset.evaluate() function. Defaults to `None`.
+            format_only (bool): Format the output results without performing
+                evaluation. It is useful when you want to format the result
+                to a specific format and submit it to the test server.
+                Defaults to `False`.
+        """
+        if out:
+            logging.info(f'\nwriting results to {out}')
+            mmcv.dump(outputs, out)
+        # The Dataset doesn't need metrics
+        print('\n')
+        # print metrics
+        stats = dataset.evaluate(outputs)
+        for stat in stats:
+            print('Eval-{}: {}'.format(stat, stats[stat]))
+
+    def _preprocess_cfg(self, config: mmcv.Config, load_from_file: bool,
+                        is_static_cfg: bool,
+                        input_shape: Sequence[int]) -> None:
+        """Remove unnecessary information in config.
+
+        Args:
+            config (mmcv.Config): The input model config.
+            load_from_file (bool): Whether the input is a file name rather
+                than a loaded numpy array. If this variable is True, extra
+                preprocessing is required.
+            is_static_cfg (bool): Whether the config specifies a static
+                export. If this variable is True, the input image will be
+                resized to a fixed resolution.
+            input_shape (Sequence[int]): A list of two integers in
+                (width, height) format specifying input shape.
+                Defaults to `None`.
+        """
+        keys_to_remove = ['gt', 'gt_path']
+        # MMEdit doesn't support LoadImageFromWebcam.
+        # Remove "LoadImageFromFile" and related metakeys.
+        if not load_from_file:
+            config.test_pipeline.pop(0)
+            keys_to_remove.append('lq_path')
+
+        # Fix the input shape by 'Resize'
+        if is_static_cfg:
+            resize = {
+                'type': 'Resize',
+                'scale': (input_shape[0], input_shape[1]),
+                'keys': ['lq']
+            }
+            config.test_pipeline.insert(1, resize)
+
+        for key in keys_to_remove:
+            for pipeline in list(config.test_pipeline):
+                if 'key' in pipeline and key == pipeline['key']:
+                    config.test_pipeline.remove(pipeline)
+                if 'keys' in pipeline:
+                    while key in pipeline['keys']:
+                        pipeline['keys'].remove(key)
+                    if len(pipeline['keys']) == 0:
+                        config.test_pipeline.remove(pipeline)
+                if 'meta_keys' in pipeline:
+                    while key in pipeline['meta_keys']:
+                        pipeline['meta_keys'].remove(key)
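To make the static-shape branch of `_preprocess_cfg` concrete: for `input_shape=(128, 128)` the method inserts a fixed `Resize` at index 1 and strips ground-truth keys, so a test pipeline ends up roughly like the sketch below (the loader entry is a placeholder):

# Sketch of a test pipeline after _preprocess_cfg with a static input shape.
test_pipeline = [
    dict(type='LoadImageFromFile', io_backend='disk', key='lq'),  # placeholder
    dict(type='Resize', scale=(128, 128), keys=['lq']),  # inserted at index 1
    # ...remaining transforms, with 'gt'/'gt_path' entries removed...
]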
+ """ + + def __init__(self, + backend: Backend, + backend_files: Sequence[str], + device: str, + model_cfg: mmcv.Config, + deploy_cfg: Union[str, mmcv.Config] = None): + super().__init__() + self.deploy_cfg = deploy_cfg + self.test_cfg = model_cfg.test_cfg + self.allowed_metrics = {'PSNR': psnr, 'SSIM': ssim} + self._init_wrapper( + backend=backend, backend_files=backend_files, device=device) + + def _init_wrapper(self, backend: Backend, backend_files: Sequence[str], + device: str): + onnx_config = get_onnx_config(self.deploy_cfg) + output_names = onnx_config['output_names'] + self.wrapper = BaseBackendModel._build_wrapper( + backend=backend, + backend_files=backend_files, + device=device, + output_names=output_names) + + def forward(self, + lq: torch.Tensor, + test_mode: bool = False, + *args, + **kwargs) -> Union[list, dict]: + """Run test inference for restorer. + + We want forward() to output an image or a evaluation result. + When test_mode is set, the output is evaluation result. Otherwise + it is an image. + + Args: + lq (torch.Tensor): The input low-quality image of the model. + test_mode (bool): When test_mode is set, the output is evaluation + result. Otherwise it is an image. Default to `False`. + *args: Other arguments. + **kwargs: Other key-pair arguments. + + Returns: + list | dict: High resolution image or a evaluation results. + """ + + if test_mode: + return self.forward_test(lq, *args, **kwargs) + else: + return self.forward_dummy(lq, *args, **kwargs) + + def forward_test(self, + lq: torch.Tensor, + gt: Optional[torch.Tensor] = None, + *args, + **kwargs): + """Run inference for restorer to generate evaluation result. + + Args: + lq (torch.Tensor): The input low-quality image of the model. + gt (torch.Tensor): The ground truth of input image. Defaults to + `None`. + *args: Other arguments. + **kwargs: Other key-pair arguments. + + Returns: + dict: Evaluation results. + """ + outputs = self.forward_dummy(lq) + result = self.test_post_process(outputs, lq, gt) + return result + + def forward_dummy(self, lq: torch.Tensor, *args, **kwargs): + """Run test inference for restorer with backend wrapper. + + Args: + lq (torch.Tensor): The input low-quality image of the model. + + Returns: + list[np.ndarray] : High resolution image. + """ + outputs = self.wrapper({'input': lq}) + outputs = self.wrapper.output_to_list(outputs) + outputs = [out.detach().cpu().numpy() for out in outputs] + return outputs + + def evaluate(self, output: Union[torch.Tensor, np.ndarray], + gt: torch.Tensor): + """Evaluation function implemented in mmedit. + + Args: + output (torch.Tensor | np.ndarray): Model output with + shape (n, c, h, w). + gt (torch.Tensor): GT Tensor with shape (n, c, h, w). + + Returns: + dict: Evaluation results. + """ + crop_border = self.test_cfg.crop_border + + if isinstance(output, np.ndarray): + output = torch.from_numpy(output) + output = tensor2img(output) + gt = tensor2img(gt) + + eval_result = dict() + for metric in self.test_cfg.metrics: + eval_result[metric] = self.allowed_metrics[metric](output, gt, + crop_border) + return eval_result + + def test_post_process(self, + outputs: List[np.ndarray], + lq: torch.Tensor, + gt: Optional[torch.Tensor] = None): + """Get evaluation results by post-processing model outputs. + + Args: + output (list[np.ndarray]) : The output high resolution image. + lq (torch.Tensor): The input low-quality image of the model. + gt (torch.Tensor): The ground truth of input image, default is + `None`. + + Returns: + dict: Evaluation results. 
+ """ + if self.test_cfg is not None and self.test_cfg.get('metrics', None): + assert gt is not None, ( + 'evaluation with metrics must have gt images.') + results = dict(eval_result=self.evaluate(outputs[0], gt)) + else: + results = dict(lq=lq.cpu(), output=outputs) + if gt is not None: + results['gt'] = gt.cpu() + + return results + + def show_result(self, *args, **kwargs): + raise NotImplementedError + + +def build_super_resolution_model(model_files: Sequence[str], + model_cfg: Union[str, mmcv.Config], + deploy_cfg: Union[str, + mmcv.Config], device: str): + model_cfg = load_config(model_cfg)[0] + deploy_cfg = load_config(deploy_cfg)[0] + + backend = get_backend(deploy_cfg) + + backend_model = End2EndModel( + backend=backend, + backend_files=model_files, + device=device, + model_cfg=model_cfg, + deploy_cfg=deploy_cfg) + + return backend_model diff --git a/mmdeploy/mmedit/models/__init__.py b/mmdeploy/codebase/mmedit/models/__init__.py similarity index 100% rename from mmdeploy/mmedit/models/__init__.py rename to mmdeploy/codebase/mmedit/models/__init__.py diff --git a/mmdeploy/mmedit/models/backbones/sr_backbones/__init__.py b/mmdeploy/codebase/mmedit/models/backbones/__init__.py similarity index 100% rename from mmdeploy/mmedit/models/backbones/sr_backbones/__init__.py rename to mmdeploy/codebase/mmedit/models/backbones/__init__.py diff --git a/mmdeploy/mmedit/models/backbones/sr_backbones/srcnn.py b/mmdeploy/codebase/mmedit/models/backbones/srcnn.py similarity index 100% rename from mmdeploy/mmedit/models/backbones/sr_backbones/srcnn.py rename to mmdeploy/codebase/mmedit/models/backbones/srcnn.py diff --git a/mmdeploy/mmedit/__init__.py b/mmdeploy/codebase/mmocr/__init__.py similarity index 50% rename from mmdeploy/mmedit/__init__.py rename to mmdeploy/codebase/mmocr/__init__.py index d2b62e1cb6..33b69c74df 100644 --- a/mmdeploy/mmedit/__init__.py +++ b/mmdeploy/codebase/mmocr/__init__.py @@ -1,2 +1,2 @@ -from .export import * # noqa: F401,F403 +from .deploy import * # noqa: F401,F403 from .models import * # noqa: F401,F403 diff --git a/mmdeploy/codebase/mmocr/deploy/__init__.py b/mmdeploy/codebase/mmocr/deploy/__init__.py new file mode 100644 index 0000000000..43b39c5ed0 --- /dev/null +++ b/mmdeploy/codebase/mmocr/deploy/__init__.py @@ -0,0 +1,5 @@ +from .mmocr import MMOCR +from .text_detection import TextDetection +from .text_recognition import TextRecognition + +__all__ = ['MMOCR', 'TextDetection', 'TextRecognition'] diff --git a/mmdeploy/codebase/mmocr/deploy/mmocr.py b/mmdeploy/codebase/mmocr/deploy/mmocr.py new file mode 100644 index 0000000000..71613217c6 --- /dev/null +++ b/mmdeploy/codebase/mmocr/deploy/mmocr.py @@ -0,0 +1,141 @@ +from typing import Optional, Union + +import mmcv +import torch +from mmcv.utils import Registry +from torch.utils.data import DataLoader, Dataset + +from mmdeploy.codebase.base import CODEBASE, BaseTask, MMCodebase +from mmdeploy.utils import Codebase, get_task_type +from mmdeploy.utils.config_utils import load_config + + +def __build_mmocr_task(model_cfg: mmcv.Config, deploy_cfg: mmcv.Config, + device: str, registry: Registry) -> BaseTask: + task = get_task_type(deploy_cfg) + return registry.module_dict[task.value](model_cfg, deploy_cfg, device) + + +MMOCR_TASK = Registry('mmocr_tasks', build_func=__build_mmocr_task) + + +@CODEBASE.register_module(Codebase.MMOCR.value) +class MMOCR(MMCodebase): + """mmocr codebase class.""" + + task_registry = MMOCR_TASK + + def __init__(self): + super(MMOCR, self).__init__() + + @staticmethod + def 
build_task_processor(model_cfg: mmcv.Config, deploy_cfg: mmcv.Config, + device: str): + """The interface to build the task processors of mmocr. + + Args: + model_cfg (str | mmcv.Config): Model config file or loaded Config + object. + deploy_cfg (str | mmcv.Config): Deployment config file or loaded + Config object. + device (str): A string specifying device type. + + Returns: + BaseTask: A task processor. + """ + return MMOCR_TASK.build(model_cfg, deploy_cfg, device) + + @staticmethod + def build_dataset(dataset_cfg: Union[str, mmcv.Config], + dataset_type: str = 'val', + **kwargs) -> Dataset: + """Build dataset for mmocr. + + Args: + dataset_cfg (str | mmcv.Config): The input dataset config. + dataset_type (str): A string represents dataset type, e.g.: 'train' + , 'test', 'val'. Defaults to 'val'. + + Returns: + Dataset: A PyTorch dataset. + """ + from mmocr.datasets import build_dataset as build_dataset_mmocr + + dataset_cfg = load_config(dataset_cfg)[0] + assert dataset_type in dataset_cfg.data + data_cfg = dataset_cfg.data[dataset_type] + dataset = build_dataset_mmocr(data_cfg) + return dataset + + @staticmethod + def build_dataloader(dataset: Dataset, + samples_per_gpu: int, + workers_per_gpu: int, + num_gpus: int = 1, + dist: bool = False, + shuffle: bool = False, + seed: Optional[int] = None, + drop_last: bool = False, + persistent_workers: bool = True, + **kwargs) -> DataLoader: + """Build dataloader for mmocr. + + Args: + dataset (Dataset): Input dataset. + samples_per_gpu (int): Number of training samples on each GPU, i.e. + ,batch size of each GPU. + workers_per_gpu (int): How many subprocesses to use for data + loading for each GPU. + num_gpus (int): Number of GPUs. Only used in non-distributed + training. + dist (bool): Distributed training/test or not. Defaults to `False`. + shuffle (bool): Whether to shuffle the data at every epoch. + Defaults to `False`. + seed (int): An integer set to be seed. Default is `None`. + drop_last (bool): Whether to drop the last incomplete batch in + epoch. Default to `False`. + persistent_workers (bool): If `True`, the data loader will not + shutdown the worker processes after a dataset has been + consumed once. This allows to maintain the workers Dataset + instances alive. The argument also has effect in + PyTorch>=1.7.0. Default is `True`. + kwargs: Any other keyword argument to be used to initialize + DataLoader. + + Returns: + DataLoader: A PyTorch dataloader. + """ + from mmocr.datasets import build_dataloader as build_dataloader_mmocr + return build_dataloader_mmocr( + dataset, + samples_per_gpu, + workers_per_gpu, + num_gpus=num_gpus, + dist=dist, + shuffle=shuffle, + seed=seed, + drop_last=drop_last, + persistent_workers=persistent_workers, + **kwargs) + + @staticmethod + def single_gpu_test(model: torch.nn.Module, + data_loader: DataLoader, + show: bool = False, + out_dir: Optional[str] = None, + **kwargs): + """Run test with single gpu. + + Args: + model (torch.nn.Module): Input model from nn.Module. + data_loader (DataLoader): PyTorch data loader. + show (bool): Specifying whether to show plotted results. Defaults + to `False`. + out_dir (str): A directory to save results, defaults to `None`. + + Returns: + list: The prediction results. 
+ """ + from mmdet.apis import single_gpu_test + outputs = single_gpu_test(model, data_loader, show, out_dir, **kwargs) + return outputs diff --git a/mmdeploy/codebase/mmocr/deploy/text_detection.py b/mmdeploy/codebase/mmocr/deploy/text_detection.py new file mode 100644 index 0000000000..540426f569 --- /dev/null +++ b/mmdeploy/codebase/mmocr/deploy/text_detection.py @@ -0,0 +1,264 @@ +import logging +from typing import Any, Dict, Optional, Sequence, Tuple, Union + +import mmcv +import numpy as np +import torch +from mmcv.parallel import DataContainer, collate, scatter +from mmdet.datasets import replace_ImageToTensor +from torch import nn +from torch.utils.data import Dataset + +from mmdeploy.codebase.base import BaseTask +from mmdeploy.utils import Task +from .mmocr import MMOCR_TASK + + +@MMOCR_TASK.register_module(Task.TEXT_DETECTION.value) +class TextDetection(BaseTask): + """Text detection task class. + + Args: + model_cfg (mmcv.Config): Loaded model Config object.. + deploy_cfg (mmcv.Config): Loaded deployment Config object. + device (str): A string represents device type. + """ + + def __init__(self, model_cfg: mmcv.Config, deploy_cfg: mmcv.Config, + device: str): + super(TextDetection, self).__init__(model_cfg, deploy_cfg, device) + + def init_backend_model(self, + model_files: Optional[str] = None, + **kwargs) -> torch.nn.Module: + """Initialize backend model. + + Args: + model_files (Sequence[str]): Input model files. + + Returns: + nn.Module: An initialized backend model. + """ + from .text_detection_model import build_text_detection_model + model = build_text_detection_model( + model_files, self.model_cfg, self.deploy_cfg, device=self.device) + return model.eval() + + def init_pytorch_model(self, + model_checkpoint: Optional[str] = None, + cfg_options: Optional[Dict] = None, + **kwargs) -> torch.nn.Module: + """Initialize torch model. + + Args: + model_checkpoint (str): The checkpoint file of torch model, + defaults to `None`. + cfg_options (dict): Optional config key-pair parameters. + + Returns: + nn.Module: An initialized torch model generated by OpenMMLab + codebases. + """ + from mmocr.apis import init_detector + model = init_detector(self.model_cfg, model_checkpoint, self.device, + cfg_options) + + return model.eval() + + def create_input(self, + imgs: Union[str, np.ndarray], + input_shape: Sequence[int] = None) \ + -> Tuple[Dict, torch.Tensor]: + """Create input for segmentor. + + Args: + imgs (str | np.ndarray): Input image(s), accepted data type are + `str`, `np.ndarray`. + input_shape (list[int]): A list of two integer in (width, height) + format specifying input shape. Defaults to `None`. + + Returns: + tuple: (data, img), meta information for the input image and input. 
+ """ + if isinstance(imgs, (list, tuple)): + if not isinstance(imgs[0], (np.ndarray, str)): + raise AssertionError('imgs must be strings or numpy arrays') + + elif isinstance(imgs, (np.ndarray, str)): + imgs = [imgs] + else: + raise AssertionError('imgs must be strings or numpy arrays') + + if self.model_cfg.data.test['type'] == 'ConcatDataset': + self.model_cfg.data.test.pipeline = \ + self.model_cfg.data.test['datasets'][0].pipeline + + is_ndarray = isinstance(imgs[0], np.ndarray) + + if is_ndarray: + self.model_cfg.data.test.pipeline[0].type = 'LoadImageFromNdarray' + + test_pipeline = self.model_cfg.data.test.pipeline + test_pipeline = replace_ImageToTensor(test_pipeline) + # for static exporting + if input_shape is not None: + test_pipeline[1].img_scale = tuple(input_shape) + test_pipeline[1].transforms[0].keep_ratio = False + test_pipeline[1].transforms[0].img_scale = tuple(input_shape) + + from mmdet.datasets.pipelines import Compose + from mmocr.datasets import build_dataset # noqa: F401 + test_pipeline = Compose(test_pipeline) + + data_list = [] + for img in imgs: + # prepare data + if is_ndarray: + # directly add img + data = dict(img=img) + else: + # add information into dict + data = dict(img_info=dict(filename=img), img_prefix=None) + + # build the data pipeline + data = test_pipeline(data) + # get tensor from list to stack for batch mode (text detection) + data_list.append(data) + + if isinstance(data_list[0]['img'], list) and len(data_list) > 1: + raise Exception('aug test does not support ' + f'inference with batch size ' + f'{len(data_list)}') + + data = collate(data_list, samples_per_gpu=len(imgs)) + + # process img_metas + if isinstance(data['img_metas'], list): + data['img_metas'] = [ + img_metas.data[0] for img_metas in data['img_metas'] + ] + else: + data['img_metas'] = data['img_metas'].data + + if isinstance(data['img'], list): + data['img'] = [img.data for img in data['img']] + if isinstance(data['img'][0], list): + data['img'] = [img[0] for img in data['img']] + else: + data['img'] = data['img'].data + + if self.device != 'cpu': + data = scatter(data, [self.device])[0] + + return data, data['img'] + + def visualize(self, + model: nn.Module, + image: Union[str, np.ndarray], + result: list, + output_file: str, + window_name: str = '', + show_result: bool = False): + """Visualize predictions of a model. + + Args: + model (nn.Module): Input model. + image (str | np.ndarray): Input image to draw predictions on. + result (list): A list of predictions. + output_file (str): Output file to save drawn image. + window_name (str): The name of visualization window. Defaults to + an empty string. + show_result (bool): Whether to show result in windows, defaults + to `False`. + """ + show_img = mmcv.imread(image) if isinstance(image, str) else image + output_file = None if show_result else output_file + model.show_result( + show_img, + result, + out_file=output_file, + win_name=window_name, + show=show_result) + + @staticmethod + def run_inference(model: nn.Module, + model_inputs: Dict[str, torch.Tensor]) -> list: + """Run inference once for a segmentation model of mmseg. + + Args: + model (nn.Module): Input model. + model_inputs (dict): A dict containing model inputs tensor and + meta info. + + Returns: + list: The predictions of model inference. + """ + return model(**model_inputs, return_loss=False, rescale=True) + + @staticmethod + def get_partition_cfg(partition_type: str) -> Dict: + """Get a certain partition config. 
+
+        Args:
+            partition_type (str): A string specifying partition type.
+
+        Returns:
+            dict: A dictionary of partition config.
+        """
+        raise NotImplementedError('Not supported yet.')
+
+    @staticmethod
+    def get_tensor_from_input(input_data: Dict[str, Any]) -> torch.Tensor:
+        """Get input tensor from input data.
+
+        Args:
+            input_data (dict): Input data containing meta info and image
+                tensor.
+        Returns:
+            torch.Tensor: An image in `Tensor`.
+        """
+        if isinstance(input_data['img'], DataContainer):
+            return input_data['img'].data[0]
+        return input_data['img'][0]
+
+    @staticmethod
+    def evaluate_outputs(model_cfg,
+                         outputs: Sequence,
+                         dataset: Dataset,
+                         metrics: Optional[str] = None,
+                         out: Optional[str] = None,
+                         metric_options: Optional[dict] = None,
+                         format_only: bool = False):
+        """Perform post-processing on the predictions of the model.
+
+        Args:
+            model_cfg (mmcv.Config): The model config.
+            outputs (Sequence): A list of predictions of model inference.
+            dataset (Dataset): Input dataset to run test.
+            metrics (str): Evaluation metrics, which depend on
+                the codebase and the dataset, e.g., "acc" for text
+                recognition and "hmean-iou" for text detection.
+            out (str): Output result file in pickle format, defaults to `None`.
+            metric_options (dict): Custom options for evaluation, will be
+                kwargs for dataset.evaluate() function. Defaults to `None`.
+            format_only (bool): Format the output results without performing
+                evaluation. It is useful when you want to format the result
+                to a specific format and submit it to the test server.
+                Defaults to `False`.
+        """
+        if out:
+            logging.info(f'\nwriting results to {out}')
+            mmcv.dump(outputs, out)
+        kwargs = {} if metric_options is None else metric_options
+        if format_only:
+            dataset.format_results(outputs, **kwargs)
+        if metrics:
+            eval_kwargs = model_cfg.get('evaluation', {}).copy()
+            # hard-code way to remove EvalHook args
+            for key in [
+                    'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
+                    'rule'
+            ]:
+                eval_kwargs.pop(key, None)
+            eval_kwargs.update(dict(metric=metrics, **kwargs))
+            print(dataset.evaluate(outputs, **eval_kwargs))
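A small aside on `evaluate_outputs` above: the `evaluation` section of a model config mixes EvalHook scheduling options with metric kwargs, which is why those keys are popped before calling `dataset.evaluate`. A tiny sketch with made-up values:

# Sketch of the EvalHook-argument stripping above (values are made up).
eval_kwargs = dict(interval=1, save_best='hmean-iou', metric='hmean-iou')
for key in ['interval', 'tmpdir', 'start', 'gpu_collect', 'save_best', 'rule']:
    eval_kwargs.pop(key, None)
assert eval_kwargs == dict(metric='hmean-iou')  # safe for dataset.evaluate()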
+ """ + + def __init__( + self, + backend: Backend, + backend_files: Sequence[str], + device: str, + deploy_cfg: Union[str, mmcv.Config] = None, + model_cfg: Union[str, mmcv.Config] = None, + ): + super(End2EndModel, self).__init__() + model_cfg, deploy_cfg = load_config(model_cfg, deploy_cfg) + self.deploy_cfg = deploy_cfg + self.show_score = False + self.bbox_head = build_head(model_cfg.model.bbox_head) + self._init_wrapper( + backend=backend, backend_files=backend_files, device=device) + + def _init_wrapper(self, backend: Backend, backend_files: Sequence[str], + device: str): + """Initialize the wrapper of backends. + + Args: + backend (Backend): The backend enum, specifying backend type. + backend_files (Sequence[str]): Paths to all required backend files + (e.g. .onnx' for ONNX Runtime, '.param' and '.bin' for ncnn). + device (str): A string represents device type. + """ + onnx_config = get_onnx_config(self.deploy_cfg) + output_names = onnx_config['output_names'] + self.wrapper = BaseBackendModel._build_wrapper( + backend=backend, + backend_files=backend_files, + device=device, + output_names=output_names) + + def forward(self, img: Sequence[torch.Tensor], + img_metas: Sequence[Sequence[dict]], *args, **kwargs) -> list: + """Run forward inference. + + Args: + img (Sequence[torch.Tensor]): A list contains input image(s) + in [N x C x H x W] format. + img_metas (Sequence[Sequence[dict]]): A list of meta info for + image(s). + + Returns: + list: A list contains predictions. + """ + input_img = img[0].contiguous() + img_metas = img_metas[0] + outputs = self.forward_test(input_img, img_metas, *args, **kwargs) + rescale = kwargs.get('rescale', False) + if len(img_metas) > 1: + boundaries = [ + self.bbox_head.get_boundary( + *(outputs[i].unsqueeze(0)), [img_metas[i]], + rescale=rescale) for i in range(len(img_metas)) + ] + + else: + boundaries = [ + self.bbox_head.get_boundary( + *outputs, img_metas, rescale=rescale) + ] + return boundaries + + def forward_test(self, imgs: torch.Tensor, *args, **kwargs) -> \ + List[np.ndarray]: + """The interface for forward test. + + Args: + imgs (torch.Tensor): Input image(s) in [N x C x H x W] format. + + Returns: + List[np.ndarray]: A list of predictions of input images. + """ + outputs = self.wrapper({'input': imgs}) + outputs = self.wrapper.output_to_list(outputs) + return outputs + + def show_result(self, + img: np.ndarray, + result: list, + win_name: str, + show: bool = True, + score_thr: float = 0.3, + out_file: str = None): + """Show predictions of segmentation. + Args: + img: (np.ndarray): Input image to draw predictions. + result (list): A list of predictions. + win_name (str): The name of visualization window. + show (bool): Whether to show plotted image in windows. Defaults to + `True`. + score_thr: (float): The thresh of score. Defaults to `0.3`. + out_file (str): Output image file to save drawn predictions. + + Returns: + np.ndarray: Drawn image, only if not `show` or `out_file`. + """ + return TextDetectorMixin.show_result( + self, + img, + result, + score_thr=score_thr, + show=show, + win_name=win_name, + out_file=out_file) + + +def build_text_detection_model(model_files: Sequence[str], + model_cfg: Union[str, mmcv.Config], + deploy_cfg: Union[str, mmcv.Config], + device: str, **kwargs): + """Build text detection model for different backends. + + Args: + model_files (Sequence[str]): Input model file(s). + model_cfg (str | mmcv.Config): Input model config file or Config + object. 
+        deploy_cfg (str | mmcv.Config): Input deployment config file or
+            Config object.
+        device (str): Device to run the model on.
+
+    Returns:
+        BaseBackendModel: Text detector for a configured backend.
+    """
+    # load cfg if necessary
+    deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg)
+
+    backend = get_backend(deploy_cfg)
+    backend_text_detector = End2EndModel(
+        backend,
+        model_files,
+        device,
+        deploy_cfg=deploy_cfg,
+        model_cfg=model_cfg,
+        **kwargs)
+
+    return backend_text_detector
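A matching usage sketch for the text-detection builder, mirroring the detection example earlier; the paths and the meta dict are placeholders (a real run carries the full mmocr meta keys):

# Hypothetical usage of build_text_detection_model (placeholder paths).
import torch

text_detector = build_text_detection_model(
    model_files=['end2end.onnx'],                 # backend file(s)
    model_cfg='dbnet_r18_fpnc.py',                # placeholder mmocr config
    deploy_cfg='text-detection_onnxruntime.py',   # placeholder deploy config
    device='cpu')
img = [torch.rand(1, 3, 640, 640)]                # list of [N, C, H, W] inputs
img_metas = [[dict(img_shape=(640, 640, 3))]]     # minimal placeholder meta
boundaries = text_detector(img, img_metas)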
+ """ + if isinstance(imgs, (list, tuple)): + if not isinstance(imgs[0], (np.ndarray, str)): + raise AssertionError('imgs must be strings or numpy arrays') + + elif isinstance(imgs, (np.ndarray, str)): + imgs = [imgs] + else: + raise AssertionError('imgs must be strings or numpy arrays') + + if self.model_cfg.data.test['type'] == 'ConcatDataset': + self.model_cfg.data.test.pipeline = \ + self.model_cfg.data.test['datasets'][0].pipeline + + is_ndarray = isinstance(imgs[0], np.ndarray) + + if is_ndarray: + self.model_cfg.data.test.pipeline[0].type = 'LoadImageFromNdarray' + + test_pipeline = self.model_cfg.data.test.pipeline + test_pipeline = replace_ImageToTensor(test_pipeline) + # for static exporting + if input_shape is not None: + test_pipeline[1].img_scale = tuple(input_shape) + test_pipeline[1].transforms[0].keep_ratio = False + test_pipeline[1].transforms[0].img_scale = tuple(input_shape) + + from mmdet.datasets.pipelines import Compose + from mmocr.datasets import build_dataset # noqa: F401 + test_pipeline = Compose(test_pipeline) + + data_list = [] + for img in imgs: + # prepare data + if is_ndarray: + # directly add img + data = dict(img=img) + else: + # add information into dict + data = dict(img_info=dict(filename=img), img_prefix=None) + + # build the data pipeline + data = test_pipeline(data) + # get tensor from list to stack for batch mode (text detection) + data_list.append(data) + + if isinstance(data_list[0]['img'], list) and len(data_list) > 1: + raise Exception('aug test does not support ' + f'inference with batch size ' + f'{len(data_list)}') + + data = collate(data_list, samples_per_gpu=len(imgs)) + + # process img_metas + if isinstance(data['img_metas'], list): + data['img_metas'] = [ + img_metas.data[0] for img_metas in data['img_metas'] + ] + else: + data['img_metas'] = data['img_metas'].data + + if isinstance(data['img'], list): + data['img'] = [img.data for img in data['img']] + if isinstance(data['img'][0], list): + data['img'] = [img[0] for img in data['img']] + else: + data['img'] = data['img'].data + + if self.device != 'cpu': + data = scatter(data, [self.device])[0] + + return data, data['img'] + + def visualize(self, + model: nn.Module, + image: Union[str, np.ndarray], + result: list, + output_file: str, + window_name: str = '', + show_result: bool = False): + """Visualize predictions of a model. + + Args: + model (nn.Module): Input model. + image (str | np.ndarray): Input image to draw predictions on. + result (list): A list of predictions. + output_file (str): Output file to save drawn image. + window_name (str): The name of visualization window. Defaults to + an empty string. + show_result (bool): Whether to show result in windows, defaults + to `False`. + """ + show_img = mmcv.imread(image) if isinstance(image, str) else image + output_file = None if show_result else output_file + model.show_result( + show_img, + result, + out_file=output_file, + win_name=window_name, + show=show_result) + + @staticmethod + def run_inference(model: nn.Module, + model_inputs: Dict[str, torch.Tensor]) -> list: + """Run inference once for a segmentation model of mmseg. + + Args: + model (nn.Module): Input model. + model_inputs (dict): A dict containing model inputs tensor and + meta info. + + Returns: + list: The predictions of model inference. + """ + return model(**model_inputs, return_loss=False, rescale=True) + + @staticmethod + def get_partition_cfg(partition_type: str) -> Dict: + """Get a certain partition config. 
+
+        Args:
+            partition_type (str): A string specifying partition type.
+
+        Returns:
+            dict: A dictionary of partition config.
+        """
+        raise NotImplementedError('Not supported yet.')
+
+    @staticmethod
+    def get_tensor_from_input(input_data: Dict[str, Any]) -> torch.Tensor:
+        """Get input tensor from input data.
+
+        Args:
+            input_data (dict): Input data containing meta info and image
+                tensor.
+
+        Returns:
+            torch.Tensor: An image in `Tensor`.
+        """
+        if isinstance(input_data['img'], DataContainer):
+            return input_data['img'].data[0]
+        return input_data['img'][0]
+
+    @staticmethod
+    def evaluate_outputs(model_cfg: mmcv.Config,
+                         outputs: Sequence,
+                         dataset: Dataset,
+                         metrics: Optional[str] = None,
+                         out: Optional[str] = None,
+                         metric_options: Optional[dict] = None,
+                         format_only: bool = False):
+        """Perform post-processing to predictions of model.
+
+        Args:
+            model_cfg (mmcv.Config): The model config.
+            outputs (list): A list of predictions of model inference.
+            dataset (Dataset): Input dataset to run test.
+            metrics (str): Evaluation metrics, which depends on
+                the codebase and the dataset, e.g., "acc" for text
+                recognition, and "hmean-iou" for text detection.
+            out (str): Output result file in pickle format, defaults to `None`.
+            metric_options (dict): Custom options for evaluation, will be
+                kwargs for dataset.evaluate() function. Defaults to `None`.
+            format_only (bool): Format the output results without performing
+                evaluation. It is useful when you want to format the result
+                to a specific format and submit it to the test server. Defaults
+                to `False`.
+        """
+        if out:
+            logging.info(f'\nwriting results to {out}')
+            mmcv.dump(outputs, out)
+        kwargs = {} if metric_options is None else metric_options
+        if format_only:
+            dataset.format_results(outputs, **kwargs)
+        if metrics:
+            eval_kwargs = model_cfg.get('evaluation', {}).copy()
+            # hard-code way to remove EvalHook args
+            for key in [
+                    'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
+                    'rule'
+            ]:
+                eval_kwargs.pop(key, None)
+            eval_kwargs.update(dict(metric=metrics, **kwargs))
+            logging.info(dataset.evaluate(outputs, **eval_kwargs))
diff --git a/mmdeploy/codebase/mmocr/deploy/text_recognition_model.py b/mmdeploy/codebase/mmocr/deploy/text_recognition_model.py
new file mode 100644
index 0000000000..9f6b74017a
--- /dev/null
+++ b/mmdeploy/codebase/mmocr/deploy/text_recognition_model.py
@@ -0,0 +1,172 @@
+from typing import List, Sequence, Union
+
+import mmcv
+import numpy as np
+import torch
+from mmocr.models.builder import build_convertor
+from mmocr.models.textrecog import BaseRecognizer
+
+from mmdeploy.codebase.base import BaseBackendModel
+from mmdeploy.utils import Backend, get_backend, get_onnx_config, load_config
+
+
+class End2EndModel(BaseBackendModel):
+    """End to end model for inference of text recognition.
+
+    Args:
+        backend (Backend): The backend enum, specifying backend type.
+        backend_files (Sequence[str]): Paths to all required backend files
+            (e.g. '.onnx' for ONNX Runtime, '.param' and '.bin' for ncnn).
+        device (str): A string representing device type.
+        deploy_cfg (str | mmcv.Config): Deployment config file or loaded Config
+            object.
+        model_cfg (str | mmcv.Config): Model config file or loaded Config
+            object.
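+
+    Example (a minimal, illustrative construction sketch; the backend file
+    name is hypothetical):
+        >>> from mmdeploy.utils import Backend
+        >>> model = End2EndModel(Backend.ONNXRUNTIME, ['end2end.onnx'],
+        ...                      'cpu', deploy_cfg, model_cfg)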
+ """ + + def __init__( + self, + backend: Backend, + backend_files: Sequence[str], + device: str, + deploy_cfg: Union[str, mmcv.Config] = None, + model_cfg: Union[str, mmcv.Config] = None, + ): + super(End2EndModel, self).__init__() + model_cfg, deploy_cfg = load_config(model_cfg, deploy_cfg) + self.deploy_cfg = deploy_cfg + self.show_score = False + label_convertor = model_cfg.model.label_convertor + assert label_convertor is not None, 'model_cfg contains no label ' + 'convertor' + max_seq_len = 40 # default value in EncodeDecodeRecognizer of mmocr + label_convertor.update(max_seq_len=max_seq_len) + self.label_convertor = build_convertor(label_convertor) + self._init_wrapper( + backend=backend, backend_files=backend_files, device=device) + + def _init_wrapper(self, backend: Backend, backend_files: Sequence[str], + device: str): + """Initialize the wrapper of backends. + + Args: + backend (Backend): The backend enum, specifying backend type. + backend_files (Sequence[str]): Paths to all required backend files + (e.g. .onnx' for ONNX Runtime, '.param' and '.bin' for ncnn). + device (str): A string represents device type. + """ + onnx_config = get_onnx_config(self.deploy_cfg) + output_names = onnx_config['output_names'] + self.wrapper = BaseBackendModel._build_wrapper( + backend=backend, + backend_files=backend_files, + device=device, + output_names=output_names) + + def forward(self, img: Sequence[torch.Tensor], + img_metas: Sequence[Sequence[dict]], *args, **kwargs): + """Run forward inference. + + Args: + imgs (torch.Tensor | Sequence[torch.Tensor]): Image input tensor. + img_metas (Sequence[dict]): List of image information. + + Returns: + list[str]: Text label result of each image. + """ + if isinstance(img, list): + for idx, each_img in enumerate(img): + if each_img.dim() == 3: + img[idx] = each_img.unsqueeze(0) + img = img[0] # avoid aug_test + img_metas = img_metas[0] + else: + if len(img_metas) == 1 and isinstance(img_metas[0], list): + img_metas = img_metas[0] + + return self.forward_test(img, img_metas, **kwargs) + + def forward_test(self, imgs: torch.Tensor, + img_metas: Sequence[Sequence[dict]], *args, **kwargs) -> \ + List[np.ndarray]: + """The interface for forward test. + + Args: + imgs (torch.Tensor): Image input tensor. + img_metas (Sequence[dict]): List of image information. + + Returns: + list[str]: Text label result of each image. + """ + pred = self.wrapper({'input': imgs})['output'] + label_indexes, label_scores = self.label_convertor.tensor2idx( + pred, img_metas) + label_strings = self.label_convertor.idx2str(label_indexes) + + # flatten batch results + results = [] + for string, score in zip(label_strings, label_scores): + results.append(dict(text=string, score=score)) + + return results + + def show_result(self, + img: np.ndarray, + result: list, + win_name: str, + show: bool = True, + score_thr: float = 0.3, + out_file: str = None): + """Show predictions of segmentation. + Args: + img: (np.ndarray): Input image to draw predictions. + result (list): A list of predictions. + win_name (str): The name of visualization window. + show (bool): Whether to show plotted image in windows. Defaults to + `True`. + score_thr: (float): The thresh of score. Defaults to `0.3`. + out_file (str): Output image file to save drawn predictions. + + Returns: + np.ndarray: Drawn image, only if not `show` or `out_file`. 
+ """ + return BaseRecognizer.show_result( + self, + img, + result, + score_thr=score_thr, + show=show, + win_name=win_name, + out_file=out_file) + + +def build_text_recognition_model(model_files: Sequence[str], + model_cfg: Union[str, mmcv.Config], + deploy_cfg: Union[str, mmcv.Config], + device: str, **kwargs): + """Build text recognition model for different backends. + + Args: + model_files (Sequence[str]): Input model file(s). + model_cfg (str | mmcv.Config): Input model config file or Config + object. + deploy_cfg (str | mmcv.Config): Input deployment config file or + Config object. + device (str): Device to input model. + + Returns: + BaseBackendModel: Text recognizer for a configured backend. + """ + # load cfg if necessary + deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg) + + backend = get_backend(deploy_cfg) + backend_text_recognizer = End2EndModel( + backend, + model_files, + device, + deploy_cfg=deploy_cfg, + model_cfg=model_cfg, + **kwargs) + + return backend_text_recognizer diff --git a/mmdeploy/codebase/mmocr/models/__init__.py b/mmdeploy/codebase/mmocr/models/__init__.py new file mode 100644 index 0000000000..dd24cc8e19 --- /dev/null +++ b/mmdeploy/codebase/mmocr/models/__init__.py @@ -0,0 +1,2 @@ +from .text_detection import * # noqa: F401,F403 +from .text_recognition import * # noqa: F401,F403 diff --git a/mmdeploy/codebase/mmocr/models/text_detection/__init__.py b/mmdeploy/codebase/mmocr/models/text_detection/__init__.py new file mode 100644 index 0000000000..fb8a50e832 --- /dev/null +++ b/mmdeploy/codebase/mmocr/models/text_detection/__init__.py @@ -0,0 +1,6 @@ +from .fpn_cat import fpnc__forward__tensorrt +from .single_stage_text_detector import single_stage_text_detector__simple_test + +__all__ = [ + 'fpnc__forward__tensorrt', 'single_stage_text_detector__simple_test' +] diff --git a/mmdeploy/mmocr/models/textdet/necks/fpn_cat.py b/mmdeploy/codebase/mmocr/models/text_detection/fpn_cat.py similarity index 100% rename from mmdeploy/mmocr/models/textdet/necks/fpn_cat.py rename to mmdeploy/codebase/mmocr/models/text_detection/fpn_cat.py diff --git a/mmdeploy/mmocr/models/textdet/detectors/single_stage_text_detector.py b/mmdeploy/codebase/mmocr/models/text_detection/single_stage_text_detector.py similarity index 100% rename from mmdeploy/mmocr/models/textdet/detectors/single_stage_text_detector.py rename to mmdeploy/codebase/mmocr/models/text_detection/single_stage_text_detector.py diff --git a/mmdeploy/codebase/mmocr/models/text_recognition/__init__.py b/mmdeploy/codebase/mmocr/models/text_recognition/__init__.py new file mode 100644 index 0000000000..dd0eb24297 --- /dev/null +++ b/mmdeploy/codebase/mmocr/models/text_recognition/__init__.py @@ -0,0 +1,13 @@ +from .base import base_recognizer__forward +from .crnn_decoder import crnndecoder__forward_train__ncnn +from .encode_decode_recognizer import encode_decode_recognizer__simple_test +from .lstm_layer import bidirectionallstm__forward__ncnn +from .sar import SARNet +from .sar_decoder import * # noqa: F401,F403 +from .sar_encoder import sar_encoder__forward + +__all__ = [ + 'base_recognizer__forward', 'crnndecoder__forward_train__ncnn', + 'encode_decode_recognizer__simple_test', + 'bidirectionallstm__forward__ncnn', 'sar_encoder__forward', 'SARNet' +] diff --git a/mmdeploy/mmocr/models/textrecog/recognizer/base.py b/mmdeploy/codebase/mmocr/models/text_recognition/base.py similarity index 100% rename from mmdeploy/mmocr/models/textrecog/recognizer/base.py rename to 
mmdeploy/codebase/mmocr/models/text_recognition/base.py diff --git a/mmdeploy/mmocr/models/textrecog/decoders/crnn_decoder.py b/mmdeploy/codebase/mmocr/models/text_recognition/crnn_decoder.py similarity index 100% rename from mmdeploy/mmocr/models/textrecog/decoders/crnn_decoder.py rename to mmdeploy/codebase/mmocr/models/text_recognition/crnn_decoder.py diff --git a/mmdeploy/mmocr/models/textrecog/recognizer/encode_decode_recognizer.py b/mmdeploy/codebase/mmocr/models/text_recognition/encode_decode_recognizer.py similarity index 100% rename from mmdeploy/mmocr/models/textrecog/recognizer/encode_decode_recognizer.py rename to mmdeploy/codebase/mmocr/models/text_recognition/encode_decode_recognizer.py diff --git a/mmdeploy/mmocr/models/textrecog/layers/lstm_layer.py b/mmdeploy/codebase/mmocr/models/text_recognition/lstm_layer.py similarity index 100% rename from mmdeploy/mmocr/models/textrecog/layers/lstm_layer.py rename to mmdeploy/codebase/mmocr/models/text_recognition/lstm_layer.py diff --git a/mmdeploy/mmocr/models/textrecog/recognizer/sar.py b/mmdeploy/codebase/mmocr/models/text_recognition/sar.py similarity index 97% rename from mmdeploy/mmocr/models/textrecog/recognizer/sar.py rename to mmdeploy/codebase/mmocr/models/text_recognition/sar.py index 9317e3d758..186c8a6c5c 100644 --- a/mmdeploy/mmocr/models/textrecog/recognizer/sar.py +++ b/mmdeploy/codebase/mmocr/models/text_recognition/sar.py @@ -4,8 +4,8 @@ import torch.nn as nn from mmdeploy.core import MODULE_REWRITER -from mmdeploy.mmocr.utils.cfg_utils import get_resize_ocr from mmdeploy.utils import is_dynamic_shape +from ..utils import get_resize_ocr @MODULE_REWRITER.register_rewrite_module( diff --git a/mmdeploy/mmocr/models/textrecog/decoders/sar_decoder.py b/mmdeploy/codebase/mmocr/models/text_recognition/sar_decoder.py similarity index 100% rename from mmdeploy/mmocr/models/textrecog/decoders/sar_decoder.py rename to mmdeploy/codebase/mmocr/models/text_recognition/sar_decoder.py diff --git a/mmdeploy/mmocr/models/textrecog/encoders/sar_encoder.py b/mmdeploy/codebase/mmocr/models/text_recognition/sar_encoder.py similarity index 100% rename from mmdeploy/mmocr/models/textrecog/encoders/sar_encoder.py rename to mmdeploy/codebase/mmocr/models/text_recognition/sar_encoder.py diff --git a/mmdeploy/mmocr/utils/cfg_utils.py b/mmdeploy/codebase/mmocr/models/utils.py similarity index 100% rename from mmdeploy/mmocr/utils/cfg_utils.py rename to mmdeploy/codebase/mmocr/models/utils.py diff --git a/mmdeploy/mmocr/__init__.py b/mmdeploy/codebase/mmseg/__init__.py similarity index 50% rename from mmdeploy/mmocr/__init__.py rename to mmdeploy/codebase/mmseg/__init__.py index d2b62e1cb6..33b69c74df 100644 --- a/mmdeploy/mmocr/__init__.py +++ b/mmdeploy/codebase/mmseg/__init__.py @@ -1,2 +1,2 @@ -from .export import * # noqa: F401,F403 +from .deploy import * # noqa: F401,F403 from .models import * # noqa: F401,F403 diff --git a/mmdeploy/codebase/mmseg/deploy/__init__.py b/mmdeploy/codebase/mmseg/deploy/__init__.py new file mode 100644 index 0000000000..3bf727b702 --- /dev/null +++ b/mmdeploy/codebase/mmseg/deploy/__init__.py @@ -0,0 +1,5 @@ +from .mmsegmentation import MMSegmentation +from .segmentation import Segmentation +from .utils import convert_syncbatchnorm + +__all__ = ['convert_syncbatchnorm', 'MMSegmentation', 'Segmentation'] diff --git a/mmdeploy/codebase/mmseg/deploy/mmsegmentation.py b/mmdeploy/codebase/mmseg/deploy/mmsegmentation.py new file mode 100644 index 0000000000..a1ee9f9a64 --- /dev/null +++ 
b/mmdeploy/codebase/mmseg/deploy/mmsegmentation.py
@@ -0,0 +1,141 @@
+from typing import Optional, Union
+
+import mmcv
+import torch
+from mmcv.utils import Registry
+from torch.utils.data import DataLoader, Dataset
+
+from mmdeploy.codebase.base import CODEBASE, BaseTask, MMCodebase
+from mmdeploy.utils import Codebase, get_task_type
+
+
+def __build_mmseg_task(model_cfg: mmcv.Config, deploy_cfg: mmcv.Config,
+                       device: str, registry: Registry) -> BaseTask:
+    task = get_task_type(deploy_cfg)
+    return registry.module_dict[task.value](model_cfg, deploy_cfg, device)
+
+
+MMSEG_TASK = Registry('mmseg_tasks', build_func=__build_mmseg_task)
+
+
+@CODEBASE.register_module(Codebase.MMSEG.value)
+class MMSegmentation(MMCodebase):
+    """mmsegmentation codebase class."""
+
+    task_registry = MMSEG_TASK
+
+    def __init__(self):
+        super(MMSegmentation, self).__init__()
+
+    @staticmethod
+    def build_task_processor(model_cfg: mmcv.Config, deploy_cfg: mmcv.Config,
+                             device: str):
+        """The interface to build the task processors of mmseg.
+
+        Args:
+            model_cfg (str | mmcv.Config): Model config file.
+            deploy_cfg (str | mmcv.Config): Deployment config file.
+            device (str): A string specifying device type.
+
+        Returns:
+            BaseTask: A task processor.
+        """
+        return MMSEG_TASK.build(model_cfg, deploy_cfg, device)
+
+    @staticmethod
+    def build_dataset(dataset_cfg: Union[str, mmcv.Config],
+                      dataset_type: str = 'val',
+                      **kwargs) -> Dataset:
+        """Build dataset for segmentation.
+
+        Args:
+            dataset_cfg (str | mmcv.Config): The input dataset config.
+            dataset_type (str): A string representing dataset type, e.g.
+                'train', 'test', 'val'. Defaults to 'val'.
+
+        Returns:
+            Dataset: A PyTorch dataset.
+        """
+        from mmseg.datasets import build_dataset as build_dataset_mmseg
+
+        assert dataset_type in dataset_cfg.data
+        data_cfg = dataset_cfg.data[dataset_type]
+        dataset = build_dataset_mmseg(data_cfg)
+        return dataset
+
+    @staticmethod
+    def build_dataloader(dataset: Dataset,
+                         samples_per_gpu: int,
+                         workers_per_gpu: int,
+                         num_gpus: int = 1,
+                         dist: bool = False,
+                         shuffle: bool = False,
+                         seed: Optional[int] = None,
+                         drop_last: bool = False,
+                         pin_memory: bool = True,
+                         persistent_workers: bool = True,
+                         **kwargs) -> DataLoader:
+        """Build dataloader for segmentation.
+
+        Args:
+            dataset (Dataset): Input dataset.
+            samples_per_gpu (int): Number of training samples on each GPU,
+                i.e., batch size of each GPU.
+            workers_per_gpu (int): How many subprocesses to use for data
+                loading for each GPU.
+            num_gpus (int): Number of GPUs. Only used in non-distributed
+                training.
+            dist (bool): Distributed training/test or not. Defaults to
+                `False`.
+            shuffle (bool): Whether to shuffle the data at every epoch.
+                Defaults to `False`.
+            seed (int): An integer set to be seed. Default is `None`.
+            drop_last (bool): Whether to drop the last incomplete batch in
+                epoch. Default to `False`.
+            pin_memory (bool): Whether to use pin_memory in DataLoader.
+                Default is `True`.
+            persistent_workers (bool): If `True`, the data loader will not
+                shutdown the worker processes after a dataset has been
+                consumed once. This keeps the worker Dataset instances
+                alive. The argument only takes effect in PyTorch>=1.7.0.
+                Default is `True`.
+            kwargs: Any other keyword argument to be used to initialize
+                DataLoader.
+
+        Returns:
+            DataLoader: A PyTorch dataloader.
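+
+        Example (a minimal, illustrative usage sketch; assumes `model_cfg`
+        contains a valid mmseg dataset config):
+            >>> dataset = MMSegmentation.build_dataset(model_cfg, 'val')
+            >>> dataloader = MMSegmentation.build_dataloader(
+            ...     dataset, samples_per_gpu=1, workers_per_gpu=1)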
+ """ + from mmseg.datasets import build_dataloader as build_dataloader_mmseg + return build_dataloader_mmseg( + dataset, + samples_per_gpu, + workers_per_gpu, + num_gpus=num_gpus, + dist=dist, + shuffle=shuffle, + seed=seed, + drop_last=drop_last, + pin_memory=pin_memory, + persistent_workers=persistent_workers, + **kwargs) + + @staticmethod + def single_gpu_test(model: torch.nn.Module, + data_loader: DataLoader, + show: bool = False, + out_dir: Optional[str] = None, + **kwargs): + """Run test with single gpu. + + Args: + model (torch.nn.Module): Input model from nn.Module. + data_loader (DataLoader): PyTorch data loader. + show (bool): Specifying whether to show plotted results. Defaults + to `False`. + out_dir (str): A directory to save results, defaults to `None`. + + Returns: + list: The prediction results. + """ + from mmseg.apis import single_gpu_test + outputs = single_gpu_test(model, data_loader, show, out_dir, **kwargs) + return outputs diff --git a/mmdeploy/codebase/mmseg/deploy/segmentation.py b/mmdeploy/codebase/mmseg/deploy/segmentation.py new file mode 100644 index 0000000000..8545c83555 --- /dev/null +++ b/mmdeploy/codebase/mmseg/deploy/segmentation.py @@ -0,0 +1,216 @@ +import logging +from typing import Any, Dict, Optional, Sequence, Tuple, Union + +import mmcv +import numpy as np +import torch +from torch.utils.data import Dataset + +from mmdeploy.codebase.base import BaseTask +from mmdeploy.utils import Task +from .mmsegmentation import MMSEG_TASK + + +@MMSEG_TASK.register_module(Task.SEGMENTATION.value) +class Segmentation(BaseTask): + """Segmentation task class. + + Args: + model_cfg (mmcv.Config): Original PyTorch model config file. + deploy_cfg (mmcv.Config): Deployment config file or loaded Config + object. + device (str): A string represents device type. + """ + + def __init__(self, model_cfg: mmcv.Config, deploy_cfg: mmcv.Config, + device: str): + super(Segmentation, self).__init__(model_cfg, deploy_cfg, device) + + def init_backend_model(self, + model_files: Optional[str] = None, + **kwargs) -> torch.nn.Module: + """Initialize backend model. + + Args: + model_files (Sequence[str]): Input model files. + + Returns: + nn.Module: An initialized backend model. + """ + from .segmentation_model import build_segmentation_model + model = build_segmentation_model( + model_files, self.model_cfg, self.deploy_cfg, device=self.device) + return model.eval() + + def init_pytorch_model(self, + model_checkpoint: Optional[str] = None, + cfg_options: Optional[Dict] = None, + **kwargs) -> torch.nn.Module: + """Initialize torch model. + + Args: + model_checkpoint (str): The checkpoint file of torch model, + defaults to `None`. + cfg_options (dict): Optional config key-pair parameters. + + Returns: + nn.Module: An initialized torch model generated by OpenMMLab + codebases. + """ + from mmseg.apis import init_segmentor + from mmdeploy.codebase.mmseg.deploy import convert_syncbatchnorm + model = init_segmentor(self.model_cfg, model_checkpoint, self.device) + model = convert_syncbatchnorm(model) + + return model.eval() + + def create_input(self, + imgs: Union[str, np.ndarray], + input_shape: Sequence[int] = None) \ + -> Tuple[Dict, torch.Tensor]: + """Create input for segmentor. + + Args: + imgs (Any): Input image(s), accepted data type are `str`, + `np.ndarray`, `torch.Tensor`. + input_shape (list[int]): A list of two integer in (width, height) + format specifying input shape. Defaults to `None`. + + Returns: + tuple: (data, img), meta information for the input image and input. 
+ """ + from mmseg.apis.inference import LoadImage + from mmseg.datasets.pipelines import Compose + from mmcv.parallel import collate, scatter + + cfg = self.model_cfg.copy() + if not isinstance(imgs, (list, tuple)): + imgs = [imgs] + + if isinstance(imgs[0], np.ndarray): + cfg = cfg.copy() + # set loading pipeline type + cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam' + # for static exporting + if input_shape is not None: + cfg.data.test.pipeline[1]['img_scale'] = tuple(input_shape) + cfg.data.test.pipeline[1]['transforms'][0]['keep_ratio'] = False + cfg.data.test.pipeline = [LoadImage()] + cfg.data.test.pipeline[1:] + + test_pipeline = Compose(cfg.data.test.pipeline) + data_list = [] + for img in imgs: + # prepare data + data = dict(img=img) + # build the data pipeline + data = test_pipeline(data) + data_list.append(data) + + data = collate(data_list, samples_per_gpu=len(imgs)) + + data['img_metas'] = [ + img_metas.data[0] for img_metas in data['img_metas'] + ] + data['img'] = [img.data[0][None, :] for img in data['img']] + if self.device != 'cpu': + data = scatter(data, [self.device])[0] + + return data, data['img'] + + def visualize(self, + model, + image: Union[str, np.ndarray], + result: list, + output_file: str, + window_name: str = '', + show_result: bool = False, + opacity: float = 0.5): + """Visualize predictions of a model. + + Args: + model (nn.Module): Input model. + image (str | np.ndarray): Input image to draw predictions on. + result (list): A list of predictions. + output_file (str): Output file to save drawn image. + window_name (str): The name of visualization window. Defaults to + an empty string. + show_result (bool): Whether to show result in windows, defaults + to `False`. + opacity: (float): Opacity of painted segmentation map. + Defaults to `0.5`. + """ + show_img = mmcv.imread(image) if isinstance(image, str) else image + output_file = None if show_result else output_file + # Need to wrapper the result with list for mmseg + result = [result] + model.show_result( + show_img, + result, + out_file=output_file, + win_name=window_name, + show=show_result, + opacity=opacity) + + @staticmethod + def run_inference(model, model_inputs: Dict[str, torch.Tensor]): + """Run inference once for a segmentation model of mmseg. + + Args: + model (nn.Module): Input model. + model_inputs (dict): A dict containing model inputs tensor and + meta info. + + Returns: + list: The predictions of model inference. + """ + return model(**model_inputs, return_loss=False, rescale=True) + + @staticmethod + def get_partition_cfg(partition_type: str) -> Dict: + raise NotImplementedError('Not supported yet.') + + @staticmethod + def get_tensor_from_input(input_data: Dict[str, Any]) -> torch.Tensor: + """Get input tensor from input data. + + Args: + input_data (dict): Input data containing meta info and image + tensor. + Returns: + torch.Tensor: An image in `Tensor`. + """ + return input_data['img'][0] + + @staticmethod + def evaluate_outputs(model_cfg, + outputs: Sequence, + dataset: Dataset, + metrics: Optional[str] = None, + out: Optional[str] = None, + metric_options: Optional[dict] = None, + format_only: bool = False): + """Perform post-processing to predictions of model. + + Args: + outputs (list): A list of predictions of model inference. + dataset (Dataset): Input dataset to run test. + model_cfg (mmcv.Config): The model config. 
+            metrics (str): Evaluation metrics, which depends on
+                the codebase and the dataset, e.g., "mIoU" for generic
+                datasets, and "cityscapes" for Cityscapes in mmseg.
+            out (str): Output result file in pickle format, defaults to `None`.
+            metric_options (dict): Custom options for evaluation, will be
+                kwargs for dataset.evaluate() function. Defaults to `None`.
+            format_only (bool): Format the output results without performing
+                evaluation. It is useful when you want to format the result
+                to a specific format and submit it to the test server. Defaults
+                to `False`.
+        """
+        if out:
+            logging.info(f'\nwriting results to {out}')
+            mmcv.dump(outputs, out)
+        kwargs = {} if metric_options is None else metric_options
+        if format_only:
+            dataset.format_results(outputs, **kwargs)
+        if metrics:
+            dataset.evaluate(outputs, metrics, **kwargs)
diff --git a/mmdeploy/codebase/mmseg/deploy/segmentation_model.py b/mmdeploy/codebase/mmseg/deploy/segmentation_model.py
new file mode 100644
index 0000000000..b1a30bccaa
--- /dev/null
+++ b/mmdeploy/codebase/mmseg/deploy/segmentation_model.py
@@ -0,0 +1,190 @@
+from typing import List, Sequence, Union
+
+import mmcv
+import numpy as np
+import torch
+from mmseg.datasets import DATASETS
+from mmseg.models.segmentors.base import BaseSegmentor
+from mmseg.ops import resize
+
+from mmdeploy.codebase.base import BaseBackendModel
+from mmdeploy.utils import Backend, get_backend, get_onnx_config, load_config
+
+
+class End2EndModel(BaseBackendModel):
+    """End to end model for inference of segmentation.
+
+    Args:
+        backend (Backend): The backend enum, specifying backend type.
+        backend_files (Sequence[str]): Paths to all required backend files
+            (e.g. '.onnx' for ONNX Runtime, '.param' and '.bin' for ncnn).
+        device (str): A string representing device type.
+        class_names (Sequence[str]): A list of string specifying class names.
+        palette (np.ndarray): The palette of segmentation map.
+        deploy_cfg (str | mmcv.Config): Deployment config file or loaded Config
+            object.
+    """
+
+    def __init__(
+        self,
+        backend: Backend,
+        backend_files: Sequence[str],
+        device: str,
+        class_names: Sequence[str],
+        palette: np.ndarray,
+        deploy_cfg: Union[str, mmcv.Config] = None,
+    ):
+        super(End2EndModel, self).__init__()
+        self.CLASSES = class_names
+        self.PALETTE = palette
+        self.deploy_cfg = deploy_cfg
+        self._init_wrapper(
+            backend=backend, backend_files=backend_files, device=device)
+
+    def _init_wrapper(self, backend, backend_files, device):
+        """Initialize the wrapper of backends."""
+        onnx_config = get_onnx_config(self.deploy_cfg)
+        output_names = onnx_config['output_names']
+        self.wrapper = BaseBackendModel._build_wrapper(
+            backend=backend,
+            backend_files=backend_files,
+            device=device,
+            output_names=output_names)
+
+    def forward(self, img: Sequence[torch.Tensor],
+                img_metas: Sequence[Sequence[dict]], *args, **kwargs):
+        """Run forward inference.
+
+        Args:
+            img (Sequence[torch.Tensor]): A list containing input image(s)
+                in [N x C x H x W] format.
+            img_metas (Sequence[Sequence[dict]]): A list of meta info for
+                image(s).
+            *args: Other arguments.
+            **kwargs: Other keyword arguments.
+
+        Returns:
+            list: A list of predictions.
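+
+        Example (illustrative only; `data` follows the format produced by
+        `Segmentation.create_input`):
+            >>> seg_maps = model(data['img'], data['img_metas'])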
+ """ + input_img = img[0].contiguous() + outputs = self.forward_test(input_img, img_metas, *args, **kwargs) + seg_pred = outputs[0] + # whole mode supports dynamic shape + ori_shape = img_metas[0][0]['ori_shape'] + if not (ori_shape[0] == seg_pred.shape[-2] + and ori_shape[1] == seg_pred.shape[-1]): + seg_pred = torch.from_numpy(seg_pred).float() + seg_pred = resize( + seg_pred, size=tuple(ori_shape[:2]), mode='nearest') + seg_pred = seg_pred.long().detach().cpu().numpy() + # remove unnecessary dim + seg_pred = seg_pred.squeeze(1) + seg_pred = list(seg_pred) + return seg_pred + + def forward_test(self, imgs: torch.Tensor, *args, **kwargs) -> \ + List[np.ndarray]: + """The interface for forward test. + + Args: + imgs (torch.Tensor): Input image(s) in [N x C x H x W] format. + + Returns: + List[np.ndarray]: A list of segmentation map. + """ + outputs = self.wrapper({'input': imgs}) + outputs = self.wrapper.output_to_list(outputs) + outputs = [out.detach().cpu().numpy() for out in outputs] + return outputs + + def show_result(self, + img: np.ndarray, + result: list, + win_name: str, + show: bool = True, + opacity: float = 0.5, + out_file: str = None): + """Show predictions of segmentation. + Args: + img: (np.ndarray): Input image to draw predictions. + result (list): A list of predictions. + win_name (str): The name of visualization window. + show (bool): Whether to show plotted image in windows. Defaults to + `True`. + opacity: (float): Opacity of painted segmentation map. + Defaults to `0.5`. + out_file (str): Output image file to save drawn predictions. + + Returns: + np.ndarray: Drawn image, only if not `show` or `out_file`. + """ + return BaseSegmentor.show_result( + self, + img, + result, + palette=self.PALETTE, + opacity=opacity, + show=show, + win_name=win_name, + out_file=out_file) + + +def get_classes_palette_from_config(model_cfg: Union[str, mmcv.Config]): + """Get class name and palette from config. + + Args: + model_cfg (str | mmcv.Config): Input model config file or + Config object. + Returns: + tuple(Sequence[str], np.ndarray): A list of string specifying names of + different class and the palette of segmentation map. + """ + # load cfg if necessary + model_cfg = load_config(model_cfg)[0] + + module_dict = DATASETS.module_dict + data_cfg = model_cfg.data + + if 'train' in data_cfg: + module = module_dict[data_cfg.train.type] + elif 'val' in data_cfg: + module = module_dict[data_cfg.val.type] + elif 'test' in data_cfg: + module = module_dict[data_cfg.test.type] + else: + raise RuntimeError(f'No dataset config found in: {model_cfg}') + + return module.CLASSES, module.PALETTE + + +def build_segmentation_model(model_files: Sequence[str], + model_cfg: Union[str, mmcv.Config], + deploy_cfg: Union[str, mmcv.Config], device: str, + **kwargs): + """Build object segmentation model for different backends. + + Args: + model_files (Sequence[str]): Input model file(s). + model_cfg (str | mmcv.Config): Input model config file or Config + object. + deploy_cfg (str | mmcv.Config): Input deployment config file or + Config object. + device (str): Device to input model. + + Returns: + BaseBackendModel: Segmentor for a configured backend. 
+ """ + # load cfg if necessary + deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg) + + backend = get_backend(deploy_cfg) + class_names, palette = get_classes_palette_from_config(model_cfg) + backend_segmentor = End2EndModel( + backend, + model_files, + device, + class_names, + palette, + deploy_cfg=deploy_cfg, + **kwargs) + + return backend_segmentor diff --git a/mmdeploy/mmseg/export/onnx_utils.py b/mmdeploy/codebase/mmseg/deploy/utils.py similarity index 100% rename from mmdeploy/mmseg/export/onnx_utils.py rename to mmdeploy/codebase/mmseg/deploy/utils.py diff --git a/mmdeploy/mmseg/models/__init__.py b/mmdeploy/codebase/mmseg/models/__init__.py similarity index 100% rename from mmdeploy/mmseg/models/__init__.py rename to mmdeploy/codebase/mmseg/models/__init__.py diff --git a/mmdeploy/mmseg/models/decode_heads/__init__.py b/mmdeploy/codebase/mmseg/models/decode_heads/__init__.py similarity index 100% rename from mmdeploy/mmseg/models/decode_heads/__init__.py rename to mmdeploy/codebase/mmseg/models/decode_heads/__init__.py diff --git a/mmdeploy/mmseg/models/decode_heads/aspp_head.py b/mmdeploy/codebase/mmseg/models/decode_heads/aspp_head.py similarity index 100% rename from mmdeploy/mmseg/models/decode_heads/aspp_head.py rename to mmdeploy/codebase/mmseg/models/decode_heads/aspp_head.py diff --git a/mmdeploy/mmseg/models/decode_heads/psp_head.py b/mmdeploy/codebase/mmseg/models/decode_heads/psp_head.py similarity index 100% rename from mmdeploy/mmseg/models/decode_heads/psp_head.py rename to mmdeploy/codebase/mmseg/models/decode_heads/psp_head.py diff --git a/mmdeploy/mmseg/models/segmentors/__init__.py b/mmdeploy/codebase/mmseg/models/segmentors/__init__.py similarity index 100% rename from mmdeploy/mmseg/models/segmentors/__init__.py rename to mmdeploy/codebase/mmseg/models/segmentors/__init__.py diff --git a/mmdeploy/mmseg/models/segmentors/base.py b/mmdeploy/codebase/mmseg/models/segmentors/base.py similarity index 100% rename from mmdeploy/mmseg/models/segmentors/base.py rename to mmdeploy/codebase/mmseg/models/segmentors/base.py diff --git a/mmdeploy/mmseg/models/segmentors/encoder_decoder.py b/mmdeploy/codebase/mmseg/models/segmentors/encoder_decoder.py similarity index 100% rename from mmdeploy/mmseg/models/segmentors/encoder_decoder.py rename to mmdeploy/codebase/mmseg/models/segmentors/encoder_decoder.py diff --git a/mmdeploy/core/optimizers/extractor.py b/mmdeploy/core/optimizers/extractor.py index f15d1e4a2c..10f9bc5aea 100644 --- a/mmdeploy/core/optimizers/extractor.py +++ b/mmdeploy/core/optimizers/extractor.py @@ -4,7 +4,7 @@ from packaging import version -def parse_extractor_io_string(io_str): +def parse_extractor_io_string(io_str) -> tuple: """Parse IO string for extractor.""" name, io_type = io_str.split(':') assert io_type in ['input', 'output'] @@ -44,14 +44,14 @@ def impl(node_output_name, graph_input_nodes, reachable_nodes): impl(node_output_name, graph_input_nodes, reachable_nodes) -def create_extractor(model: onnx.ModelProto): +def create_extractor(model: onnx.ModelProto) -> onnx.utils.Extractor: """Create Extractor for ONNX. Args: model (onnx.ModelProto): An input onnx model. Returns: - Extractor: Extractor for the onnx. + onnx.utils.Extractor: Extractor for the onnx. 
""" assert version.parse(onnx.__version__) >= version.parse('1.8.0') # patch extractor diff --git a/mmdeploy/core/optimizers/function_marker.py b/mmdeploy/core/optimizers/function_marker.py index 500e5f2458..0a1c6f79da 100644 --- a/mmdeploy/core/optimizers/function_marker.py +++ b/mmdeploy/core/optimizers/function_marker.py @@ -1,10 +1,10 @@ import inspect -from typing import Any, Dict, Optional, Sequence +from typing import Any, Callable, Dict, Optional, Sequence import torch from mmdeploy.core.rewriters import FUNCTION_REWRITER -from mmdeploy.utils import cfg_apply_marks, get_codebase, get_partition_config +from mmdeploy.utils import cfg_apply_marks, get_partition_config MARK_FUNCTION_COUNT = dict() @@ -54,7 +54,7 @@ def symbolic(g, x, dtype, shape, func, func_id, type, name, id, attrs): return n @staticmethod - def forward(ctx, x, *args): + def forward(ctx, x, *args) -> torch.Tensor: """Run forward.""" return x @@ -71,19 +71,20 @@ def mark_symbolic(rewriter, g, x, *args): @FUNCTION_REWRITER.register_rewriter( 'mmdeploy.core.optimizers.function_marker.Mark.forward') def forward_of_mark(rewriter, ctx, x, dtype, shape, func, func_id, type, name, - id, attrs): + id, attrs) -> torch.Tensor: """Rewrite forward of mark op.""" deploy_cfg = rewriter.cfg # save calib data apply_marks = cfg_apply_marks(deploy_cfg) create_calib = getattr(rewriter, 'create_calib', False) if apply_marks and create_calib: - codebase = get_codebase(deploy_cfg) partition_params = get_partition_config(deploy_cfg) assert partition_params is not None, 'No partition config.' partition_type = partition_params['type'] - from mmdeploy.apis.utils import get_partition_cfg - partition_cfgs = get_partition_cfg(codebase, partition_type) + + from mmdeploy.apis import get_predefined_partition_cfg + partition_cfgs = get_predefined_partition_cfg(deploy_cfg, + partition_type) assert hasattr(rewriter, 'calib_file') for partition_id, partition_cfg in enumerate(partition_cfgs): @@ -123,7 +124,7 @@ def forward_of_mark(rewriter, ctx, x, dtype, shape, func, func_id, type, name, def mark_tensors(xs: Any, func: str, func_id: int, io_type: str, ctx: Any, - attrs: Dict, is_inspecting: bool, level: int): + attrs: Dict, is_inspecting: bool, level: int) -> tuple: """Add mark node recursively. Args: @@ -181,7 +182,7 @@ def impl(ys, prefix, level): def mark(func_name: Optional[str] = None, inputs: Optional[Sequence[str]] = None, outputs: Optional[Sequence[str]] = None, - **attrs): + **attrs) -> Callable: """The decorator used to add mark node. Mark node can be used to support model partition. diff --git a/mmdeploy/core/optimizers/optimize.py b/mmdeploy/core/optimizers/optimize.py index ed72282afb..57ce8cbe66 100644 --- a/mmdeploy/core/optimizers/optimize.py +++ b/mmdeploy/core/optimizers/optimize.py @@ -5,7 +5,7 @@ from onnx.helper import get_attribute_value -def attribute_to_dict(attr: onnx.AttributeProto): +def attribute_to_dict(attr: onnx.AttributeProto) -> Dict: """Convert onnx op attribute to dict. Args: @@ -23,7 +23,8 @@ def attribute_to_dict(attr: onnx.AttributeProto): return ret -def remove_nodes(model: onnx.ModelProto, predicate: Callable): +def remove_nodes(model: onnx.ModelProto, + predicate: Callable) -> onnx.ModelProto: """Remove nodes from ONNX model. Args: @@ -54,14 +55,14 @@ def remove_nodes(model: onnx.ModelProto, predicate: Callable): return model -def is_unused_mark(marks: Iterable[onnx.NodeProto]): +def is_unused_mark(marks: Iterable[onnx.NodeProto]) -> Callable: """Check whether a mark is unused. 
Args: marks (Iterable[onnx.NodeProto]): A list of onnx NodeProto. Returns: - bool: `True` if a mark node is in `marks`. + Callable: The function to check if a mark node is in `marks`. """ def f(node): @@ -75,14 +76,14 @@ def f(node): return f -def is_identity(node: onnx.NodeProto): +def is_identity(node: onnx.NodeProto) -> bool: """Check if an op is identity.""" return node.op_type == 'Identity' def get_new_name(attrs: Dict[str, str], mark_name: str = '', - name_map: Optional[Dict[str, str]] = None): + name_map: Optional[Dict[str, str]] = None) -> str: """Get new name for a node. Args: diff --git a/mmdeploy/core/rewriters/function_rewriter.py b/mmdeploy/core/rewriters/function_rewriter.py index a8d49b857a..175372b061 100644 --- a/mmdeploy/core/rewriters/function_rewriter.py +++ b/mmdeploy/core/rewriters/function_rewriter.py @@ -44,7 +44,7 @@ def __init__(self): self._registry = RewriterRegistry() def add_backend(self, backend: str): - """Add a beckend by calling the _registry.add_backend.""" + """Add a backend by calling the _registry.add_backend.""" self._registry.add_backend(backend) def register_rewriter(self, diff --git a/mmdeploy/core/rewriters/module_rewriter.py b/mmdeploy/core/rewriters/module_rewriter.py index adf20fbc21..7b45d4ae17 100644 --- a/mmdeploy/core/rewriters/module_rewriter.py +++ b/mmdeploy/core/rewriters/module_rewriter.py @@ -27,7 +27,7 @@ def __init__(self): self._registry = RewriterRegistry() def add_backend(self, backend: str): - """Add a beckend by calling the _registry.add_backend.""" + """Add a backend by calling the _registry.add_backend.""" self._registry.add_backend(backend) def register_rewrite_module(self, diff --git a/mmdeploy/core/rewriters/rewriter_manager.py b/mmdeploy/core/rewriters/rewriter_manager.py index 23eb424eff..4b0891b87a 100644 --- a/mmdeploy/core/rewriters/rewriter_manager.py +++ b/mmdeploy/core/rewriters/rewriter_manager.py @@ -28,12 +28,8 @@ def add_backend(self, backend: str): REWRITER_MANAGER = RewriterManager() -REWRITER_MANAGER.add_backend(Backend.ONNXRUNTIME.value) -REWRITER_MANAGER.add_backend(Backend.TENSORRT.value) -REWRITER_MANAGER.add_backend(Backend.NCNN.value) -REWRITER_MANAGER.add_backend(Backend.PPL.value) -REWRITER_MANAGER.add_backend(Backend.PYTORCH.value) -REWRITER_MANAGER.add_backend(Backend.OPENVINO.value) +for backend in Backend: + REWRITER_MANAGER.add_backend(backend.value) MODULE_REWRITER = REWRITER_MANAGER.module_rewriter FUNCTION_REWRITER = REWRITER_MANAGER.function_rewrite diff --git a/mmdeploy/core/rewriters/rewriter_utils.py b/mmdeploy/core/rewriters/rewriter_utils.py index f0eb8b4a87..e706b3b8a1 100644 --- a/mmdeploy/core/rewriters/rewriter_utils.py +++ b/mmdeploy/core/rewriters/rewriter_utils.py @@ -80,7 +80,7 @@ def decorator(object): return decorator -class ContextCaller(): +class ContextCaller: """A callable object used in RewriteContext. This class saves context variables as member variables. 
When a rewritten diff --git a/mmdeploy/core/rewriters/symbolic_rewriter.py b/mmdeploy/core/rewriters/symbolic_rewriter.py index 93fd640701..d5eb6f80ea 100644 --- a/mmdeploy/core/rewriters/symbolic_rewriter.py +++ b/mmdeploy/core/rewriters/symbolic_rewriter.py @@ -1,5 +1,5 @@ import logging -from typing import Dict, Optional, Sequence +from typing import Callable, Dict, Optional, Sequence from torch.autograd import Function from torch.onnx.symbolic_helper import parse_args @@ -36,7 +36,7 @@ def __init__(self) -> None: self._registry = RewriterRegistry() def add_backend(self, backend: str): - """Add a beckend by calling the _registry.add_backend.""" + """Add a backend by calling the _registry.add_backend.""" self._registry.add_backend(backend) def register_symbolic(self, @@ -44,7 +44,7 @@ def register_symbolic(self, backend: str = Backend.DEFAULT.value, is_pytorch: bool = False, arg_descriptors: Optional[Sequence[str]] = None, - **kwargs): + **kwargs) -> Callable: """The decorator of the custom symbolic. Args: diff --git a/mmdeploy/mmcls/apis/__init__.py b/mmdeploy/mmcls/apis/__init__.py deleted file mode 100644 index 1cfce5d45d..0000000000 --- a/mmdeploy/mmcls/apis/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .inference import build_classifier -from .visualize import show_result - -__all__ = ['build_classifier', 'show_result'] diff --git a/mmdeploy/mmcls/apis/inference.py b/mmdeploy/mmcls/apis/inference.py deleted file mode 100644 index b5fe05996b..0000000000 --- a/mmdeploy/mmcls/apis/inference.py +++ /dev/null @@ -1,239 +0,0 @@ -from typing import Sequence, Union - -import mmcv -import torch -from mmcls.datasets import DATASETS -from mmcls.models import BaseClassifier - -from mmdeploy.utils.config_utils import Backend, get_backend, load_config - - -class DeployBaseClassifier(BaseClassifier): - """Base Class of Wrapper for classifier's inference. - - Args: - class_names (Sequence[str]): A list of string specifying class names. - device_id (int): An integer represents device index. - """ - - def __init__(self, class_names: Sequence[str], device_id: int): - super(DeployBaseClassifier, self).__init__() - self.CLASSES = class_names - self.device_id = device_id - - def simple_test(self, img, *args, **kwargs): - raise NotImplementedError('This method is not implemented.') - - def extract_feat(self, imgs): - raise NotImplementedError('This method is not implemented.') - - def forward_train(self, imgs, **kwargs): - raise NotImplementedError('This method is not implemented.') - - def forward_test(self, imgs, *args, **kwargs): - raise NotImplementedError('This method is not implemented.') - - -class ONNXRuntimeClassifier(DeployBaseClassifier): - """Wrapper for classifier's inference with ONNXRuntime. - - Args: - model_file (str): The path of input model file. - class_names (Sequence[str]): A list of string specifying class names. - device_id (int): An integer represents device index. - """ - - def __init__(self, model_file: str, class_names: Sequence[str], - device_id: int): - super(ONNXRuntimeClassifier, self).__init__(class_names, device_id) - from mmdeploy.apis.onnxruntime import ORTWrapper - self.model = ORTWrapper(model_file, device_id) - - def forward_test(self, imgs: torch.Tensor, *args, **kwargs): - """Run test inference. - - Args: - imgs (torch.Tensor): Input tensor of the model. - - Returns: - list[np.ndarray]: Predictions of a classifier. 
- """ - input_data = imgs - results = self.model({'input': input_data})[0] - return list(results) - - -class TensorRTClassifier(DeployBaseClassifier): - """Wrapper for classifier's inference with TensorRT. - - Args: - model_file (str): The path of input model file. - class_names (Sequence[str]): A list of string specifying class names. - device_id (int): An integer represents device index. - """ - - def __init__(self, model_file: str, class_names: Sequence[str], - device_id: int): - super(TensorRTClassifier, self).__init__(class_names, device_id) - from mmdeploy.apis.tensorrt import TRTWrapper - model = TRTWrapper(model_file) - - self.model = model - - def forward_test(self, imgs: torch.Tensor, *args, **kwargs): - """Run test inference. - - Args: - imgs (torch.Tensor): Input tensor of the model. - - Returns: - list[np.ndarray]: Predictions of a classifier. - """ - input_data = imgs - with torch.cuda.device(self.device_id), torch.no_grad(): - results = self.model({'input': input_data})['output'] - results = results.detach().cpu().numpy() - - return list(results) - - -class NCNNClassifier(DeployBaseClassifier): - """Wrapper for classifier's inference with NCNN. - - Args: - param_file (str): Path of parameter file. - bin_file (str): Path of bin file. - class_names (Sequence[str]): A list of string specifying class names. - device_id (int): An integer represents device index. - """ - - def __init__(self, param_file: str, bin_file: str, - class_names: Sequence[str], device_id: int): - super(NCNNClassifier, self).__init__(class_names, device_id) - from mmdeploy.apis.ncnn import NCNNWrapper - self.model = NCNNWrapper(param_file, bin_file, output_names=['output']) - - def forward_test(self, imgs: torch.Tensor, *args, **kwargs): - """Run test inference. - - Args: - imgs (torch.Tensor): Input tensor of the model. - - Returns: - list[np.ndarray]: Predictions of a classifier. - """ - results = self.model({'input': imgs})['output'] - results = results.detach().cpu().numpy() - results_list = list(results) - return results_list - - -class PPLClassifier(DeployBaseClassifier): - """Wrapper for classifier's inference with PPL. - - Args: - onnx_file (str): Path of input ONNX model file. - algo_file (str): Path of PPL algorithm file. - class_names (Sequence[str]): A list of string specifying class names. - device_id (int): An integer represents device index. - """ - - def __init__(self, onnx_file, algo_file, class_names, device_id): - super(PPLClassifier, self).__init__(class_names, device_id) - from mmdeploy.apis.ppl import PPLWrapper - model = PPLWrapper( - onnx_file=onnx_file, algo_file=algo_file, device_id=device_id) - self.model = model - self.CLASSES = class_names - - def forward_test(self, imgs: torch.Tensor, *args, **kwargs): - """Run test inference. - - Args: - imgs (torch.Tensor): Input tensor of the model. - - Returns: - list[np.ndarray]: Predictions of a classifier. - """ - input_data = imgs - results = self.model({'input': input_data})[0] - - return list(results) - - -ONNXRUNTIME_CLASSIFIER_MAP = dict(end2end=ONNXRuntimeClassifier) - -TENSORRT_CLASSIFIER_MAP = dict(end2end=TensorRTClassifier) - -PPL_CLASSIFIER_MAP = dict(end2end=PPLClassifier) - -NCNN_CLASSIFIER_MAP = dict(end2end=NCNNClassifier) - -BACKEND_CLASSIFIER_MAP = { - Backend.ONNXRUNTIME: ONNXRUNTIME_CLASSIFIER_MAP, - Backend.TENSORRT: TENSORRT_CLASSIFIER_MAP, - Backend.PPL: PPL_CLASSIFIER_MAP, - Backend.NCNN: NCNN_CLASSIFIER_MAP -} - - -def get_classes_from_config(model_cfg: Union[str, mmcv.Config]): - """Get class name from config. 
- - Args: - model_cfg (str | mmcv.Config): Input model config file or - Config object. - - Returns: - list[str]: A list of string specifying names of different class. - """ - model_cfg = load_config(model_cfg)[0] - module_dict = DATASETS.module_dict - data_cfg = model_cfg.data - - if 'train' in data_cfg: - module = module_dict[data_cfg.train.type] - elif 'val' in data_cfg: - module = module_dict[data_cfg.val.type] - elif 'test' in data_cfg: - module = module_dict[data_cfg.test.type] - else: - raise RuntimeError(f'No dataset config found in: {model_cfg}') - - return module.CLASSES - - -def build_classifier(model_files: Sequence[str], model_cfg: Union[str, - mmcv.Config], - deploy_cfg: Union[str, - mmcv.Config], device_id: int, **kwargs): - """Build classifier for different backend. - - Args: - model_files (list[str]): Input model file(s). - model_cfg (str | mmcv.Config): Input model config file or Config - object. - deploy_cfg (str | mmcv.Config): Input deployment config file or - Config object. - device_id (int): An integer represents device index. - - Returns: - DeployBaseClassifier: Classifier for a configured backend. - """ - model_cfg, deploy_cfg = load_config(model_cfg, deploy_cfg) - - backend = get_backend(deploy_cfg) - class_names = get_classes_from_config(model_cfg) - - assert backend in BACKEND_CLASSIFIER_MAP, \ - f'Unsupported backend type: {backend.value}' - model_map = BACKEND_CLASSIFIER_MAP[backend] - - model_type = 'end2end' - assert model_type in model_map, f'Unsupported model type: {model_type}' - backend_classifier_class = model_map[model_type] - - backend_detector = backend_classifier_class( - *model_files, class_names=class_names, device_id=device_id) - - return backend_detector diff --git a/mmdeploy/mmcls/apis/visualize.py b/mmdeploy/mmcls/apis/visualize.py deleted file mode 100644 index f03d1a100d..0000000000 --- a/mmdeploy/mmcls/apis/visualize.py +++ /dev/null @@ -1,32 +0,0 @@ -import numpy as np -import torch - -from mmdeploy.utils import Backend - - -def show_result(model: torch.nn.Module, - image: np.ndarray, - result: list, - output_file: str, - backend: Backend, - show: bool = True): - """Show predictions of mmcls. - - Args: - model (nn.Module): Input model which has `show_result` method. - image: (np.ndarray): Input image to draw predictions. - result (list): A list of predictions. - output_file (str): Output image file to save drawn predictions. - backend (Backend): Specifying backend type. - show (bool): Whether to show plotted image in windows. Defaults to - `True`. - - Returns: - np.ndarray: Drawn image, only if not `show` or `out_file`. 
- """ - pred_score = np.max(result, axis=0) - pred_label = np.argmax(result, axis=0) - result = {'pred_label': pred_label, 'pred_score': float(pred_score)} - result['pred_class'] = model.CLASSES[result['pred_label']] - return model.show_result( - image, result, show=show, win_name=backend.value, out_file=output_file) diff --git a/mmdeploy/mmcls/export/__init__.py b/mmdeploy/mmcls/export/__init__.py deleted file mode 100644 index 2aa9aa73d0..0000000000 --- a/mmdeploy/mmcls/export/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from .prepare_input import (build_dataloader, build_dataset, create_input, - get_tensor_from_input) - -__all__ = [ - 'build_dataloader', 'build_dataset', 'create_input', - 'get_tensor_from_input' -] diff --git a/mmdeploy/mmcls/export/prepare_input.py b/mmdeploy/mmcls/export/prepare_input.py deleted file mode 100644 index f8f4378013..0000000000 --- a/mmdeploy/mmcls/export/prepare_input.py +++ /dev/null @@ -1,132 +0,0 @@ -import logging -from typing import Any, Optional, Sequence, Union - -import mmcv -from mmcls.datasets import build_dataloader as build_dataloader_mmcls -from mmcls.datasets import build_dataset as build_dataset_mmcls -from mmcls.datasets.pipelines import Compose -from mmcv.parallel import collate, scatter -from torch.utils.data import Dataset - -from mmdeploy.utils import Task, load_config - - -def create_input(task: Task, - model_cfg: Union[str, mmcv.Config], - imgs: Any, - input_shape: Optional[Sequence[int]] = None, - device: str = 'cuda:0'): - """Create input for classifier. - - Args: - task (Task): Specifying task type. - model_cfg (str | mmcv.Config): The input model config. - imgs (Any): Input image(s), accpeted data type are `str`, - `np.ndarray`, `torch.Tensor`. - input_shape (list[int]): A list of two integer in (width, height) - format specifying input shape. Defaults to `None`. - device (str): A string represents device type. Default is 'cuda:0'. - - Returns: - tuple: (data, img), meta information for the input image and input. - """ - assert task == Task.CLASSIFICATION - cfg = load_config(model_cfg)[0].copy() - if isinstance(imgs, str): - if cfg.data.test.pipeline[0]['type'] != 'LoadImageFromFile': - cfg.data.test.pipeline.insert(0, dict(type='LoadImageFromFile')) - data = dict(img_info=dict(filename=imgs), img_prefix=None) - else: - if cfg.data.test.pipeline[0]['type'] == 'LoadImageFromFile': - cfg.data.test.pipeline.pop(0) - data = dict(img=imgs) - # check whether input_shape is valid - if input_shape is not None: - if 'crop_size' in cfg.data.test.pipeline[2]: - crop_size = cfg.data.test.pipeline[2]['crop_size'] - if tuple(input_shape) != (crop_size, crop_size): - logging.warning( - f'`input shape` should be equal to `crop_size`: {crop_size},\ - but given: {input_shape}') - test_pipeline = Compose(cfg.data.test.pipeline) - data = test_pipeline(data) - data = collate([data], samples_per_gpu=1) - if device != 'cpu': - data = scatter(data, [device])[0] - return data, data['img'] - - -def build_dataset(dataset_cfg: Union[str, mmcv.Config], - dataset_type: str = 'val', - **kwargs): - """Build dataset for classifier. - - Args: - dataset_cfg (str | mmcv.Config): The input dataset config. - dataset_type (str): A string represents dataset type, e.g.: 'train', - 'test', 'val'. Defaults to 'val'. - - Returns: - Dataset: A PyTorch dataset. 
- """ - dataset_cfg = load_config(dataset_cfg)[0] - data = dataset_cfg.data - assert dataset_type in data - - dataset = build_dataset_mmcls(data[dataset_type]) - - return dataset - - -def build_dataloader(dataset: Dataset, - samples_per_gpu: int, - workers_per_gpu: int, - num_gpus: int = 1, - dist: bool = False, - shuffle: bool = False, - round_up: bool = True, - seed: Optional[int] = None, - pin_memory: bool = True, - persistent_workers: bool = True, - **kwargs): - """Build dataloader for classifier. - - Args: - dataset (Dataset): Input dataset. - samples_per_gpu (int): Number of training samples on each GPU, i.e., - batch size of each GPU. - workers_per_gpu (int): How many subprocesses to use for data loading - for each GPU. - num_gpus (int): Number of GPUs. Only used in non-distributed training. - dist (bool): Distributed training/test or not. Defaults to `False`. - shuffle (bool): Whether to shuffle the data at every epoch. - Defaults to `False`. - round_up (bool): Whether to round up the length of dataset by adding - extra samples to make it evenly divisible. Default is `True`. - seed (int): An integer set to be seed. Default is `None`. - pin_memory (bool): Whether to use pin_memory in DataLoader. - Default is `True`. - persistent_workers (bool): If `True`, the data loader will not shutdown - the worker processes after a dataset has been consumed once. - This allows to maintain the workers Dataset instances alive. - The argument also has effect in PyTorch>=1.7.0. - Default is `True`. - kwargs: Any other keyword argument to be used to initialize DataLoader. - - Returns: - DataLoader: A PyTorch dataloader. - """ - return build_dataloader_mmcls(dataset, samples_per_gpu, workers_per_gpu, - num_gpus, dist, shuffle, round_up, seed, - pin_memory, persistent_workers, **kwargs) - - -def get_tensor_from_input(input_data: tuple): - """Get input tensor from input data. - - Args: - input_data (tuple): Input data containing meta info and image tensor. - Returns: - torch.Tensor: An image in `Tensor`. - """ - return input_data['img'] diff --git a/mmdeploy/mmdet/__init__.py b/mmdeploy/mmdet/__init__.py deleted file mode 100644 index 72fcc2b119..0000000000 --- a/mmdeploy/mmdet/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .core import * # noqa: F401,F403 -from .export import * # noqa: F401,F403 -from .models import * # noqa: F401,F403 diff --git a/mmdeploy/mmdet/apis/__init__.py b/mmdeploy/mmdet/apis/__init__.py deleted file mode 100644 index 7c62082749..0000000000 --- a/mmdeploy/mmdet/apis/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .inference import build_detector -from .visualize import show_result - -__all__ = ['build_detector', 'show_result'] diff --git a/mmdeploy/mmdet/apis/inference.py b/mmdeploy/mmdet/apis/inference.py deleted file mode 100644 index 6d83c0ef11..0000000000 --- a/mmdeploy/mmdet/apis/inference.py +++ /dev/null @@ -1,977 +0,0 @@ -from functools import partial -from typing import List, Sequence, Tuple, Union - -import mmcv -import numpy as np -import torch -import torch.nn.functional as F -from mmdet.core import bbox2result -from mmdet.datasets import DATASETS -from mmdet.models import BaseDetector - -from mmdeploy.mmdet.core.post_processing import multiclass_nms -from mmdeploy.utils import (Backend, get_backend, get_mmdet_params, - get_partition_config, load_config) - - -class DeployBaseDetector(BaseDetector): - """Base Class of Wrapper for inference of detection. - - Args: - class_names (Sequence[str]): A list of string specifying class names. 
-        device_id (int): An integer represents device index.
-    """
-
-    def __init__(self, class_names, device_id, deploy_cfg=None, **kwargs):
-        super(DeployBaseDetector, self).__init__()
-        self.CLASSES = class_names
-        self.device_id = device_id
-        self.deploy_cfg = deploy_cfg
-
-    def simple_test(self, img, img_metas, **kwargs):
-        raise NotImplementedError('This method is not implemented.')
-
-    def aug_test(self, imgs, img_metas, **kwargs):
-        raise NotImplementedError('This method is not implemented.')
-
-    def extract_feat(self, imgs):
-        raise NotImplementedError('This method is not implemented.')
-
-    def forward_train(self, imgs, img_metas, **kwargs):
-        raise NotImplementedError('This method is not implemented.')
-
-    def val_step(self, data, optimizer):
-        raise NotImplementedError('This method is not implemented.')
-
-    def train_step(self, data, optimizer):
-        raise NotImplementedError('This method is not implemented.')
-
-    def aforward_test(self, *, img, img_metas, **kwargs):
-        raise NotImplementedError('This method is not implemented.')
-
-    def async_simple_test(self, img, img_metas, **kwargs):
-        raise NotImplementedError('This method is not implemented.')
-
-    def __clear_outputs(
-        self, test_outputs: List[Union[torch.Tensor, np.ndarray]]
-    ) -> List[Union[List[torch.Tensor], List[np.ndarray]]]:
-        """Removes additional outputs and detections with zero score.
-
-        Args:
-            test_outputs (List[Union[torch.Tensor, np.ndarray]]):
-                outputs of forward_test.
-
-        Returns:
-            List[Union[List[torch.Tensor], List[np.ndarray]]]:
-                outputs without zero-score objects.
-        """
-        batch_size = len(test_outputs[0])
-
-        num_outputs = len(test_outputs)
-        outputs = [[None for _ in range(batch_size)]
-                   for _ in range(num_outputs)]
-
-        for i in range(batch_size):
-            inds = test_outputs[0][i, :, 4] > 0.0
-            for output_id in range(num_outputs):
-                outputs[output_id][i] = test_outputs[output_id][i, inds, ...]
-        return outputs
-
-    def __postprocessing_masks(self,
-                               det_bboxes: np.ndarray,
-                               det_masks: np.ndarray,
-                               img_w: int,
-                               img_h: int,
-                               mask_thr_binary: float = 0.5) -> np.ndarray:
-        """Additional processing of masks. Resizes masks from
-        [num_det, 28, 28] to [num_det, img_h, img_w]. An analog of the
-        `_do_paste_mask` function in
-        `mmdeploy.mmdet.models.roi_heads.mask_heads.fcn_mask_head`.
-
-        Args:
-            det_bboxes (np.ndarray): Bbox of shape [num_det, 5].
-            det_masks (np.ndarray): Masks of shape [num_det, 28, 28].
-            img_w (int): Width of the original image.
-            img_h (int): Height of the original image.
-            mask_thr_binary (float): The threshold for the mask.
-
-        Returns:
-            np.ndarray: Masks of shape [num_det, img_h, img_w].
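The padded-output convention handled by `__clear_outputs` above is easy to miss, so here is a standalone sketch of the same predicate on toy tensors:

# Backends emit fixed-size, zero-padded [N, num_det, 5] tensors; rows whose
# score (column 4) is 0 are dropped per image, exactly as above. Toy data.
import torch

dets = torch.zeros(1, 100, 5)                    # padded backend output
dets[0, :3, 4] = torch.tensor([0.9, 0.7, 0.5])   # three real detections
labels = torch.zeros(1, 100, dtype=torch.long)

keep = dets[0, :, 4] > 0.0                       # same predicate as above
print(dets[0, keep].shape)                       # torch.Size([3, 5])
print(labels[0, keep].shape)                     # torch.Size([3])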
- """ - masks = det_masks - bboxes = det_bboxes - - num_det = bboxes.shape[0] - if num_det == 0: - return np.zeros((0, img_w, img_h)) - - if isinstance(masks, np.ndarray): - masks = torch.tensor(masks) - bboxes = torch.tensor(bboxes) - - result_masks = [] - for bbox, mask in zip(bboxes, masks): - - x0_int, y0_int = 0, 0 - x1_int, y1_int = img_w, img_h - - img_y = torch.arange(y0_int, y1_int, dtype=torch.float32) + 0.5 - img_x = torch.arange(x0_int, x1_int, dtype=torch.float32) + 0.5 - x0, y0, x1, y1 = bbox - - img_y = (img_y - y0) / (y1 - y0) * 2 - 1 - img_x = (img_x - x0) / (x1 - x0) * 2 - 1 - if torch.isinf(img_x).any(): - inds = torch.where(torch.isinf(img_x)) - img_x[inds] = 0 - if torch.isinf(img_y).any(): - inds = torch.where(torch.isinf(img_y)) - img_y[inds] = 0 - - gx = img_x[None, :].expand(img_y.size(0), img_x.size(0)) - gy = img_y[:, None].expand(img_y.size(0), img_x.size(0)) - grid = torch.stack([gx, gy], dim=2) - - img_masks = F.grid_sample( - mask.to(dtype=torch.float32)[None, None, :, :], - grid[None, :, :, :], - align_corners=False) - - mask = img_masks - mask = (mask >= mask_thr_binary).to(dtype=torch.bool) - result_masks.append(mask.numpy()) - result_masks = np.concatenate(result_masks, axis=1) - return result_masks.squeeze(0) - - def forward(self, img: Sequence[torch.Tensor], img_metas: Sequence[dict], - *args, **kwargs): - """Run forward inference. - - Args: - img (Sequence[torch.Tensor]): A list contains input image(s) - in [N x C x H x W] format. - img_metas (Sequence[dict]): A list of meta info for image(s). - *args: Other arguments. - **kwargs: Other key-pair arguments. - - Returns: - list: A list contains predictions. - """ - input_img = img[0].contiguous() - outputs = self.forward_test(input_img, img_metas, *args, **kwargs) - outputs = self.__clear_outputs(outputs) - batch_dets, batch_labels = outputs[:2] - batch_masks = outputs[2] if len(outputs) == 3 else None - batch_size = input_img.shape[0] - img_metas = img_metas[0] - results = [] - rescale = kwargs.get('rescale', True) - for i in range(batch_size): - dets, labels = batch_dets[i], batch_labels[i] - if rescale: - scale_factor = img_metas[i]['scale_factor'] - - if isinstance(scale_factor, (list, tuple, np.ndarray)): - assert len(scale_factor) == 4 - scale_factor = np.array(scale_factor)[None, :] # [1,4] - dets[:, :4] /= scale_factor - - if 'border' in img_metas[i]: - # offset pixel of the top-left corners between original image - # and padded/enlarged image, 'border' is used when exporting - # CornerNet and CentripetalNet to onnx - x_off = img_metas[i]['border'][2] - y_off = img_metas[i]['border'][0] - dets[:, [0, 2]] -= x_off - dets[:, [1, 3]] -= y_off - dets[:, :4] *= (dets[:, :4] > 0).astype(dets.dtype) - - dets_results = bbox2result(dets, labels, len(self.CLASSES)) - - if batch_masks is not None: - masks = batch_masks[i] - img_h, img_w = img_metas[i]['img_shape'][:2] - ori_h, ori_w = img_metas[i]['ori_shape'][:2] - export_postprocess_mask = True - if self.deploy_cfg is not None: - mmdet_deploy_cfg = get_mmdet_params(self.deploy_cfg) - # this flag enable postprocess when export. 
- export_postprocess_mask = mmdet_deploy_cfg.get( - 'export_postprocess_mask', True) - if not export_postprocess_mask: - masks = self.__postprocessing_masks( - dets[:, :4], masks, ori_w, ori_h) - else: - masks = masks[:, :img_h, :img_w] - # avoid to resize masks with zero dim - if rescale and masks.shape[0] != 0: - masks = masks.astype(np.float32) - masks = torch.from_numpy(masks) - masks = torch.nn.functional.interpolate( - masks.unsqueeze(0), size=(ori_h, ori_w)) - masks = masks.squeeze(0).detach().numpy() - if masks.dtype != np.bool: - masks = masks >= 0.5 - segms_results = [[] for _ in range(len(self.CLASSES))] - for j in range(len(dets)): - segms_results[labels[j]].append(masks[j]) - results.append((dets_results, segms_results)) - else: - results.append(dets_results) - return results - - -class ONNXRuntimeDetector(DeployBaseDetector): - """Wrapper for detection's inference with ONNXRuntime. - - Args: - model_file (str): The path of input model file. - class_names (Sequence[str]): A list of string specifying class names. - device_id (int): An integer represents device index. - """ - - def __init__(self, model_file: str, class_names: Sequence[str], - device_id: int, **kwargs): - super(ONNXRuntimeDetector, self).__init__(class_names, device_id, - **kwargs) - from mmdeploy.apis.onnxruntime import ORTWrapper - self.model = ORTWrapper(model_file, device_id) - - def forward_test(self, imgs: torch.Tensor, *args, **kwargs): - """Implement forward test. - - Args: - imgs (torch.Tensor): Input image(s) in [N x C x H x W] format. - - Returns: - tuple[np.ndarray, np.ndarray]: dets of shape [N, num_det, 5] - and class labels of shape [N, num_det]. - """ - ort_outputs = self.model({'input': imgs}) - return ort_outputs - - -class TensorRTDetector(DeployBaseDetector): - """Wrapper for detection's inference with TensorRT. - - Args: - model_file (str): The path of input model file. - class_names (Sequence[str]): A list of string specifying class names. - device_id (int): An integer represents device index. - """ - - def __init__(self, model_file: str, class_names: Sequence[str], - device_id: int, **kwargs): - super(TensorRTDetector, self).__init__(class_names, device_id, - **kwargs) - from mmdeploy.apis.tensorrt import TRTWrapper - - self.model = TRTWrapper(model_file) - self.output_names = ['dets', 'labels'] - if len(self.model.output_names) == 3: - self.output_names.append('masks') - - def forward_test(self, imgs: torch.Tensor, *args, **kwargs): - """Implement forward test. - - Args: - imgs (torch.Tensor): Input image(s) in [N x C x H x W] format. - - Returns: - tuple[np.ndarray, np.ndarray]: dets of shape [N, num_det, 5] and - class labels of shape [N, num_det]. - """ - with torch.cuda.device(self.device_id), torch.no_grad(): - outputs = self.model({'input': imgs}) - outputs = [outputs[name] for name in self.output_names] - outputs = [out.detach().cpu().numpy() for out in outputs] - # filtered out invalid output filled with -1 - batch_labels = outputs[1] - batch_size = batch_labels.shape[0] - inds = batch_labels.reshape(-1) != -1 - for i in range(len(outputs)): - ori_shape = outputs[i].shape - outputs[i] = outputs[i].reshape(-1, - *ori_shape[2:])[inds, ...].reshape( - batch_size, -1, *ori_shape[2:]) - return outputs - - -class PPLDetector(DeployBaseDetector): - """Wrapper for detection's inference with PPL. - - Args: - model_file (str): The path of input model file. - class_names (Sequence[str]): A list of string specifying class names. - device_id (int): An integer represents device index. 
- """ - - def __init__(self, model_file, class_names, device_id, **kwargs): - super(PPLDetector, self).__init__(class_names, device_id) - from mmdeploy.apis.ppl import PPLWrapper - self.model = PPLWrapper(*model_file, device_id=device_id) - - def forward_test(self, imgs: torch.Tensor, *args, **kwargs): - """Implement forward test. - - Args: - imgs (torch.Tensor): Input image(s) in [N x C x H x W] format. - - Returns: - tuple[Tensor, Tensor]: dets of shape [N, num_det, 5] and class - labels of shape [N, num_det]. - """ - ppl_outputs = self.model({'input': imgs}) - return ppl_outputs - - -class OpenVINODetector(DeployBaseDetector): - """Wrapper for detector's inference with OpenVINO. - - Args: - model_file (str): The path of input model file (.xml). - class_names (Sequence[str]): A list of string specifying class names. - device_id (int): An integer represents device index. - """ - - def __init__(self, model_file: str, class_names: Sequence[str], - device_id: int, **kwargs): - super(OpenVINODetector, self).__init__(class_names, device_id, - **kwargs) - from mmdeploy.apis.openvino import OpenVINOWrapper - self.model = OpenVINOWrapper(model_file) - - def forward_test(self, imgs: torch.Tensor, *args, **kwargs) -> Tuple: - """Implement forward test. - - Args: - imgs (torch.Tensor): Input image(s) in [N x C x H x W] format. - - Returns: - If there are no masks in the output: - tuple[np.ndarray, np.ndarray]: dets of shape [N, num_det, 5] - and class labels of shape [N, num_det]. - If the output contains masks: - tuple[np.ndarray, np.ndarray, np.ndarray]: - dets of shape [N, num_det, 5], - class labels of shape [N, num_det] and - masks of shape [N, num_det, H, W]. - """ - openvino_outputs = self.model({'input': imgs}) - output_keys = ['dets', 'labels'] - if 'masks' in openvino_outputs: - output_keys += ['masks'] - openvino_outputs = [openvino_outputs[key] for key in output_keys] - return openvino_outputs - - -class PartitionSingleStageDetector(DeployBaseDetector): - """Base wrapper for partitioned single stage detector. - - Args: - model_file (str): The path of input model file. - class_names (Sequence[str]): A list of string specifying class names. - model_cfg: (str | mmcv.Config): Input model config. - deploy_cfg: (str | mmcv.Config): Input deployment config. - device_id (int): An integer represents device index. - """ - - def __init__(self, class_names: Sequence[str], - model_cfg: Union[str, mmcv.Config], - deploy_cfg: Union[str, - mmcv.Config], device_id: int, **kwargs): - super(PartitionSingleStageDetector, - self).__init__(class_names, device_id, **kwargs) - # load cfg if necessary - deploy_cfg = load_config(deploy_cfg)[0] - model_cfg = load_config(model_cfg)[0] - - self.model_cfg = model_cfg - self.deploy_cfg = deploy_cfg - - def partition0_postprocess(self, scores: torch.Tensor, - bboxes: torch.Tensor): - """Perform post-processing for partition 0. - - Args: - scores (Tensor): The detection scores of shape - [N, num_boxes, num_classes]. - bboxes (Tensor): The bounding boxes of shape [N, num_boxes, 4]. - - Returns: - tuple[np.ndarray, np.ndarray]: dets of shape [N, num_det, 5] and - class labels of shape [N, num_det]. 
- """ - cfg = self.model_cfg.model.test_cfg - deploy_cfg = self.deploy_cfg - - post_params = get_mmdet_params(deploy_cfg) - max_output_boxes_per_class = post_params.max_output_boxes_per_class - iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold) - score_threshold = cfg.get('score_thr', post_params.score_threshold) - pre_top_k = -1 if post_params.pre_top_k >= bboxes.shape[1] \ - else post_params.pre_top_k - keep_top_k = cfg.get('max_per_img', post_params.keep_top_k) - ret = multiclass_nms( - bboxes, - scores, - max_output_boxes_per_class, - iou_threshold=iou_threshold, - score_threshold=score_threshold, - pre_top_k=pre_top_k, - keep_top_k=keep_top_k) - ret = [r.cpu() for r in ret] - return ret - - -class ONNXRuntimePSSDetector(PartitionSingleStageDetector): - """Wrapper for partitioned single stage detector with ONNX Runtime. - - Args: - model_file (str): The path of input model file. - class_names (Sequence[str]): A list of string specifying class names. - model_cfg: (str | mmcv.Config): Input model config. - deploy_cfg: (str | mmcv.Config): Input deployment config. - device_id (int): An integer represents device index. - """ - - def __init__(self, model_file: str, class_names: Sequence[str], - model_cfg: Union[str, mmcv.Config], - deploy_cfg: Union[str, - mmcv.Config], device_id: int, **kwargs): - super(ONNXRuntimePSSDetector, - self).__init__(class_names, model_cfg, deploy_cfg, device_id, - **kwargs) - from mmdeploy.apis.onnxruntime import ORTWrapper - self.model = ORTWrapper( - model_file, device_id, output_names=['scores', 'boxes']) - - def forward_test(self, imgs: torch.Tensor, *args, **kwargs): - """Implement forward test. - - Args: - imgs (torch.Tensor): Input image(s) in [N x C x H x W] format. - - Returns: - tuple[np.ndarray, np.ndarray]: dets of shape [N, num_det, 5] and - class labels of shape [N, num_det]. - """ - ort_outputs = self.model({'input': imgs}) - scores, bboxes = ort_outputs[:2] - scores = torch.from_numpy(scores).to(imgs.device) - bboxes = torch.from_numpy(bboxes).to(imgs.device) - return self.partition0_postprocess(scores, bboxes) - - -class TensorRTPSSDetector(PartitionSingleStageDetector): - """TensorRT Wrapper for partition single stage detector. - - Args: - model_file (str): Path of the engine file. - class_names (list[str] | tuple[str]): Class names of the detector. - model_cfg (str | mmcv.Config): Model config file or Config object. - deploy_cfg (str | mmcv.Config): Deployment config file or Config - object. - device_id (int): Device index, should be same as the engine. - """ - - def __init__(self, model_file: str, class_names: Sequence[str], - model_cfg: Union[str, mmcv.Config], - deploy_cfg: Union[str, - mmcv.Config], device_id: int, **kwargs): - super(TensorRTPSSDetector, - self).__init__(class_names, model_cfg, deploy_cfg, device_id, - **kwargs) - from mmdeploy.apis.tensorrt import TRTWrapper - - self.model = TRTWrapper(model_file) - self.output_names = ['scores', 'boxes'] - - def forward_test(self, imgs: torch.Tensor, *args, - **kwargs) -> Tuple[torch.Tensor, torch.Tensor]: - """Run forward test. - - Args: - imgs (torch.Tensor): The input image(s). - - Return: - tuple[np.ndarray, np.ndarray]: dets of shape [N, num_det, 5] and - class labels of shape [N, num_det]. 
- """ - with torch.cuda.device(self.device_id), torch.no_grad(): - outputs = self.model({'input': imgs}) - outputs = [outputs[name] for name in self.output_names] - scores, bboxes = outputs[:2] - return self.partition0_postprocess(scores, bboxes) - - -class NCNNPSSDetector(PartitionSingleStageDetector): - """Wrapper for partitioned single stage detector with NCNN. - - Args: - model_file (str): The path of input model file. - class_names (Sequence[str]): A list of string specifying class names. - model_cfg: (str | mmcv.Config): Input model config. - deploy_cfg: (str | mmcv.Config): Input deployment config. - device_id (int): An integer represents device index. - """ - - def __init__(self, model_file: str, class_names: Sequence[str], - model_cfg: Union[str, mmcv.Config], - deploy_cfg: Union[str, - mmcv.Config], device_id: int, **kwargs): - super(NCNNPSSDetector, self).__init__(class_names, model_cfg, - deploy_cfg, device_id, **kwargs) - from mmdeploy.apis.ncnn import NCNNWrapper - assert len(model_file) == 2 - ncnn_param_file = model_file[0] - ncnn_bin_file = model_file[1] - self.model = NCNNWrapper( - ncnn_param_file, ncnn_bin_file, output_names=['boxes', 'scores']) - - def forward_test(self, imgs: torch.Tensor, *args, **kwargs): - """Run forward test. - - Args: - imgs (torch.Tensor): The input image(s). - - Return: - tuple[np.ndarray, np.ndarray]: dets of shape [N, num_det, 5] and - class labels of shape [N, num_det]. - """ - outputs = self.model({'input': imgs}) - boxes = outputs['boxes'] - scores = outputs['scores'] - return self.partition0_postprocess(scores, boxes) - - -class PartitionTwoStageDetector(DeployBaseDetector): - """Base wrapper for partitioned two stage detector. - - Args: - class_names (Sequence[str]): A list of string specifying class names. - model_cfg: (str | mmcv.Config): Input model config. - deploy_cfg: (str | mmcv.Config): Input deployment config. - device_id (int): An integer represents device index. - """ - - def __init__(self, class_names: Sequence[str], - model_cfg: Union[str, mmcv.Config], - deploy_cfg: Union[str, - mmcv.Config], device_id: int, **kwargs): - super(PartitionTwoStageDetector, - self).__init__(class_names, device_id, **kwargs) - from mmdet.models.builder import build_head, build_roi_extractor - - from mmdeploy.mmdet.models.roi_heads.bbox_heads import \ - bbox_head__get_bboxes - - # load cfg if necessary - deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg) - - self.model_cfg = model_cfg - self.deploy_cfg = deploy_cfg - - self.bbox_roi_extractor = build_roi_extractor( - model_cfg.model.roi_head.bbox_roi_extractor) - self.bbox_head = build_head(model_cfg.model.roi_head.bbox_head) - - class Context: - pass - - ctx = Context() - ctx.cfg = self.deploy_cfg - self.bbox_head__get_bboxes = partial(bbox_head__get_bboxes, ctx) - - def partition0_postprocess(self, x: Sequence[torch.Tensor], - scores: torch.Tensor, bboxes: torch.Tensor): - """Perform post-processing for partition 0. - - Args: - x (tuple[Tensor]): Feature maps of all scale levels. - scores (Tensor): The detection scores of shape - [N, num_boxes, num_classes]. - bboxes (Tensor): The bounding boxes of shape [N, num_boxes, 4]. - - Returns: - tuple(Tensor, Tensor): rois and bbox_feats. 
- """ - # rpn-nms + roi-extractor - cfg = self.model_cfg.model.test_cfg.rpn - deploy_cfg = self.deploy_cfg - - post_params = get_mmdet_params(deploy_cfg) - iou_threshold = cfg.nms.get('iou_threshold', post_params.iou_threshold) - score_threshold = cfg.get('score_thr', post_params.score_threshold) - pre_top_k = -1 if post_params.pre_top_k >= bboxes.shape[1] \ - else post_params.pre_top_k - keep_top_k = cfg.get('max_per_img', post_params.keep_top_k) - # only one class in rpn - max_output_boxes_per_class = keep_top_k - proposals, _ = multiclass_nms( - bboxes, - scores, - max_output_boxes_per_class, - iou_threshold=iou_threshold, - score_threshold=score_threshold, - pre_top_k=pre_top_k, - keep_top_k=keep_top_k) - - rois = proposals - batch_index = torch.arange( - rois.shape[0], device=rois.device).float().view(-1, 1, 1).expand( - rois.size(0), rois.size(1), 1) - rois = torch.cat([batch_index, rois[..., :4]], dim=-1) - batch_size = rois.shape[0] - num_proposals_per_img = rois.shape[1] - - # Eliminate the batch dimension - rois = rois.view(-1, 5) - bbox_feats = self.bbox_roi_extractor( - x[:self.bbox_roi_extractor.num_inputs], rois) - - rois = rois.reshape(batch_size, num_proposals_per_img, rois.size(-1)) - return rois, bbox_feats - - def partition1_postprocess(self, rois: torch.Tensor, - cls_score: torch.Tensor, - bbox_pred: torch.Tensor, - img_metas: Sequence[dict]): - """Perform post-processing for partition 1. - Args: - rois (torch.Tensor): Input tensor of roi. - cls_score (torch.Tensor): Scores of all classes. - bbox_pred (torch.Tensor): Bounding box proposals. - img_metas (Sequence[dict]): A list of image(s) meta information. - - Returns: - tuple[Tensor, Tensor]: dets of shape [N, num_det, 5] and class - labels of shape [N, num_det]. - """ - batch_size = rois.shape[0] - num_proposals_per_img = rois.shape[1] - - cls_score = cls_score.reshape(batch_size, num_proposals_per_img, - cls_score.size(-1)) - - bbox_pred = bbox_pred.reshape(batch_size, num_proposals_per_img, - bbox_pred.size(-1)) - - rcnn_test_cfg = self.model_cfg.model.test_cfg.rcnn - return self.bbox_head__get_bboxes(self.bbox_head, rois, cls_score, - bbox_pred, - img_metas[0][0]['img_shape'], - rcnn_test_cfg) - - -class ONNXRuntimePTSDetector(PartitionTwoStageDetector): - """Wrapper for partitioned two stage detector with ONNX Runtime. - - Args: - model_file (Sequence[str]): A list of paths of input model files. - class_names (Sequence[str]): A list of string specifying class names. - model_cfg: (str | mmcv.Config): Input model config. - deploy_cfg: (str | mmcv.Config): Input deployment config. - device_id (int): An integer represents device index. - """ - - def __init__(self, model_file: Sequence[str], class_names: Sequence[str], - model_cfg: Union[str, mmcv.Config], - deploy_cfg: Union[str, - mmcv.Config], device_id: int, **kwargs): - super(ONNXRuntimePTSDetector, - self).__init__(class_names, model_cfg, deploy_cfg, device_id, - **kwargs) - from mmdeploy.apis.onnxruntime import ORTWrapper - self.model_list = [ - ORTWrapper(file, device_id=device_id) for file in model_file - ] - num_partition0_outputs = len(self.model_list[0].output_names) - num_feat = num_partition0_outputs - 2 - self.model_list[0].output_names = [ - 'feat/{}'.format(i) for i in range(num_feat) - ] + ['scores', 'boxes'] - self.model_list[1].output_names = ['cls_score', 'bbox_pred'] - - def forward_test(self, imgs: torch.Tensor, img_metas: Sequence[dict], - *args, **kwargs): - """Implement forward test. 
- - Args: - imgs (torch.Tensor): Input image(s) in [N x C x H x W] format. - img_metas (Sequence[dict]): A list of image(s) meta information. - - Returns: - tuple[Tensor, Tensor]: dets of shape [N, num_det, 5] and class - labels of shape [N, num_det]. - """ - ort_outputs = self.model_list[0]({'input': imgs}) - feats = ort_outputs[:-2] - scores, bboxes = ort_outputs[-2:] - feats = [torch.from_numpy(feat).to(imgs.device) for feat in feats] - scores = torch.from_numpy(scores).to(imgs.device) - bboxes = torch.from_numpy(bboxes).to(imgs.device) - - # partition0_postprocess - rois, bbox_feats = self.partition0_postprocess(feats, scores, bboxes) - - # partition1 - ort_outputs = self.model_list[1]({'bbox_feats': bbox_feats}) - cls_score, bbox_pred = ort_outputs[:2] - cls_score = torch.from_numpy(cls_score).to(imgs.device) - bbox_pred = torch.from_numpy(bbox_pred).to(imgs.device) - - # partition1_postprocess - return self.partition1_postprocess(rois, cls_score, bbox_pred, - img_metas) - - -class TensorRTPTSDetector(PartitionTwoStageDetector): - """Wrapper for partitioned two stage detector with TensorRT. - - Args: - model_file (Sequence[str]): A list of paths of input model files. - class_names (Sequence[str]): A list of string specifying class names. - model_cfg: (str | mmcv.Config): Input model config. - deploy_cfg: (str | mmcv.Config): Input deployment config. - device_id (int): An integer represents device index. - """ - - def __init__(self, model_file: Sequence[str], class_names: Sequence[str], - model_cfg: Union[str, mmcv.Config], - deploy_cfg: Union[str, - mmcv.Config], device_id: int, **kwargs): - super(TensorRTPTSDetector, - self).__init__(class_names, model_cfg, deploy_cfg, device_id, - **kwargs) - - from mmdeploy.apis.tensorrt import TRTWrapper - - model_list = [] - for m_file in model_file: - model = TRTWrapper(m_file) - model_list.append(model) - - self.model_list = model_list - - output_names_list = [] - num_partition0_outputs = len(model_list[0].output_names) - num_feat = num_partition0_outputs - 2 - output_names_list.append( - ['feat/{}'.format(i) - for i in range(num_feat)] + ['scores', 'boxes']) # partition0 - output_names_list.append(['cls_score', 'bbox_pred']) # partition1 - self.output_names_list = output_names_list - - def forward_test(self, imgs: torch.Tensor, img_metas: Sequence[dict], - *args, **kwargs): - """Implement forward test. - - Args: - imgs (torch.Tensor): Input image(s) in [N x C x H x W] format. - img_metas (Sequence[dict]): A list of image(s) meta information. - - Returns: - tuple[np.ndarray, np.ndarray]: dets of shape [N, num_det, 5] and - class labels of shape [N, num_det]. 
- """ - with torch.cuda.device(self.device_id), torch.no_grad(): - outputs = self.model_list[0]({'input': imgs}) - outputs = [outputs[name] for name in self.output_names_list[0]] - feats = outputs[:-2] - scores, bboxes = outputs[-2:] - - # partition0_postprocess - rois, bbox_feats = self.partition0_postprocess(feats, scores, bboxes) - - # partition1 forward - bbox_feats = bbox_feats.contiguous() - with torch.cuda.device(self.device_id), torch.no_grad(): - outputs = self.model_list[1]({'bbox_feats': bbox_feats}) - outputs = [outputs[name] for name in self.output_names_list[1]] - cls_score, bbox_pred = outputs[:2] - - # partition1_postprocess - outputs = self.partition1_postprocess(rois, cls_score, bbox_pred, - img_metas) - outputs = [out.detach().cpu() for out in outputs] - return outputs - - -class NCNNPTSDetector(PartitionTwoStageDetector): - """Wrapper for partitioned two stage detector with NCNN. - - Args: - model_file (Sequence[str]): A list of paths of input model files. - class_names (Sequence[str]): A list of string specifying class names. - model_cfg: (str | mmcv.Config): Input model config. - deploy_cfg: (str | mmcv.Config): Input deployment config. - device_id (int): An integer represents device index. - """ - - def __init__(self, model_file: Sequence[str], class_names: Sequence[str], - model_cfg: Union[str, mmcv.Config], - deploy_cfg: Union[str, - mmcv.Config], device_id: int, **kwargs): - super(NCNNPTSDetector, self).__init__(class_names, model_cfg, - deploy_cfg, device_id, **kwargs) - from mmdeploy.apis.ncnn import NCNNWrapper - assert self.device_id == -1 - assert len(model_file) == 4 - - model_list = [] - for ncnn_param_file, ncnn_bin_file in zip(model_file[::2], - model_file[1::2]): - model = NCNNWrapper(ncnn_param_file, ncnn_bin_file) - model_list.append(model) - - model_cfg = load_config(model_cfg)[0] - num_output_stage1 = model_cfg['model']['neck']['num_outs'] - - output_names_list = [] - output_names_list.append( - ['feat/{}'.format(i) - for i in range(num_output_stage1)] + ['scores', 'boxes']) - output_names_list.append(['cls_score', 'bbox_pred']) - - model_list[0].set_output_names(output_names_list[0]) - model_list[1].set_output_names(output_names_list[1]) - - self.model_list = model_list - self.output_names_list = output_names_list - - def forward_test(self, imgs: torch.Tensor, img_metas: Sequence[dict], - *args, **kwargs): - """Implement forward test. - - Args: - imgs (torch.Tensor): Input image(s) in [N x C x H x W] format. - img_metas (Sequence[dict]): A list of image(s) meta information. - - Returns: - tuple[np.ndarray, np.ndarray]: dets of shape [N, num_det, 5] and - class labels of shape [N, num_det]. - """ - # stage0 forward - out_stage0 = self.model_list[0]({'input': imgs}) - - outputs = [] - for name in self.output_names_list[0]: - out = out_stage0[name] - outputs.append(out) - feats = outputs[:-2] - scores, bboxes = outputs[-2:] - - # stage0_postprocess - rois, bbox_feats = self.partition0_postprocess(feats, scores, bboxes) - - # stage1 forward - out_stage1 = self.model_list[1]({'bbox_feats': bbox_feats}) - cls_score = out_stage1['cls_score'] - bbox_pred = out_stage1['bbox_pred'] - - # stage1_postprocess - outputs = self.partition1_postprocess(rois, cls_score, bbox_pred, - img_metas) - outputs = [out.detach().cpu() for out in outputs] - return outputs - - -def get_classes_from_config(model_cfg: Union[str, mmcv.Config], **kwargs): - """Get class name from config. - - Args: - model_cfg (str | mmcv.Config): Input model config file or - Config object. 
- - Returns: - list[str]: A list of string specifying names of different class. - """ - # load cfg if necessary - model_cfg = load_config(model_cfg)[0] - module_dict = DATASETS.module_dict - data_cfg = model_cfg.data - - if 'test' in data_cfg: - module = module_dict[data_cfg.test.type] - elif 'val' in data_cfg: - module = module_dict[data_cfg.val.type] - elif 'train' in data_cfg: - module = module_dict[data_cfg.train.type] - else: - raise RuntimeError(f'No dataset config found in: {model_cfg}') - - return module.CLASSES - - -ONNXRUNTIME_DETECTOR_MAP = dict( - end2end=ONNXRuntimeDetector, - single_stage=ONNXRuntimePSSDetector, - two_stage=ONNXRuntimePTSDetector) - -TENSORRT_DETECTOR_MAP = dict( - end2end=TensorRTDetector, - single_stage=TensorRTPSSDetector, - two_stage=TensorRTPTSDetector) - -PPL_DETECTOR_MAP = dict(end2end=PPLDetector) - -NCNN_DETECTOR_MAP = dict( - single_stage=NCNNPSSDetector, two_stage=NCNNPTSDetector) - -OPENVINO_MAP = dict(end2end=OpenVINODetector) - -BACKEND_DETECTOR_MAP = { - Backend.ONNXRUNTIME: ONNXRUNTIME_DETECTOR_MAP, - Backend.TENSORRT: TENSORRT_DETECTOR_MAP, - Backend.PPL: PPL_DETECTOR_MAP, - Backend.NCNN: NCNN_DETECTOR_MAP, - Backend.OPENVINO: OPENVINO_MAP -} - - -def build_detector(model_files: Sequence[str], model_cfg: Union[str, - mmcv.Config], - deploy_cfg: Union[str, - mmcv.Config], device_id: int, **kwargs): - """Build detector for different backend. - - Args: - model_files (list[str]): Input model file(s). - model_cfg (str | mmcv.Config): Input model config file or Config - object. - deploy_cfg (str | mmcv.Config): Input deployment config file or - Config object. - device_id (int): An integer represents device index. - - Returns: - DeployBaseDetector: Detector for a configured backend. - """ - # load cfg if necessary - deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg) - - backend = get_backend(deploy_cfg) - class_names = get_classes_from_config(model_cfg) - - assert backend in BACKEND_DETECTOR_MAP, \ - f'Unsupported backend type: {backend.value}' - detector_map = BACKEND_DETECTOR_MAP[backend] - - partition_type = 'end2end' - partition_config = get_partition_config(deploy_cfg) - if partition_config is not None: - partition_type = partition_config.get('type', None) - - assert partition_type in detector_map,\ - f'Unsupported partition type: {partition_type}' - backend_detector_class = detector_map[partition_type] - - model_files = model_files[0] if len(model_files) == 1 else model_files - backend_detector = backend_detector_class( - model_file=model_files, - class_names=class_names, - device_id=device_id, - model_cfg=model_cfg, - deploy_cfg=deploy_cfg, - **kwargs) - - return backend_detector diff --git a/mmdeploy/mmdet/apis/visualize.py b/mmdeploy/mmdet/apis/visualize.py deleted file mode 100644 index b42f4dc750..0000000000 --- a/mmdeploy/mmdet/apis/visualize.py +++ /dev/null @@ -1,34 +0,0 @@ -import numpy as np - -from mmdeploy.utils import Backend - - -def show_result(model, - image: np.ndarray, - result: list, - output_file: str, - backend: Backend, - show: bool = True, - score_thr: float = 0.3): - """Show predictions of detection. - - Args: - model (nn.Module): Input model which has `show_result` method. - image: (np.ndarray): Input image to draw predictions. - result (list): A list of predictions. - output_file (str): Output image file to save drawn predictions. - backend (Backend): Specifying backend type. - show (bool): Whether to show plotted image in windows. Defaults to - `True`. 
- score_thr (float): Score threshold for detection, defaults to `0.3`. - - Returns: - np.ndarray: Drawn image, only if not `show` or `out_file`. - """ - return model.show_result( - image, - result, - score_thr=score_thr, - show=show, - win_name=backend.value, - out_file=output_file) diff --git a/mmdeploy/mmdet/core/bbox/__init__.py b/mmdeploy/mmdet/core/bbox/__init__.py deleted file mode 100644 index 7a66740087..0000000000 --- a/mmdeploy/mmdet/core/bbox/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .coder import * # noqa: F401,F403 -from .transforms import distance2bbox # noqa: F401,F403 diff --git a/mmdeploy/mmdet/core/bbox/coder/__init__.py b/mmdeploy/mmdet/core/bbox/coder/__init__.py deleted file mode 100644 index fb83388204..0000000000 --- a/mmdeploy/mmdet/core/bbox/coder/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .delta_xywh_bbox_coder import * # noqa: F401,F403 -from .tblr_bbox_coder import * # noqa: F401, F403 diff --git a/mmdeploy/mmdet/export/__init__.py b/mmdeploy/mmdet/export/__init__.py deleted file mode 100644 index 1092eaeaac..0000000000 --- a/mmdeploy/mmdet/export/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -from .model_partition import get_partition_cfg -from .onnx_utils import clip_bboxes -from .prepare_input import (build_dataloader, build_dataset, create_input, - get_tensor_from_input) -from .tensorrt_helper import pad_with_value - -__all__ = [ - 'get_partition_cfg', 'clip_bboxes', 'create_input', 'build_dataloader', - 'build_dataset', 'get_tensor_from_input', 'pad_with_value' -] diff --git a/mmdeploy/mmdet/export/onnx_utils.py b/mmdeploy/mmdet/export/onnx_utils.py deleted file mode 100644 index f08bd39f12..0000000000 --- a/mmdeploy/mmdet/export/onnx_utils.py +++ /dev/null @@ -1,47 +0,0 @@ -from typing import Sequence, Union - -import torch -from torch import Tensor - - -def clip_bboxes(x1: Tensor, y1: Tensor, x2: Tensor, y2: Tensor, - max_shape: Union[Tensor, Sequence[int]]): - """Clip bboxes for onnx. - - Since torch.clamp cannot have dynamic `min` and `max`, we scale the - boxes by 1/max_shape and clamp in the range [0, 1] if necessary. - - Args: - x1 (Tensor): The x1 for bounding boxes. - y1 (Tensor): The y1 for bounding boxes. - x2 (Tensor): The x2 for bounding boxes. - y2 (Tensor): The y2 for bounding boxes. - max_shape (Tensor | Sequence[int]): The (H,W) of original image. - Returns: - tuple(Tensor): The clipped x1, y1, x2, y2. 
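The scale-clamp-rescale trick implemented below matters for ONNX export because `torch.clamp` cannot take tensor-valued bounds; a quick numeric check of the equivalence, with illustrative values only:

# Dividing by the image size, clamping to [0, 1] and scaling back equals
# clamping to [0, W] directly.
import torch

max_shape = torch.tensor([480, 640])             # (H, W)
x1 = torch.tensor([-5.0, 100.0, 700.0])
x1_clipped = (x1 / max_shape[1]).clamp(0, 1) * max_shape[1]
print(x1_clipped)                                # tensor([  0., 100., 640.])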
- """ - assert len(max_shape) == 2, '`max_shape` should be [h, w]' - if isinstance(max_shape, torch.Tensor): - # scale by 1/max_shape - x1 = x1 / max_shape[1] - y1 = y1 / max_shape[0] - x2 = x2 / max_shape[1] - y2 = y2 / max_shape[0] - - # clamp [0, 1] - x1 = torch.clamp(x1, 0, 1) - y1 = torch.clamp(y1, 0, 1) - x2 = torch.clamp(x2, 0, 1) - y2 = torch.clamp(y2, 0, 1) - - # scale back - x1 = x1 * max_shape[1] - y1 = y1 * max_shape[0] - x2 = x2 * max_shape[1] - y2 = y2 * max_shape[0] - else: - x1 = torch.clamp(x1, 0, max_shape[1]) - y1 = torch.clamp(y1, 0, max_shape[0]) - x2 = torch.clamp(x2, 0, max_shape[1]) - y2 = torch.clamp(y2, 0, max_shape[0]) - return x1, y1, x2, y2 diff --git a/mmdeploy/mmdet/export/prepare_input.py b/mmdeploy/mmdet/export/prepare_input.py deleted file mode 100644 index 2d2f473c16..0000000000 --- a/mmdeploy/mmdet/export/prepare_input.py +++ /dev/null @@ -1,162 +0,0 @@ -from typing import Any, Dict, Optional, Sequence, Union - -import mmcv -import numpy as np -from mmcv.parallel import collate, scatter -from mmdet.datasets import build_dataloader as build_dataloader_mmdet -from mmdet.datasets import build_dataset as build_dataset_mmdet -from mmdet.datasets import replace_ImageToTensor -from mmdet.datasets.pipelines import Compose -from torch.utils.data import Dataset - -from mmdeploy.utils import Task, load_config - - -def create_input(task: Task, - model_cfg: Union[str, mmcv.Config], - imgs: Any, - input_shape: Sequence[int] = None, - device: str = 'cuda:0'): - """Create input for detector. - - Args: - task (Task): Specifying task type. - model_cfg (str | mmcv.Config): The input model config. - imgs (Any): Input image(s), accpeted data type are `str`, - `np.ndarray`, `torch.Tensor`. - input_shape (list[int]): A list of two integer in (width, height) - format specifying input shape. Defaults to `None`. - device (str): A string represents device type. Default is 'cuda:0'. - - Returns: - tuple: (data, img), meta information for the input image and input. - """ - assert task == Task.OBJECT_DETECTION - cfg = load_config(model_cfg)[0].copy() - - if not isinstance(imgs, (list, tuple)): - imgs = [imgs] - - if isinstance(imgs[0], np.ndarray): - cfg = cfg.copy() - # set loading pipeline type - cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam' - # for static exporting - if input_shape is not None: - cfg.data.test.pipeline[1]['img_scale'] = tuple(input_shape) - transforms = cfg.data.test.pipeline[1]['transforms'] - for trans in transforms: - trans_type = trans['type'] - if trans_type == 'Resize': - trans['keep_ratio'] = False - elif trans_type == 'Pad': - trans['size_divisor'] = 1 - - cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline) - test_pipeline = Compose(cfg.data.test.pipeline) - data_list = [] - for img in imgs: - # prepare data - if isinstance(img, np.ndarray): - # directly add img - data = dict(img=img) - else: - # add information into dict - data = dict(img_info=dict(filename=img), img_prefix=None) - # build the data pipeline - data = test_pipeline(data) - data_list.append(data) - - data = collate(data_list, samples_per_gpu=len(imgs)) - - data['img_metas'] = [img_metas.data[0] for img_metas in data['img_metas']] - data['img'] = [img.data[0] for img in data['img']] - if device != 'cpu': - data = scatter(data, [device])[0] - - return data, data['img'] - - -def build_dataset(dataset_cfg: Union[str, mmcv.Config], - dataset_type: str = 'val', - **kwargs): - """Build dataset for detection. 
- - Args: - dataset_cfg (str | mmcv.Config): The input dataset config. - dataset_type (str): A string represents dataset type, e.g.: 'train', - 'test', 'val'. Defaults to 'val'. - - Returns: - Dataset: A PyTorch dataset. - """ - dataset_cfg = load_config(dataset_cfg)[0].copy() - - assert dataset_type in dataset_cfg.data - data_cfg = dataset_cfg.data[dataset_type] - # in case the dataset is concatenated - if isinstance(data_cfg, dict): - data_cfg.test_mode = True - samples_per_gpu = data_cfg.get('samples_per_gpu', 1) - if samples_per_gpu > 1: - # Replace 'ImageToTensor' to 'DefaultFormatBundle' - data_cfg.pipeline = replace_ImageToTensor(data_cfg.pipeline) - elif isinstance(data_cfg, list): - for ds_cfg in data_cfg: - ds_cfg.test_mode = True - samples_per_gpu = max( - [ds_cfg.get('samples_per_gpu', 1) for ds_cfg in data_cfg]) - if samples_per_gpu > 1: - for ds_cfg in data_cfg: - ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline) - dataset = build_dataset_mmdet(data_cfg) - - return dataset - - -def build_dataloader(dataset: Dataset, - samples_per_gpu: int, - workers_per_gpu: int, - num_gpus: int = 1, - dist: bool = False, - shuffle: bool = False, - seed: Optional[int] = None, - **kwargs): - """Build dataloader for detection. - - Args: - dataset (Dataset): Input dataset. - samples_per_gpu (int): Number of training samples on each GPU, i.e., - batch size of each GPU. - workers_per_gpu (int): How many subprocesses to use for data loading - for each GPU. - num_gpus (int): Number of GPUs. Only used in non-distributed training. - dist (bool): Distributed training/test or not. Defaults to `False`. - shuffle (bool): Whether to shuffle the data at every epoch. - Defaults to `False`. - seed (int): An integer set to be seed. Default is `None`. - kwargs: Any other keyword argument to be used to initialize DataLoader. - - Returns: - DataLoader: A PyTorch dataloader. - """ - return build_dataloader_mmdet( - dataset, - samples_per_gpu, - workers_per_gpu, - num_gpus=num_gpus, - dist=dist, - shuffle=shuffle, - seed=seed, - **kwargs) - - -def get_tensor_from_input(input_data: Dict[str, Any]): - """Get input tensor from input data. - - Args: - input_data (dict): Input data containing meta info and image tensor. - Returns: - torch.Tensor: An image in `Tensor`. - """ - return input_data['img'][0] diff --git a/mmdeploy/mmdet/export/tensorrt_helper.py b/mmdeploy/mmdet/export/tensorrt_helper.py deleted file mode 100644 index 0b7dc0871c..0000000000 --- a/mmdeploy/mmdet/export/tensorrt_helper.py +++ /dev/null @@ -1,35 +0,0 @@ -from typing import Any, Optional - -import torch -from torch import Tensor - - -def pad_with_value(x: Tensor, - pad_dim: int, - pad_size: int, - pad_value: Optional[Any] = None): - """Pad a tensor with a value along some dim. - - Args: - x (Tensor): Input tensor. - pad_dim (int): Along which dim to pad. - pad_size (int): To which size to pad. - pad_value (Any): Filled value for padding. Defaults to `None`. - - Returns: - Tensor: Padded tensor. 
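As a minimal demonstration of what `pad_with_value` computes, the call below extends a tensor along one dimension by a constant block, mirroring the slice-repeat-concat logic implemented next (toy shapes only):

# Equivalent effect for pad_dim=1, pad_size=4, pad_value=0.
import torch

x = torch.arange(6.0).reshape(1, 2, 3)   # toy tensor, pad along dim 1
pad = torch.zeros(1, 4, 3)               # block of the pad value
padded = torch.cat([x, pad], dim=1)
print(padded.shape)                       # torch.Size([1, 6, 3])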
- """ - num_dims = len(x.shape) - pad_slice = (slice(None, None, None), ) * num_dims - pad_slice = pad_slice[:pad_dim] + (slice(0, 1, - 1), ) + pad_slice[pad_dim + 1:] - repeat_size = [1] * num_dims - repeat_size[pad_dim] = pad_size - - x_pad = x.__getitem__(pad_slice) - if pad_value is not None: - x_pad = x_pad * 0 + pad_value - - x_pad = x_pad.repeat(*repeat_size) - x = torch.cat([x, x_pad], dim=pad_dim) - return x diff --git a/mmdeploy/mmdet/models/dense_heads/__init__.py b/mmdeploy/mmdet/models/dense_heads/__init__.py deleted file mode 100644 index 467b387b28..0000000000 --- a/mmdeploy/mmdet/models/dense_heads/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -from .anchor_head import anchor_head__get_bboxes -from .atss_head import atss_head__get_bboxes -from .fcos_head import fcos_head__get_bboxes -from .fovea_head import fovea_head__get_bboxes -from .rpn_head import rpn_head__get_bboxes -from .vfnet_head import vfnet_head__get_bboxes -from .yolo_head import yolov3_head__get_bboxes, yolov3_head__get_bboxes__ncnn -from .yolox_head import yolox_head__get_bboxes - -__all__ = [ - 'anchor_head__get_bboxes', 'atss_head__get_bboxes', - 'fcos_head__get_bboxes', 'fovea_head__get_bboxes', 'rpn_head__get_bboxes', - 'vfnet_head__get_bboxes', 'yolov3_head__get_bboxes', - 'yolov3_head__get_bboxes__ncnn', 'yolox_head__get_bboxes' -] diff --git a/mmdeploy/mmdet/models/detectors/__init__.py b/mmdeploy/mmdet/models/detectors/__init__.py deleted file mode 100644 index d206d63afe..0000000000 --- a/mmdeploy/mmdet/models/detectors/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -from .base import base_detector__forward -from .rpn import rpn__simple_test -from .single_stage import single_stage__simple_test -from .two_stage import two_stage__extract_feat - -__all__ = [ - 'single_stage__simple_test', 'two_stage__extract_feat', - 'base_detector__forward', 'rpn__simple_test' -] diff --git a/mmdeploy/mmdet/models/roi_heads/__init__.py b/mmdeploy/mmdet/models/roi_heads/__init__.py deleted file mode 100644 index b5ac1221bf..0000000000 --- a/mmdeploy/mmdet/models/roi_heads/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .bbox_heads import * # noqa: F401, F403 -from .cascade_roi_head import * # noqa: F401, F403 -from .mask_heads import * # noqa: F401, F403 -from .roi_extractors import * # noqa: F401, F403 -from .standard_roi_head import * # noqa: F401, F403 -from .test_mixins import * # noqa: F401, F403 diff --git a/mmdeploy/mmdet/models/roi_heads/bbox_heads/__init__.py b/mmdeploy/mmdet/models/roi_heads/bbox_heads/__init__.py deleted file mode 100644 index 7b7bdf48c9..0000000000 --- a/mmdeploy/mmdet/models/roi_heads/bbox_heads/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .bbox_head import bbox_head__get_bboxes - -__all__ = ['bbox_head__get_bboxes'] diff --git a/mmdeploy/mmdet/models/roi_heads/mask_heads/__init__.py b/mmdeploy/mmdet/models/roi_heads/mask_heads/__init__.py deleted file mode 100644 index 7bafcfc008..0000000000 --- a/mmdeploy/mmdet/models/roi_heads/mask_heads/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .fcn_mask_head import fcn_mask_head__get_seg_masks - -__all__ = ['fcn_mask_head__get_seg_masks'] diff --git a/mmdeploy/mmdet/models/roi_heads/roi_extractors/__init__.py b/mmdeploy/mmdet/models/roi_heads/roi_extractors/__init__.py deleted file mode 100644 index 105e406d1d..0000000000 --- a/mmdeploy/mmdet/models/roi_heads/roi_extractors/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -from .single_level_roi_extractor import ( - single_roi_extractor__forward, single_roi_extractor__forward__openvino, - 
single_roi_extractor__forward__tensorrt)
-
-__all__ = [
-    'single_roi_extractor__forward', 'single_roi_extractor__forward__openvino',
-    'single_roi_extractor__forward__tensorrt'
-]
diff --git a/mmdeploy/mmedit/apis/__init__.py b/mmdeploy/mmedit/apis/__init__.py
deleted file mode 100644
index 61a020cfa7..0000000000
--- a/mmdeploy/mmedit/apis/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-from .inference import build_editing_processor
-from .visualize import show_result
-
-__all__ = ['build_editing_processor', 'show_result']
diff --git a/mmdeploy/mmedit/apis/inference.py b/mmdeploy/mmedit/apis/inference.py
deleted file mode 100644
index c675574f1b..0000000000
--- a/mmdeploy/mmedit/apis/inference.py
+++ /dev/null
@@ -1,314 +0,0 @@
-import warnings
-from typing import Optional, Sequence, Union
-
-import mmcv
-import numpy as np
-import torch
-from mmedit.core import psnr, ssim, tensor2img
-from mmedit.models import BaseModel
-
-from mmdeploy.utils.config_utils import Backend, get_backend, load_config
-
-
-class DeployBaseRestorer(BaseModel):
-    """Base Class of Wrapper for restorer's inference.
-
-    Args:
-        device_id (int): An integer represents device index.
-        test_cfg (mmcv.Config) : The test config in model config, which is used
-            in evaluation. Defaults to `None`.
-    """
-
-    allowed_metrics = {'PSNR': psnr, 'SSIM': ssim}
-
-    def __init__(self,
-                 device_id: int,
-                 test_cfg: Optional[mmcv.Config] = None,
-                 **kwargs):
-        super(DeployBaseRestorer, self).__init__(**kwargs)
-        self.test_cfg = test_cfg
-        self.device_id = device_id
-
-    def init_weights(self):
-        raise NotImplementedError('This method is not implemented.')
-
-    def forward(self, lq: torch.Tensor, test_mode: bool = False, **kwargs):
-        """Run test inference for restorer.
-
-        We want forward() to output an image or an evaluation result.
-        When test_mode is set, the output is an evaluation result. Otherwise
-        it is an image.
-
-        Args:
-            lq (torch.Tensor): The input low-quality image of the model.
-            test_mode (bool): When test_mode is set, the output is an
-                evaluation result. Otherwise it is an image. Defaults to
-                `False`.
-
-        Returns:
-            torch.Tensor | dict: High-resolution image or evaluation results.
-        """
-
-        if test_mode:
-            return self.forward_test(lq, **kwargs)
-        else:
-            return self.forward_dummy(lq, **kwargs)
-
-    def forward_train(self, imgs, labels):
-        raise NotImplementedError('This method is not implemented.')
-
-    def forward_test(self,
-                     lq: torch.Tensor,
-                     gt: Optional[torch.Tensor] = None,
-                     **kwargs):
-        """Run inference for restorer to generate evaluation result.
-
-        Args:
-            lq (torch.Tensor): The input low-quality image of the model.
-            gt (torch.Tensor): The ground truth of input image. Defaults to
-                `None`.
-
-        Returns:
-            dict: Evaluation results.
-        """
-        outputs = self.forward_dummy(lq)
-        result = self._test_post_process(outputs, lq, gt)
-        return result
-
-    def train_step(self, data_batch, optimizer):
-        raise NotImplementedError('This method is not implemented.')
-
-    def evaluate(self, output: torch.Tensor, gt: torch.Tensor):
-        """Evaluation function implemented in mmedit.
-
-        Args:
-            output (torch.Tensor): Model output with shape (n, c, h, w).
-            gt (torch.Tensor): GT Tensor with shape (n, c, h, w).
-
-        Returns:
-            dict: Evaluation results.
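As a rough illustration of the metric dispatch used by `evaluate`: each name in `test_cfg.metrics` is looked up in `allowed_metrics` and applied to uint8 images. Toy arrays, metrics called directly rather than through the wrapper:

# mmedit's psnr/ssim are the same functions imported at the top of this file.
import numpy as np
from mmedit.core import psnr, ssim

allowed_metrics = {'PSNR': psnr, 'SSIM': ssim}
output = np.random.randint(0, 256, (32, 32, 3), dtype=np.uint8)
gt = np.clip(output.astype(np.int16) + 2, 0, 255).astype(np.uint8)

for metric in ['PSNR', 'SSIM']:
    print(metric, allowed_metrics[metric](output, gt, crop_border=0))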
- """ - crop_border = self.test_cfg.crop_border - - if isinstance(output, np.ndarray): - output = torch.from_numpy(output) - output = tensor2img(output) - gt = tensor2img(gt) - - eval_result = dict() - for metric in self.test_cfg.metrics: - eval_result[metric] = self.allowed_metrics[metric](output, gt, - crop_border) - return eval_result - - def _test_post_process(self, - outputs: torch.Tensor, - lq: torch.Tensor, - gt: Optional[torch.Tensor] = None): - """Get evaluation results by post-processing model outputs. - - Args: - output (torch.Tensor) : The output high resolution image. - lq (torch.Tensor): The input low-quality image of the model. - gt (torch.Tensor): The ground truth of input image, default is - `None`. - - Returns: - dict: Evaluation results. - """ - if self.test_cfg is not None and self.test_cfg.get('metrics', None): - assert gt is not None, ( - 'evaluation with metrics must have gt images.') - results = dict(eval_result=self.evaluate(outputs, gt)) - else: - results = dict(lq=lq.cpu(), output=outputs) - if gt is not None: - results['gt'] = gt.cpu() - - return results - - -class ONNXRuntimeRestorer(DeployBaseRestorer): - """Wrapper for restorer's inference with ONNXRuntime. - - Args: - model_file (str): The path of an input model file. - device_id (int): An integer represents device index. - test_cfg (mmcv.Config) : The test config in model config, which is - used in evaluation. Defaults to `None`. - """ - - def __init__(self, - model_file: str, - device_id: int, - test_cfg: Optional[mmcv.Config] = None, - **kwargs): - super(ONNXRuntimeRestorer, self).__init__( - device_id, test_cfg=test_cfg, **kwargs) - - from mmdeploy.apis.onnxruntime import ORTWrapper - self.model = ORTWrapper(model_file, device_id) - - def forward_dummy(self, lq: torch.Tensor, *args, **kwargs): - """Run test inference for restorer with ONNXRuntime. - - Args: - lq (torch.Tensor): The input low-quality image of the model. - - Returns: - list[np.ndarray] : High resolution image. - """ - ort_outputs = self.model({'input': lq}) - # only concern pred_alpha value - if isinstance(ort_outputs, (tuple, list)): - ort_outputs = ort_outputs[0] - return ort_outputs - - -class TensorRTRestorer(DeployBaseRestorer): - """Wrapper for restorer's inference with TensorRT. - - Args: - trt_file (str): The path of an input model file. - device_id (int): An integer represents device index. - test_cfg (mmcv.Config) : The test config in model config, which is - used in evaluation. - """ - - def __init__(self, - trt_file: str, - device_id: int, - test_cfg: Optional[mmcv.Config] = None, - **kwargs): - super(TensorRTRestorer, self).__init__( - device_id, test_cfg=test_cfg, **kwargs) - - from mmdeploy.apis.tensorrt import TRTWrapper, load_tensorrt_plugin - try: - load_tensorrt_plugin() - except (ImportError, ModuleNotFoundError): - warnings.warn('If input model has custom plugins, \ - you may have to build backend ops with TensorRT') - model = TRTWrapper(trt_file) - self.model = model - - def forward_dummy(self, lq: torch.Tensor, *args, **kwargs): - """Run test inference for restorer with TensorRT. - - Args: - lq (torch.Tensor): The input low-quality image of the model. - - Returns: - list[np.ndarray]: High resolution image. - """ - input_data = lq.contiguous() - with torch.cuda.device(self.device_id), torch.no_grad(): - pred = self.model({'input': input_data})['output'] - pred = pred.detach().cpu().numpy() - return pred - - -class PPLRestorer(DeployBaseRestorer): - """Wrapper for restorer's inference with ppl. 
- - Args: - onnx_file (str): Path of input ONNX model file. - algo_file (str): Path of PPL algorithm file. - device_id (int): An integer represents device index. - test_cfg (mmcv.Config): The test config in model config, which is - used in evaluation. - """ - - def __init__(self, - onnx_file: str, - algo_file: str, - device_id: int, - test_cfg: Optional[mmcv.Config] = None, - **kwargs): - super(PPLRestorer, self).__init__( - device_id, test_cfg=test_cfg, **kwargs) - - from mmdeploy.apis.ppl import PPLWrapper - self.model = PPLWrapper(onnx_file, algo_file, device_id) - - def forward_dummy(self, lq: torch.Tensor, *args, **kwargs): - """Run test inference for restorer with PPL. - - Args: - lq (torch.Tensor): Input low-quality image of the model. - - Returns: - list[np.ndarray]: High resolution image. - """ - ppl_outputs = self.model({'input': lq}) - # only concern pred_alpha value - if isinstance(ppl_outputs, (tuple, list)): - ppl_outputs = ppl_outputs[0] - return ppl_outputs - - -ONNXRUNTIME_RESTORER_MAP = dict(end2end=ONNXRuntimeRestorer) - -TENSORRT_RESTORER_MAP = dict(end2end=TensorRTRestorer) - -PPL_RESTORER_MAP = dict(end2end=PPLRestorer) - -BACKEND_RESTORER_MAP = { - Backend.ONNXRUNTIME: ONNXRUNTIME_RESTORER_MAP, - Backend.TENSORRT: TENSORRT_RESTORER_MAP, - Backend.PPL: PPL_RESTORER_MAP, -} - - -def build_restorer(model_files: Sequence[str], backend: Backend, - model_cfg: Union[str, mmcv.Config], device_id: int): - """Build restorer for different backend. - - Args: - model_files (Sequence[str]): Input model file(s). - backend (Backend): Target backend. - model_cfg (str | mmcv.Config): Input model config file or config - object. - device_id (int): An integer represents device index. - - Returns: - DeployBaseRestorer: Restorer for a configured backend. - """ - model_map = BACKEND_RESTORER_MAP[backend] - - model_type = 'end2end' - assert model_type in model_map, f'Unsupported model type: {model_type}' - backend_model_class = model_map[model_type] - - backend_model = backend_model_class( - *model_files, device_id=device_id, test_cfg=model_cfg.test_cfg) - - return backend_model - - -def build_editing_processor(model_files: Sequence[str], - model_cfg: Union[str, mmcv.Config], - deploy_cfg: Union[str, - mmcv.Config], device_id: int): - """Build editing processor for different backend. - - Args: - model_files (Sequence[str]): Input model file(s). - model_cfg (str | mmcv.Config): Input model config file or Config - object. - deploy_cfg (str | mmcv.Config): Input deployment config file or - Config object. - device_id (int): An integer represents device index. - - Returns: - BaseModel: Editing processor for a configured backend. - """ - model_cfg = load_config(model_cfg)[0] - deploy_cfg = load_config(deploy_cfg)[0] - - backend = get_backend(deploy_cfg) - - assert backend in BACKEND_RESTORER_MAP, \ - f'Unsupported backend type: {backend.value}' - - # TODO: Add other tasks - return build_restorer(model_files, backend, model_cfg, device_id) diff --git a/mmdeploy/mmedit/apis/visualize.py b/mmdeploy/mmedit/apis/visualize.py deleted file mode 100644 index dc2995afb3..0000000000 --- a/mmdeploy/mmedit/apis/visualize.py +++ /dev/null @@ -1,46 +0,0 @@ -import warnings - -import mmcv -import numpy as np -import torch - -from mmdeploy.utils import Backend - - -# BaseModel in mmedit doesn't implement show_result -# TODO: add show_result to different tasks -def show_result(result: np.ndarray, - output_file: str, - backend: Backend, - show: bool = True): - """Show high resolution image of mmedit. 
-
-    Args:
-        result (np.ndarray): Input high resolution image.
-        output_file (str): Output image file to save image.
-        backend (Backend): Specifying backend type.
-        show (bool): Whether to show plotted image in windows. Defaults to
-            `True`.
-
-    Returns:
-        np.ndarray: Drawn image, only if not `show` or `out_file`.
-    """
-    win_name = backend.value
-    with torch.no_grad():
-        result = result.transpose(1, 2, 0)
-        result = np.clip(result, 0, 1)[:, :, ::-1]
-        result = (result * 255.0).round()
-
-        if output_file is not None:
-            show = False
-
-        if show:
-            int_result = result.astype(np.uint8)
-            mmcv.imshow(int_result, win_name, 0)
-        if output_file is not None:
-            mmcv.imwrite(result, output_file)
-
-        if not (show or output_file):
-            warnings.warn('show==False and output_file is not specified, only '
-                          'result image will be returned')
-        return result
diff --git a/mmdeploy/mmedit/export/__init__.py b/mmdeploy/mmedit/export/__init__.py
deleted file mode 100644
index 0ed7292c77..0000000000
--- a/mmdeploy/mmedit/export/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from .prepare_input import (build_dataloader, build_dataset, create_input,
-                            get_tensor_from_input)
-
-__all__ = [
-    'create_input', 'build_dataset', 'build_dataloader',
-    'get_tensor_from_input'
-]
diff --git a/mmdeploy/mmedit/export/prepare_input.py b/mmdeploy/mmedit/export/prepare_input.py
deleted file mode 100644
index 63efc94910..0000000000
--- a/mmdeploy/mmedit/export/prepare_input.py
+++ /dev/null
@@ -1,201 +0,0 @@
-from typing import Any, Dict, Optional, Sequence, Union
-
-import mmcv
-import numpy as np
-from mmcv.parallel import collate, scatter
-from mmedit.datasets import build_dataloader as build_dataloader_mmedit
-from mmedit.datasets import build_dataset as build_dataset_mmedit
-from mmedit.datasets.pipelines import Compose
-from torch.utils.data.dataset import Dataset
-
-from mmdeploy.utils import Task, load_config
-
-
-def _preprocess_cfg(config: Union[str, mmcv.Config], task: Task,
-                    load_from_file: bool, is_static_cfg: bool,
-                    input_shape: Sequence[int]):
-    """Remove unnecessary information in config.
-
-    Args:
-        config (str | mmcv.Config): The input model config.
-        task (Task): Specifying editing task type.
-        load_from_file (bool): Whether the input is a filename rather than a
-            loaded numpy array. If False, the file-loading step is removed
-            from the pipeline.
-        is_static_cfg (bool): Whether the config specifies a static export.
-            If True, the input image will be resized to a fixed resolution.
-        input_shape (Sequence[int]): A list of two integers in (width, height)
-            format specifying the input shape. Defaults to `None`.
-    """
-
-    # TODO: Differentiate the editing tasks (e.g. restorers and mattors
-    # preprocess the data in different ways)
-
-    if task == Task.SUPER_RESOLUTION:
-        keys_to_remove = ['gt', 'gt_path']
-    else:
-        raise NotImplementedError(f'Unknown task type: {task.value}')
-
-    # MMEdit doesn't support LoadImageFromWebcam.
-    # Remove "LoadImageFromFile" and related metakeys.
- if not load_from_file: - config.test_pipeline.pop(0) - if task == Task.SUPER_RESOLUTION: - keys_to_remove.append('lq_path') - - # Fix the input shape by 'Resize' - if is_static_cfg: - if task == Task.SUPER_RESOLUTION: - resize = { - 'type': 'Resize', - 'scale': (input_shape[0], input_shape[1]), - 'keys': ['lq'] - } - config.test_pipeline.insert(1, resize) - - for key in keys_to_remove: - for pipeline in list(config.test_pipeline): - if 'key' in pipeline and key == pipeline['key']: - config.test_pipeline.remove(pipeline) - if 'keys' in pipeline: - while key in pipeline['keys']: - pipeline['keys'].remove(key) - if len(pipeline['keys']) == 0: - config.test_pipeline.remove(pipeline) - if 'meta_keys' in pipeline: - while key in pipeline['meta_keys']: - pipeline['meta_keys'].remove(key) - - -def create_input(task: Task, - model_cfg: Union[str, mmcv.Config], - imgs: Union[str, np.ndarray], - input_shape: Optional[Sequence[int]] = None, - device: Optional[str] = 'cuda:0'): - """Create input for editing processor. - - Args: - task (Task): Specifying editing task type. - model_cfg (str | mmcv.Config): The input model config. - imgs (str | np.ndarray): Input image(s). - input_shape (Sequence[int]): A list of two integer in (width, height) - format specifying input shape. Defaults to `None`. - device (str): A string represents device type. Default is 'cuda:0'. - - Returns: - tuple: (data, img), meta information for the input image and input. - """ - if isinstance(imgs, (list, tuple)): - if not isinstance(imgs[0], (np.ndarray, str)): - raise AssertionError('imgs must be strings or numpy arrays') - elif isinstance(imgs, (np.ndarray, str)): - imgs = [imgs] - else: - raise AssertionError('imgs must be strings or numpy arrays') - - cfg = load_config(model_cfg)[0].copy() - - _preprocess_cfg( - cfg, - task=task, - load_from_file=isinstance(imgs[0], str), - is_static_cfg=input_shape is not None, - input_shape=input_shape) - - test_pipeline = Compose(cfg.test_pipeline) - - data_arr = [] - for img in imgs: - # TODO: This is only for restore. Add condiction statement. - if isinstance(img, np.ndarray): - data = dict(lq=img) - else: - data = dict(lq_path=img) - - data = test_pipeline(data) - data_arr.append(data) - - data = collate(data_arr, samples_per_gpu=len(imgs)) - - # TODO: This is only for restore. Add condiction statement. - data['img'] = data['lq'] - - if device != 'cpu': - data = scatter(data, [device])[0] - - return data, data['img'] - - -def build_dataset(dataset_cfg: Union[str, mmcv.Config], **kwargs): - """Build dataset for processor. - - Args: - dataset_cfg (str | mmcv.Config): The input dataset config. - - Returns: - Dataset: A PyTorch dataset. - """ - dataset_cfg = load_config(dataset_cfg)[0] - data = dataset_cfg.data - - dataset = build_dataset_mmedit(data.test) - return dataset - - -def build_dataloader(dataset: Dataset, - samples_per_gpu: int, - workers_per_gpu: int, - num_gpus: int = 1, - dist: bool = False, - shuffle: bool = False, - seed: Optional[int] = None, - drop_last: bool = False, - pin_memory: bool = True, - persistent_workers: bool = True, - **kwargs): - """Build PyTorch DataLoader. - - In distributed training, each GPU/process has a dataloader. - In non-distributed training, there is only one dataloader for all GPUs. - - Args: - dataset (:obj:`Dataset`): A PyTorch dataset. - samples_per_gpu (int): Number of samples on each GPU, i.e., - batch size of each GPU. - workers_per_gpu (int): How many subprocesses to use for data - loading for each GPU. - num_gpus (int): Number of GPUs. 
Only used in non-distributed - training. Default: 1. - dist (bool): Distributed training/test or not. Default: True. - shuffle (bool): Whether to shuffle the data at every epoch. - Default: True. - seed (int | None): Seed to be used. Default: None. - drop_last (bool): Whether to drop the last incomplete batch in epoch. - Default: False - pin_memory (bool): Whether to use pin_memory in DataLoader. - Default: True - persistent_workers (bool): If True, the data loader will not shutdown - the worker processes after a dataset has been consumed once. - This allows to maintain the workers Dataset instances alive. - The argument also has effect in PyTorch>=1.7.0. - Default: True - kwargs (dict, optional): Any keyword argument to be used to initialize - DataLoader. - - Returns: - DataLoader: A PyTorch dataloader. - """ - return build_dataloader_mmedit(dataset, samples_per_gpu, workers_per_gpu, - num_gpus, dist, shuffle, seed, drop_last, - pin_memory, persistent_workers, **kwargs) - - -def get_tensor_from_input(input_data: Dict[str, Any]): - """Get input tensor from input data. - - Args: - input_data (dict): Input data containing meta info and image tensor. - Returns: - torch.Tensor: An image in `Tensor`. - """ - return input_data['lq'] diff --git a/mmdeploy/mmedit/models/backbones/__init__.py b/mmdeploy/mmedit/models/backbones/__init__.py deleted file mode 100644 index aeaf91486a..0000000000 --- a/mmdeploy/mmedit/models/backbones/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .sr_backbones import * # noqa: F401,F403 diff --git a/mmdeploy/mmocr/apis/__init__.py b/mmdeploy/mmocr/apis/__init__.py deleted file mode 100644 index 5cfb27301c..0000000000 --- a/mmdeploy/mmocr/apis/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .inference import build_ocr_processor -from .visualize import show_result - -__all__ = ['build_ocr_processor', 'show_result'] diff --git a/mmdeploy/mmocr/apis/inference.py b/mmdeploy/mmocr/apis/inference.py deleted file mode 100644 index ab8e8997f9..0000000000 --- a/mmdeploy/mmocr/apis/inference.py +++ /dev/null @@ -1,538 +0,0 @@ -from typing import Iterable, Sequence, Union - -import mmcv -import torch -from mmdet.models.builder import DETECTORS -from mmocr.datasets import DATASETS -from mmocr.models.textdet.detectors import (SingleStageTextDetector, - TextDetectorMixin) -from mmocr.models.textrecog.recognizer import EncodeDecodeRecognizer - -from mmdeploy.utils.config_utils import (Backend, Task, get_backend, - get_task_type, load_config) - - -@DETECTORS.register_module() -class DeployBaseTextDetector(TextDetectorMixin, SingleStageTextDetector): - """Base Class of Wrapper for TextDetector. - - Args: - cfg (str | mmcv.ConfigDict): Input model config. - device_id (int): An integer represents device index. - show_score (bool): Whether to show scores. Defaults to `False`. 
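All of the deleted wrapper classes in this module share one shape: subclass the codebase model, stub out the training-only methods, and funnel inference through a backend-specific hook. A framework-free sketch of that pattern (class and method names here are illustrative, not mmdeploy API):

import torch

class DeployBaseModel(torch.nn.Module):
    """Inference-only wrapper skeleton."""

    def __init__(self, device_id: int):
        super().__init__()
        self.device_id = device_id

    def forward_train(self, *args, **kwargs):
        raise NotImplementedError('Deployed models are inference-only.')

    def forward(self, img: torch.Tensor, *args, **kwargs):
        # Route every call through the backend-specific hook.
        return self.forward_of_backend(img, *args, **kwargs)

    def forward_of_backend(self, img, *args, **kwargs):
        raise NotImplementedError  # one subclass per backend

class EchoBackendModel(DeployBaseModel):
    """Stand-in 'backend' that returns its input unchanged."""

    def forward_of_backend(self, img, *args, **kwargs):
        return img

x = torch.ones(1, 3, 4, 4)
assert torch.equal(EchoBackendModel(device_id=0)(x), x)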
- """ - - def __init__(self, - cfg: Union[mmcv.Config, mmcv.ConfigDict], - device_id: int, - show_score: bool = False, - *args, - **kwargs): - SingleStageTextDetector.__init__(self, cfg.model.backbone, - cfg.model.neck, cfg.model.bbox_head) - TextDetectorMixin.__init__(self, show_score) - self.device_id = device_id - self.show_score = show_score - self.cfg = cfg - - def forward_train(self, img, img_metas, **kwargs): - raise NotImplementedError('This method is not implemented.') - - def aug_test(self, imgs, img_metas, **kwargs): - raise NotImplementedError('This method is not implemented.') - - def extract_feat(self, imgs): - raise NotImplementedError('This method is not implemented.') - - def simple_test(self, - img: torch.Tensor, - img_metas: Sequence[dict], - rescale: bool = False, - *args, - **kwargs): - """Run forward test. - - Args: - img (torch.Tensor): Input image tensor. - img_metas (Sequence[dict]): A list of meta info for image(s). - - Returns: - list: A list of predictions. - """ - pred = self.forward_of_backend(img, img_metas, *args, **kwargs) - if len(img_metas) > 1: - boundaries = [ - self.bbox_head.get_boundary( - *(pred[i].unsqueeze(0)), [img_metas[i]], rescale=rescale) - for i in range(len(img_metas)) - ] - - else: - boundaries = [ - self.bbox_head.get_boundary(*pred, img_metas, rescale=rescale) - ] - return boundaries - - -@DETECTORS.register_module() -class DeployBaseRecognizer(EncodeDecodeRecognizer): - """Base Class of Wrapper for TextRecognizer. - - Args: - cfg (str | mmcv.ConfigDict): Input model config. - device_id (int): An integer represents device index. - show_score (bool): Whether to show scores. Defaults to `False`. - """ - - def __init__(self, - cfg: Union[mmcv.Config, mmcv.ConfigDict], - device_id: int, - show_score: bool = False, - *args, - **kwargs): - super(DeployBaseRecognizer, - self).__init__(None, cfg.model.backbone, cfg.model.encoder, - cfg.model.decoder, cfg.model.loss, - cfg.model.label_convertor, None, None, 40, None) - self.device_id = device_id - self.show_score = show_score - self.cfg = cfg - - def forward_train(self, img, img_metas, **kwargs): - raise NotImplementedError('This method is not implemented.') - - def aug_test(self, imgs, img_metas, **kwargs): - raise NotImplementedError('This method is not implemented.') - - def extract_feat(self, imgs): - raise NotImplementedError('This method is not implemented.') - - def forward(self, img: Union[torch.Tensor, Sequence[torch.Tensor]], - img_metas: Sequence[dict], *args, **kwargs): - """Run forward. - - Args: - imgs (torch.Tensor | Sequence[torch.Tensor]): Image input tensor. - img_metas (Sequence[dict]): List of image information. - - Returns: - list[str]: Text label result of each image. - """ - - if isinstance(img, list): - for idx, each_img in enumerate(img): - if each_img.dim() == 3: - img[idx] = each_img.unsqueeze(0) - img = img[0] # avoid aug_test - img_metas = img_metas[0] - else: - if len(img_metas) == 1 and isinstance(img_metas[0], list): - img_metas = img_metas[0] - - return self.simple_test(img, img_metas, **kwargs) - - def simple_test(self, img: torch.Tensor, img_metas: Sequence[dict], *args, - **kwargs): - """Run forward test. - - Args: - imgs (torch.Tensor): Image input tensor. - img_metas (Sequence[dict]): List of image information. - - Returns: - list[str]: Text label result of each image. 
- """ - pred = self.forward_of_backend(img, img_metas, *args, **kwargs) - label_indexes, label_scores = self.label_convertor.tensor2idx( - pred, img_metas) - label_strings = self.label_convertor.idx2str(label_indexes) - - # flatten batch results - results = [] - for string, score in zip(label_strings, label_scores): - results.append(dict(text=string, score=score)) - - return results - - -class ONNXRuntimeDetector(DeployBaseTextDetector): - """Wrapper for TextDetector with ONNX Runtime. - - Args: - model_file (str): The path of input model file. - cfg (str | mmcv.ConfigDict): Input model config. - device_id (int): An integer represents device index. - show_score (bool): Whether to show scores. Defaults to `False`. - """ - - def __init__(self, - model_file: str, - cfg: Union[mmcv.Config, mmcv.ConfigDict], - device_id: int, - show_score: bool = False, - *args, - **kwargs): - super(ONNXRuntimeDetector, self).__init__(cfg, device_id, show_score) - from mmdeploy.apis.onnxruntime import ORTWrapper - self.model = ORTWrapper(model_file, device_id) - - def forward_of_backend(self, img: torch.Tensor, img_metas: Iterable, *args, - **kwargs): - """Implement forward test with a backend. - - Args: - imgs (torch.Tensor): Input image(s) in [N x C x H x W] format. - img_metas (Sequence[dict]]): List of image information. - Returns: - np.ndarray: Prediction of input model. - """ - onnx_pred = self.model({'input': img}) - onnx_pred = torch.from_numpy(onnx_pred[0]) - return onnx_pred - - -class ONNXRuntimeRecognizer(DeployBaseRecognizer): - """Wrapper for TextRecognizer with ONNX Runtime. - - Args: - model_file (str): The path of input model file. - cfg (str | mmcv.ConfigDict): Input model config. - device_id (int): An integer represents device index. - show_score (bool): Whether to show scores. Defaults to `False`. - """ - - def __init__(self, - model_file: str, - cfg: Union[mmcv.Config, mmcv.ConfigDict], - device_id: int, - show_score: bool = False, - *args, - **kwargs): - super(ONNXRuntimeRecognizer, self).__init__(cfg, device_id, show_score) - from mmdeploy.apis.onnxruntime import ORTWrapper - self.model = ORTWrapper(model_file, device_id) - - def forward_of_backend(self, img: torch.Tensor, img_metas: Sequence[dict], - *args, **kwargs): - """Implement forward test with a backend. - - Args: - imgs (torch.Tensor): Input image(s) in [N x C x H x W] format. - img_metas (Sequence[dict]]): List of image information. - Returns: - np.ndarray: Prediction of input model. - """ - onnx_pred = self.model({'input': img}) - onnx_pred = torch.from_numpy(onnx_pred[0]) - return onnx_pred - - -class TensorRTDetector(DeployBaseTextDetector): - """Wrapper for TextDetector with TensorRT. - - Args: - model_file (str): The path of input model file. - cfg (str | mmcv.ConfigDict): Input model config. - device_id (int): An integer represents device index. - show_score (bool): Whether to show scores. Defaults to `False`. - """ - - def __init__(self, - model_file: str, - cfg: Union[mmcv.Config, mmcv.ConfigDict], - device_id: int, - show_score: bool = False, - *args, - **kwargs): - super(TensorRTDetector, self).__init__(cfg, device_id, show_score) - from mmdeploy.apis.tensorrt import TRTWrapper - model = TRTWrapper(model_file) - self.model = model - - def forward_of_backend(self, img: torch.Tensor, img_metas: Sequence[dict], - *args, **kwargs): - """Implement forward test with a backend. - - Args: - imgs (torch.Tensor): Input image(s) in [N x C x H x W] format. - img_metas (Sequence[dict]]): List of image information. 
- Returns: - np.ndarray: Prediction of input model. - """ - with torch.cuda.device(self.device_id), torch.no_grad(): - trt_pred = self.model({'input': img})['output'] - return trt_pred - - -class TensorRTRecognizer(DeployBaseRecognizer): - """Wrapper for TextRecognizer with TensorRT. - - Args: - model_file (str): The path of input model file. - cfg (str | mmcv.ConfigDict): Input model config. - device_id (int): An integer represents device index. - show_score (bool): Whether to show scores. Defaults to `False`. - """ - - def __init__(self, - model_file: str, - cfg: Union[mmcv.Config, mmcv.ConfigDict], - device_id: int, - show_score: bool = False, - *args, - **kwargs): - super(TensorRTRecognizer, self).__init__(cfg, device_id, show_score) - from mmdeploy.apis.tensorrt import TRTWrapper - model = TRTWrapper(model_file) - self.model = model - - def forward_of_backend(self, img: torch.Tensor, img_metas: Sequence[dict], - *args, **kwargs): - """Implement forward test with a backend. - - Args: - imgs (torch.Tensor): Input image(s) in [N x C x H x W] format. - img_metas (Sequence[dict]]): List of image information. - Returns: - torch.Tensor: Prediction of input model. - """ - with torch.cuda.device(self.device_id), torch.no_grad(): - trt_pred = self.model({'input': img})['output'] - return trt_pred - - -class NCNNDetector(DeployBaseTextDetector): - """Wrapper for TextDetector with NCNN. - - Args: - model_file (Sequence[str]): Paths of input model files. - cfg (str | mmcv.ConfigDict): Input model config. - device_id (int): An integer represents device index. - show_score (bool): Whether to show scores. Defaults to `False`. - """ - - def __init__(self, - model_file: Sequence[str], - cfg: Union[mmcv.Config, mmcv.ConfigDict], - device_id: int, - show_score: bool = False): - super(NCNNDetector, self).__init__(cfg, device_id, show_score) - from mmdeploy.apis.ncnn import NCNNWrapper - self.model = NCNNWrapper( - model_file[0], model_file[1], output_names=['output']) - - def forward_of_backend(self, img: torch.Tensor, img_metas: Sequence[dict], - *args, **kwargs): - """Implement forward test with a backend. - - Args: - imgs (torch.Tensor): Input image(s) in [N x C x H x W] format. - img_metas (Sequence[dict]]): List of image information. - Returns: - torch.Tensor: Prediction of input model. - """ - pred = self.model({'input': img})['output'] - return pred - - -class NCNNRecognizer(DeployBaseRecognizer): - """Wrapper for TextRecognizer with NCNN. - - Args: - model_file (Sequence[str]): Paths of input model files. - cfg (str | mmcv.ConfigDict): Input model config. - device_id (int): An integer represents device index. - show_score (bool): Whether to show scores. Defaults to `False`. - """ - - def __init__(self, - model_file: Sequence[str], - cfg: Union[mmcv.Config, mmcv.ConfigDict], - device_id: int, - show_score: bool = False): - super(NCNNRecognizer, self).__init__(cfg, device_id, show_score) - from mmdeploy.apis.ncnn import NCNNWrapper - self.model = NCNNWrapper( - model_file[0], model_file[1], output_names=['output']) - - def forward_of_backend(self, img: torch.Tensor, img_metas: Sequence[dict], - *args, **kwargs): - """Implement forward test with a backend. - - Args: - imgs (torch.Tensor): Input image(s) in [N x C x H x W] format. - img_metas (Sequence[dict]]): List of image information. - Returns: - torch.Tensor: Prediction of input model. - """ - pred = self.model({'input': img})['output'] - return pred - - -class PPLDetector(DeployBaseTextDetector): - """Wrapper for TextDetector with PPL. 
- - Args: - model_file (Sequence[str]): Paths of input model files. - cfg (str | mmcv.ConfigDict): Input model config. - device_id (int): An integer represents device index. - show_score (bool): Whether to show scores. Defaults to `False`. - """ - - def __init__(self, - model_file: str, - cfg: Union[mmcv.Config, mmcv.ConfigDict], - device_id: int, - show_score: bool = False, - *args, - **kwargs): - super(PPLDetector, self).__init__(cfg, device_id, show_score) - from mmdeploy.apis.ppl import PPLWrapper - model = PPLWrapper(model_file[0], model_file[1], device_id) - self.model = model - - def forward_of_backend(self, img: torch.Tensor, img_metas: Sequence[dict], - *args, **kwargs): - """Implement forward test with a backend. - - Args: - imgs (torch.Tensor): Input image(s) in [N x C x H x W] format. - img_metas (Sequence[dict]]): List of image information. - Returns: - torch.Tensor: Prediction of input model. - """ - with torch.cuda.device(self.device_id), torch.no_grad(): - ppl_pred = self.model({'input': img}) - ppl_pred = torch.from_numpy(ppl_pred[0]) - return ppl_pred - - -class PPLRecognizer(DeployBaseRecognizer): - """Wrapper for TextRecognizer with PPL. - - Args: - onnx_file (str): Path of input ONNX model file. - algo_file (str): Path of PPL algorithm file. - cfg (str | mmcv.ConfigDict): Input model config. - device_id (int): An integer represents device index. - show_score (bool): Whether to show scores. Defaults to `False`. - """ - - def __init__(self, - model_file: str, - algo_file: str, - cfg: Union[mmcv.Config, mmcv.ConfigDict], - device_id: int, - show_score: bool = False, - *args, - **kwargs): - super(PPLRecognizer, self).__init__(cfg, device_id, show_score) - from mmdeploy.apis.ppl import PPLWrapper - model = PPLWrapper(model_file, algo_file, device_id) - self.model = model - - def forward_of_backend(self, img: torch.Tensor, img_metas: Sequence[dict], - *args, **kwargs): - """Implement forward test with a backend. - - Args: - imgs (torch.Tensor): Input image(s) in [N x C x H x W] format. - img_metas (Sequence[dict]]): List of image information. - Returns: - torch.Tensor: Prediction of input model. - """ - with torch.cuda.device(self.device_id), torch.no_grad(): - ppl_pred = self.model({'input': img})[0] - ppl_pred = torch.from_numpy(ppl_pred[0]) - return ppl_pred - - -def get_classes_from_config(model_cfg: Union[str, mmcv.Config], **kwargs): - """Get class name from config. - - Args: - model_cfg (str | mmcv.Config): Input model config file or - Config object. - - Returns: - list[str]: A list of string specifying names of different class. 
- """ - # load cfg if necessary - model_cfg = load_config(model_cfg)[0] - module_dict = DATASETS.module_dict - data_cfg = model_cfg.data - - if 'train' in data_cfg: - module = module_dict[data_cfg.train.type] - elif 'val' in data_cfg: - module = module_dict[data_cfg.val.type] - elif 'test' in data_cfg: - module = module_dict[data_cfg.test.type] - else: - raise RuntimeError(f'No dataset config found in: {model_cfg}') - - return module.CLASSES - - -TASK_ONNXRUNTIME_MAP = { - Task.TEXT_DETECTION: ONNXRuntimeDetector, - Task.TEXT_RECOGNITION: ONNXRuntimeRecognizer -} - -TASK_TENSORRT_MAP = { - Task.TEXT_DETECTION: TensorRTDetector, - Task.TEXT_RECOGNITION: TensorRTRecognizer -} - -TASK_PPL_MAP = { - Task.TEXT_DETECTION: PPLDetector, - Task.TEXT_RECOGNITION: PPLRecognizer -} - -TASK_NCNN_MAP = { - Task.TEXT_DETECTION: NCNNDetector, - Task.TEXT_RECOGNITION: NCNNRecognizer -} - -BACKEND_TASK_MAP = { - Backend.ONNXRUNTIME: TASK_ONNXRUNTIME_MAP, - Backend.TENSORRT: TASK_TENSORRT_MAP, - Backend.PPL: TASK_PPL_MAP, - Backend.NCNN: TASK_NCNN_MAP -} - - -def build_ocr_processor(model_files: Sequence[str], - model_cfg: Union[str, mmcv.Config], - deploy_cfg: Union[str, mmcv.Config], device_id: int, - **kwargs): - """Build text detector or recognizer for a backend. - - Args: - model_files (Sequence[str]): Input model file(s). - model_cfg (str | mmcv.Config): Input model config file or Config - object. - deploy_cfg (str | mmcv.Config): Input deployment config file or - Config object. - device_id (int): An integer represents device index. - - Returns: - nn.Module: An instance of text detector or recognizer. - """ - # load cfg if necessary - deploy_cfg, model_cfg = load_config(deploy_cfg, model_cfg) - - backend = get_backend(deploy_cfg) - task = get_task_type(deploy_cfg) - - assert backend in BACKEND_TASK_MAP, \ - f'Unsupported backend type: {backend.value}' - assert task in BACKEND_TASK_MAP[backend], \ - f'Unsupported task type: {task.value}' - backend_task_class = BACKEND_TASK_MAP[backend][task] - - model_files = model_files[0] if len(model_files) == 1 else model_files - backend_detector = backend_task_class( - model_file=model_files, cfg=model_cfg, device_id=device_id, **kwargs) - - return backend_detector diff --git a/mmdeploy/mmocr/apis/visualize.py b/mmdeploy/mmocr/apis/visualize.py deleted file mode 100644 index 7b584a8fcb..0000000000 --- a/mmdeploy/mmocr/apis/visualize.py +++ /dev/null @@ -1,35 +0,0 @@ -import numpy as np -import torch - -from mmdeploy.utils import Backend - - -def show_result(model: torch.nn.Module, - image: np.ndarray, - result: list, - output_file: str, - backend: Backend, - show: bool = True, - score_thr: float = 0.3): - """Show predictions of text detector or recognizer. - - Args: - model (nn.Module): Input model which has `show_result` method. - image: (np.ndarray): Input image to draw predictions. - result (list): A list of predictions. - output_file (str): Output image file to save drawn predictions. - backend (Backend): Specifying backend type. - show (bool): Whether to show plotted image in windows. Defaults to - `True`. - score_thr (float): Score threshold for result, defaults to `0.3`. - - Returns: - np.ndarray: Drawn image, only if not `show` or `out_file`. 
- """ - return model.show_result( - image, - result, - score_thr=score_thr, - show=show, - win_name=backend.value, - out_file=output_file) diff --git a/mmdeploy/mmocr/export/__init__.py b/mmdeploy/mmocr/export/__init__.py deleted file mode 100644 index 0ed7292c77..0000000000 --- a/mmdeploy/mmocr/export/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from .prepare_input import (build_dataloader, build_dataset, create_input, - get_tensor_from_input) - -__all__ = [ - 'create_input', 'build_dataset', 'build_dataloader', - 'get_tensor_from_input' -] diff --git a/mmdeploy/mmocr/export/prepare_input.py b/mmdeploy/mmocr/export/prepare_input.py deleted file mode 100644 index f562366950..0000000000 --- a/mmdeploy/mmocr/export/prepare_input.py +++ /dev/null @@ -1,194 +0,0 @@ -from typing import Any, Optional, Sequence, Union - -import mmcv -import numpy as np -from mmcv.parallel import DataContainer, collate, scatter -from mmdet.datasets import replace_ImageToTensor -from mmocr.datasets import build_dataloader as build_dataloader_mmocr -from mmocr.datasets import build_dataset as build_dataset_mmocr -from torch.utils.data import Dataset - -from mmdeploy.utils import Task, load_config - - -def create_input(task: Task, - model_cfg: Union[str, mmcv.Config], - imgs: Any, - input_shape: Sequence[int] = None, - device: str = 'cuda:0'): - """Create input for text detector/recognizer. - - Args: - task (Task): Specifying task type. - model_cfg (str | mmcv.Config): The input model config. - imgs (Any): Input image(s), accpeted data type are `str`, - `np.ndarray`, `torch.Tensor`. - input_shape (list[int]): A list of two integer in (width, height) - format specifying input shape. Defaults to `None`. - device (str): A string represents device type. Default is 'cuda:0'. - - Returns: - tuple: (data, img), meta information for the input image and input. 
- """ - if isinstance(imgs, (list, tuple)): - if not isinstance(imgs[0], (np.ndarray, str)): - raise AssertionError('imgs must be strings or numpy arrays') - - elif isinstance(imgs, (np.ndarray, str)): - imgs = [imgs] - else: - raise AssertionError('imgs must be strings or numpy arrays') - - if model_cfg.data.test['type'] == 'ConcatDataset': - model_cfg.data.test.pipeline = \ - model_cfg.data.test['datasets'][0].pipeline - - is_ndarray = isinstance(imgs[0], np.ndarray) - - if is_ndarray: - model_cfg = model_cfg.copy() - # set loading pipeline type - model_cfg.data.test.pipeline[0].type = 'LoadImageFromNdarray' - - test_pipeline = model_cfg.data.test.pipeline - test_pipeline = replace_ImageToTensor(test_pipeline) - # for static exporting - if input_shape is not None: - if task == Task.TEXT_DETECTION: - test_pipeline[1].img_scale = tuple(input_shape) - test_pipeline[1].transforms[0].keep_ratio = False - test_pipeline[1].transforms[0].img_scale = tuple(input_shape) - elif task == Task.TEXT_RECOGNITION: - resize = { - 'height': input_shape[1], - 'min_width': input_shape[0], - 'max_width': input_shape[0], - 'keep_aspect_ratio': False - } - if 'transforms' in test_pipeline[1]: - if test_pipeline[1].transforms[0].type == 'ResizeOCR': - test_pipeline[1].transforms[0].height = input_shape[1] - test_pipeline[1].transforms[0].max_width = input_shape[0] - else: - raise ValueError( - f'Transforms[0] should be ResizeOCR, but got\ - {test_pipeline[1].transforms[0].type}') - else: - test_pipeline[1].update(resize) - from mmdet.datasets.pipelines import Compose - from mmocr.datasets import build_dataset # noqa: F401 - test_pipeline = Compose(test_pipeline) - - data_list = [] - for img in imgs: - # prepare data - if is_ndarray: - # directly add img - data = dict(img=img) - else: - # add information into dict - data = dict(img_info=dict(filename=img), img_prefix=None) - - # build the data pipeline - data = test_pipeline(data) - # get tensor from list to stack for batch mode (text detection) - data_list.append(data) - - if isinstance(data_list[0]['img'], list) and len(data_list) > 1: - raise Exception('aug test does not support ' - f'inference with batch size ' - f'{len(data_list)}') - - data = collate(data_list, samples_per_gpu=len(imgs)) - - # process img_metas - if isinstance(data['img_metas'], list): - data['img_metas'] = [ - img_metas.data[0] for img_metas in data['img_metas'] - ] - else: - data['img_metas'] = data['img_metas'].data - - if isinstance(data['img'], list): - data['img'] = [img.data for img in data['img']] - if isinstance(data['img'][0], list): - data['img'] = [img[0] for img in data['img']] - else: - data['img'] = data['img'].data - - if device != 'cpu': - data = scatter(data, [device])[0] - - return data, data['img'] - - -def build_dataset(dataset_cfg: Union[str, mmcv.Config], - dataset_type: str = 'val', - **kwargs): - """Build dataset for detector/recognizer. - - Args: - dataset_cfg (str | mmcv.Config): The input dataset config. - dataset_type (str): A string represents dataset type, e.g.: 'train', - 'test', 'val'. Defaults to 'val'. - - Returns: - Dataset: A PyTorch dataset. 
- """ - dataset_cfg = load_config(dataset_cfg)[0].copy() - - data = dataset_cfg.data - assert dataset_type in data - dataset = build_dataset_mmocr(data[dataset_type]) - - return dataset - - -def build_dataloader(dataset: Dataset, - samples_per_gpu: int, - workers_per_gpu: int, - num_gpus: int = 1, - dist: bool = False, - shuffle: bool = False, - seed: Optional[int] = None, - **kwargs): - """Build dataloader for detector/recognizer. - - Args: - dataset (Dataset): Input dataset. - samples_per_gpu (int): Number of training samples on each GPU, i.e., - batch size of each GPU. - workers_per_gpu (int): How many subprocesses to use for data loading - for each GPU. - num_gpus (int): Number of GPUs. Only used in non-distributed training. - dist (bool): Distributed training/test or not. Defaults to `False`. - shuffle (bool): Whether to shuffle the data at every epoch. - Defaults to `False`. - seed (int): An integer set to be seed. Default is `None`. - kwargs: Any other keyword argument to be used to initialize DataLoader. - - Returns: - DataLoader: A PyTorch dataloader. - """ - return build_dataloader_mmocr( - dataset, - samples_per_gpu, - workers_per_gpu, - num_gpus=num_gpus, - dist=dist, - shuffle=shuffle, - seed=seed, - **kwargs) - - -def get_tensor_from_input(input_data: tuple): - """Get input tensor from input data. - - Args: - input_data (tuple): Input data containing meta info and image tensor. - Returns: - torch.Tensor: An image in `Tensor`. - """ - if isinstance(input_data['img'], DataContainer): - return input_data['img'].data[0] - return input_data['img'][0] diff --git a/mmdeploy/mmocr/models/__init__.py b/mmdeploy/mmocr/models/__init__.py deleted file mode 100644 index 296cfe2dfd..0000000000 --- a/mmdeploy/mmocr/models/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .textdet import * # noqa: F401,F403 -from .textrecog import * # noqa: F401,F403 diff --git a/mmdeploy/mmocr/models/textdet/__init__.py b/mmdeploy/mmocr/models/textdet/__init__.py deleted file mode 100644 index b64298155e..0000000000 --- a/mmdeploy/mmocr/models/textdet/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .detectors.single_stage_text_detector import * # noqa: F401,F403 -from .necks.fpn_cat import * # noqa: F401,F403 diff --git a/mmdeploy/mmocr/models/textrecog/__init__.py b/mmdeploy/mmocr/models/textrecog/__init__.py deleted file mode 100644 index 075e9cc92c..0000000000 --- a/mmdeploy/mmocr/models/textrecog/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -from .decoders import * # noqa: F401, F403 -from .encoders import sar_encoder__forward -from .layers import * # noqa: F401, F403 -from .recognizer.base import base_recognizer__forward -from .recognizer.encode_decode_recognizer import \ - encode_decode_recognizer__simple_test -from .recognizer.sar import SARNet - -__all__ = [ - 'encode_decode_recognizer__simple_test', 'base_recognizer__forward', - 'sar_encoder__forward', 'SARNet' -] diff --git a/mmdeploy/mmocr/models/textrecog/decoders/__init__.py b/mmdeploy/mmocr/models/textrecog/decoders/__init__.py deleted file mode 100644 index 032de25fb6..0000000000 --- a/mmdeploy/mmocr/models/textrecog/decoders/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .crnn_decoder import crnndecoder__forward_train__ncnn -from .sar_decoder import * # noqa: F401, F403 - -__all__ = ['crnndecoder__forward_train__ncnn'] diff --git a/mmdeploy/mmocr/models/textrecog/encoders/__init__.py b/mmdeploy/mmocr/models/textrecog/encoders/__init__.py deleted file mode 100644 index 41462dcab8..0000000000 --- 
a/mmdeploy/mmocr/models/textrecog/encoders/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .sar_encoder import sar_encoder__forward - -__all__ = ['sar_encoder__forward'] diff --git a/mmdeploy/mmocr/models/textrecog/layers/__init__.py b/mmdeploy/mmocr/models/textrecog/layers/__init__.py deleted file mode 100644 index 3dd6cef15f..0000000000 --- a/mmdeploy/mmocr/models/textrecog/layers/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .lstm_layer import bidirectionallstm__forward__ncnn - -__all__ = ['bidirectionallstm__forward__ncnn'] diff --git a/mmdeploy/mmseg/__init__.py b/mmdeploy/mmseg/__init__.py deleted file mode 100644 index d2b62e1cb6..0000000000 --- a/mmdeploy/mmseg/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .export import * # noqa: F401,F403 -from .models import * # noqa: F401,F403 diff --git a/mmdeploy/mmseg/apis/__init__.py b/mmdeploy/mmseg/apis/__init__.py deleted file mode 100644 index 9d5a605822..0000000000 --- a/mmdeploy/mmseg/apis/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .inference import build_segmentor -from .visualize import show_result - -__all__ = ['build_segmentor', 'show_result'] diff --git a/mmdeploy/mmseg/apis/inference.py b/mmdeploy/mmseg/apis/inference.py deleted file mode 100644 index 1b9b53c01b..0000000000 --- a/mmdeploy/mmseg/apis/inference.py +++ /dev/null @@ -1,289 +0,0 @@ -from typing import Sequence, Union - -import mmcv -import numpy as np -import torch -from mmseg.datasets import DATASETS -from mmseg.models.segmentors.base import BaseSegmentor -from mmseg.ops import resize - -from mmdeploy.utils.config_utils import Backend, get_backend, load_config - - -class DeployBaseSegmentor(BaseSegmentor): - """Base Class of wrapper for segmentation's inference. - - Args: - class_names (Sequence[str]): A list of string specifying class names. - palette (np.ndarray): The palette of segmentation map. - device_id (int): An integer represents device index. - """ - - def __init__(self, class_names: Sequence[str], palette: np.ndarray, - device_id: int): - super(DeployBaseSegmentor, self).__init__(init_cfg=None) - self.CLASSES = class_names - self.device_id = device_id - self.PALETTE = palette - - def extract_feat(self, imgs): - raise NotImplementedError('This method is not implemented.') - - def encode_decode(self, img, img_metas): - raise NotImplementedError('This method is not implemented.') - - def forward_train(self, imgs, img_metas, **kwargs): - raise NotImplementedError('This method is not implemented.') - - def simple_test(self, img, img_meta, **kwargs): - raise NotImplementedError('This method is not implemented.') - - def aug_test(self, imgs, img_metas, **kwargs): - raise NotImplementedError('This method is not implemented.') - - def forward(self, img: Sequence[torch.Tensor], img_metas: Sequence[dict], - **kwargs): - """Run forward test. - - Args: - img (Sequence[torch.Tensor]]): A list of input image tensor(s). - img_metas (Sequence[dict]]): A list of dict containing image(s) - meta information. - - Returns: - list[np.ndarray]: A list of segmentation result. 
- """ - if isinstance(img, (list, tuple)): - img = img[0] - img = img.contiguous() - seg_pred = self.forward_test(img, img_metas, **kwargs) - # whole mode supports dynamic shape - ori_shape = img_metas[0][0]['ori_shape'] - if not (ori_shape[0] == seg_pred.shape[-2] - and ori_shape[1] == seg_pred.shape[-1]): - seg_pred = torch.from_numpy(seg_pred).float() - seg_pred = resize( - seg_pred, size=tuple(ori_shape[:2]), mode='nearest') - seg_pred = seg_pred.long().detach().cpu().numpy() - # remove unnecessary dim - seg_pred = seg_pred.squeeze(1) - seg_pred = list(seg_pred) - return seg_pred - - -class ONNXRuntimeSegmentor(DeployBaseSegmentor): - """Wrapper for segmentation's inference with ONNX Runtime. - - Args: - model_file (str): The path of input model file. - class_names (Sequence[str]): A list of string specifying class names. - palette (np.ndarray): The palette of segmentation map. - device_id (int): An integer represents device index. - """ - - def __init__(self, model_file: str, class_names: Sequence[str], - palette: np.ndarray, device_id: int): - super(ONNXRuntimeSegmentor, self).__init__(class_names, palette, - device_id) - from mmdeploy.apis.onnxruntime import ORTWrapper - self.model = ORTWrapper(model_file, device_id) - - def forward_test(self, imgs: torch.Tensor, img_metas: Sequence[dict], - **kwargs): - """Run forward test to get predictions. - - Args: - imgs (torch.Tensor): Input tensor of the model. - img_metas (Sequence[dict]]): A list of dict containing image(s) - meta information. - Returns: - torch.Tensor: Segmentation result. - """ - seg_pred = self.model({'input': imgs})[0] - return seg_pred - - -class TensorRTSegmentor(DeployBaseSegmentor): - """Wrapper for segmentation's inference with TensorRT. - - Args: - model_file (str): The path of input model file. - class_names (Sequence[str]): A list of string specifying class names. - palette (np.ndarray): The palette of segmentation map. - device_id (int): An integer represents device index. - """ - - def __init__(self, model_file: str, class_names: Sequence[str], - palette: np.ndarray, device_id: int): - super(TensorRTSegmentor, self).__init__(class_names, palette, - device_id) - from mmdeploy.apis.tensorrt import TRTWrapper - - model = TRTWrapper(model_file) - self.model = model - self.output_name = self.model.output_names[0] - - def forward_test(self, imgs: torch.Tensor, img_metas: Sequence[dict], - **kwargs): - """Run forward test to get predictions. - - Args: - imgs (torch.Tensor): Input tensor of the model. - img_metas (Sequence[dict]]): A list of dict containing image(s) - meta information. - Returns: - np.ndarray: Segmentation result. - """ - with torch.cuda.device(self.device_id), torch.no_grad(): - seg_pred = self.model({'input': imgs})[self.output_name] - seg_pred = seg_pred.detach().cpu().numpy() - return seg_pred - - -class PPLSegmentor(DeployBaseSegmentor): - """Wrapper for segmentation's inference with PPL. - - Args: - model_file (Sequence[str]): Paths of input params and bin files. - class_names (Sequence[str]): A list of string specifying class names. - palette (np.ndarray): The palette of segmentation map. - device_id (int): An integer represents device index. 
- """ - - def __init__(self, model_file: str, class_names: Sequence[str], - palette: np.ndarray, device_id: int): - super(PPLSegmentor, self).__init__(class_names, palette, device_id) - from mmdeploy.apis.ppl import PPLWrapper - self.model = PPLWrapper(model_file[0], model_file[1], device_id) - - def forward_test(self, imgs: torch.Tensor, img_metas: Sequence[dict], - **kwargs): - """Run forward test to get predictions. - - Args: - imgs (torch.Tensor): Input tensor of the model. - img_metas (Sequence[dict]]): A list of dict containing image(s) - meta information. - Returns: - np.ndarray: Segmentation result. - """ - seg_pred = self.model({'input': imgs})[0] - return seg_pred - - -class NCNNSegmentor(DeployBaseSegmentor): - """Wrapper for segmentation's inference with NCNN. - - Args: - model_file (Sequence[str]): Paths of input params and bin files. - class_names (Sequence[str]): A list of string specifying class names. - palette (np.ndarray): The palette of segmentation map. - device_id (int): An integer represents device index. - """ - - def __init__(self, model_file: Sequence[str], class_names: Sequence[str], - palette: np.ndarray, device_id: int): - super(NCNNSegmentor, self).__init__(class_names, palette, device_id) - from mmdeploy.apis.ncnn import NCNNWrapper - assert len(model_file) == 2, f'`model_file` should be [param_file, \ - bin_file], but given {model_file}' - - ncnn_param_file = model_file[0] - ncnn_bin_file = model_file[1] - self.model = NCNNWrapper( - ncnn_param_file, ncnn_bin_file, output_names=['output']) - - def forward_test(self, imgs: torch.Tensor, img_metas: Sequence[dict], - **kwargs): - """Run forward test to get predictions. - - Args: - imgs (torch.Tensor): Input tensor of the model. - img_metas (Sequence[dict]]): A list of dict containing image(s) - meta information. - Returns: - np.ndarray: Segmentation result. - """ - results = self.model({'input': imgs})['output'] - results = results.detach().cpu().numpy() - return results - - -ONNXRUNTIME_SEGMENTOR_MAP = dict(end2end=ONNXRuntimeSegmentor) - -TENSORRT_SEGMENTOR_MAP = dict(end2end=TensorRTSegmentor) - -PPL_SEGMENTOR_MAP = dict(end2end=PPLSegmentor) -NCNN_SEGMENTOR_MAP = dict(end2end=NCNNSegmentor) - -BACKEND_SEGMENTOR_MAP = { - Backend.ONNXRUNTIME: ONNXRUNTIME_SEGMENTOR_MAP, - Backend.TENSORRT: TENSORRT_SEGMENTOR_MAP, - Backend.PPL: PPL_SEGMENTOR_MAP, - Backend.NCNN: NCNN_SEGMENTOR_MAP -} - - -def get_classes_palette_from_config(model_cfg: Union[str, mmcv.Config]): - """Get class name and palette from config. - - Args: - model_cfg (str | mmcv.Config): Input model config file or - Config object. - - Returns: - tuple(Sequence[str], np.ndarray): A list of string specifying names of - different class and the palette of segmentation map. - """ - # load cfg if necessary - model_cfg = load_config(model_cfg)[0] - - module_dict = DATASETS.module_dict - data_cfg = model_cfg.data - - if 'train' in data_cfg: - module = module_dict[data_cfg.train.type] - elif 'val' in data_cfg: - module = module_dict[data_cfg.val.type] - elif 'test' in data_cfg: - module = module_dict[data_cfg.test.type] - else: - raise RuntimeError(f'No dataset config found in: {model_cfg}') - - return module.CLASSES, module.PALETTE - - -def build_segmentor(model_files, model_cfg, deploy_cfg, device_id): - """Build segmentor for different backend. - - Args: - model_files (list[str]): Input model file(s). - model_cfg (str | mmcv.Config): Input model config file or Config - object. 
- deploy_cfg (str | mmcv.Config): Input deployment config file or - Config object. - device_id (int): An integer represents device index. - - Returns: - DeployBaseSegmentor: Segmentor for a configured backend. - """ - # load cfg if necessary - model_cfg, deploy_cfg = load_config(model_cfg, deploy_cfg) - - backend = get_backend(deploy_cfg) - class_names, palette = get_classes_palette_from_config(model_cfg) - assert backend in BACKEND_SEGMENTOR_MAP, \ - f'Unsupported backend type: {backend.value}' - segmentor_map = BACKEND_SEGMENTOR_MAP[backend] - - model_type = 'end2end' - assert model_type in segmentor_map, f'Unsupported model type: {model_type}' - backend_segmentor_class = segmentor_map[model_type] - model_files = model_files[0] if len(model_files) == 1 else model_files - backend_segmentor = backend_segmentor_class( - model_files, - class_names=class_names, - device_id=device_id, - palette=palette) - - return backend_segmentor diff --git a/mmdeploy/mmseg/apis/visualize.py b/mmdeploy/mmseg/apis/visualize.py deleted file mode 100644 index c1257369c1..0000000000 --- a/mmdeploy/mmseg/apis/visualize.py +++ /dev/null @@ -1,36 +0,0 @@ -import numpy as np -import torch - -from mmdeploy.utils import Backend - - -def show_result(model: torch.nn.Module, - image: np.ndarray, - result: list, - output_file: str, - backend: Backend, - show: bool = True, - opacity: float = 0.5): - """Show predictions of segmentation. - - Args: - model (nn.Module): Input model which has `show_result` method. - image: (np.ndarray): Input image to draw predictions. - result (list): A list of predictions. - output_file (str): Output image file to save drawn predictions. - backend (Backend): Specifying backend type. - show (bool): Whether to show plotted image in windows. Defaults to - `True`. - opacity: (float): Opacity of painted segmentation map. - Defaults to `0.5`. - - Returns: - np.ndarray: Drawn image, only if not `show` or `out_file`. - """ - return model.show_result( - image, - result, - opacity=opacity, - show=show, - win_name=backend.value, - out_file=output_file) diff --git a/mmdeploy/mmseg/export/__init__.py b/mmdeploy/mmseg/export/__init__.py deleted file mode 100644 index 82f32aa792..0000000000 --- a/mmdeploy/mmseg/export/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -from .onnx_utils import convert_syncbatchnorm -from .prepare_input import (build_dataloader, build_dataset, create_input, - get_tensor_from_input) - -__all__ = [ - 'create_input', 'convert_syncbatchnorm', 'build_dataloader', - 'build_dataset', 'get_tensor_from_input' -] diff --git a/mmdeploy/mmseg/export/prepare_input.py b/mmdeploy/mmseg/export/prepare_input.py deleted file mode 100644 index aa32d2dc56..0000000000 --- a/mmdeploy/mmseg/export/prepare_input.py +++ /dev/null @@ -1,141 +0,0 @@ -from typing import Any, Optional, Sequence, Union - -import mmcv -import numpy as np -from mmcv.parallel import collate, scatter -from mmseg.apis.inference import LoadImage -from mmseg.datasets import build_dataloader as build_dataloader_mmseg -from mmseg.datasets import build_dataset as build_dataset_mmseg -from mmseg.datasets.pipelines import Compose -from torch.utils.data import Dataset - -from mmdeploy.utils import Task, load_config - - -def create_input(task: Task, - model_cfg: Union[str, mmcv.Config], - imgs: Any, - input_shape: Optional[Sequence[int]] = None, - device: str = 'cuda:0'): - """Create input for segmentation. - - Args: - task (Task): Specifying task type. - model_cfg (str | mmcv.Config): The input model config. 
- imgs (Any): Input image(s), accpeted data type are `str`, - `np.ndarray`, `torch.Tensor`. - input_shape (list[int]): A list of two integer in (width, height) - format specifying input shape. Defaults to `None`. - device (str): A string represents device type. Default is 'cuda:0'. - - Returns: - tuple: (data, img), meta information for the input image and input. - """ - assert task == Task.SEGMENTATION - cfg = load_config(model_cfg)[0].copy() - if not isinstance(imgs, (list, tuple)): - imgs = [imgs] - - if isinstance(imgs[0], np.ndarray): - cfg = cfg.copy() - # set loading pipeline type - cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam' - # for static exporting - if input_shape is not None: - cfg.data.test.pipeline[1]['img_scale'] = tuple(input_shape) - cfg.data.test.pipeline[1]['transforms'][0]['keep_ratio'] = False - cfg.data.test.pipeline = [LoadImage()] + cfg.data.test.pipeline[1:] - - test_pipeline = Compose(cfg.data.test.pipeline) - data_list = [] - for img in imgs: - # prepare data - data = dict(img=img) - # build the data pipeline - data = test_pipeline(data) - data_list.append(data) - - data = collate(data_list, samples_per_gpu=len(imgs)) - - data['img_metas'] = [img_metas.data[0] for img_metas in data['img_metas']] - data['img'] = [img.data[0][None, :] for img in data['img']] - if device != 'cpu': - data = scatter(data, [device])[0] - - return data, data['img'] - - -def build_dataset(dataset_cfg: Union[str, mmcv.Config], - dataset_type: str = 'val', - **kwargs): - """Build dataset for segmentation. - - Args: - dataset_cfg (str | mmcv.Config): The input dataset config. - dataset_type (str): A string represents dataset type, e.g.: 'train', - 'test', 'val'. Defaults to 'val'. - - Returns: - Dataset: A PyTorch dataset. - """ - dataset_cfg = load_config(dataset_cfg)[0] - data = dataset_cfg.data - assert dataset_type in data - - dataset = build_dataset_mmseg(data[dataset_type]) - - return dataset - - -def build_dataloader(dataset: Dataset, - samples_per_gpu: int, - workers_per_gpu: int, - num_gpus: int = 1, - dist: bool = False, - shuffle: bool = False, - seed: Optional[int] = None, - drop_last: bool = False, - pin_memory: bool = True, - persistent_workers: bool = True, - **kwargs): - """Build dataloader for segmentation. - - Args: - dataset (Dataset): Input dataset. - samples_per_gpu (int): Number of training samples on each GPU, i.e., - batch size of each GPU. - workers_per_gpu (int): How many subprocesses to use for data loading - for each GPU. - num_gpus (int): Number of GPUs. Only used in non-distributed training. - dist (bool): Distributed training/test or not. Defaults to `False`. - shuffle (bool): Whether to shuffle the data at every epoch. - Defaults to `False`. - seed (int): An integer set to be seed. Default is `None`. - drop_last (bool): Whether to drop the last incomplete batch in epoch. - Default to `False`. - pin_memory (bool): Whether to use pin_memory in DataLoader. - Default is `True`. - persistent_workers (bool): If `True`, the data loader will not shutdown - the worker processes after a dataset has been consumed once. - This allows to maintain the workers Dataset instances alive. - The argument also has effect in PyTorch>=1.7.0. - Default is `True`. - kwargs: Any other keyword argument to be used to initialize DataLoader. - - Returns: - DataLoader: A PyTorch dataloader. 
- """ - return build_dataloader_mmseg(dataset, samples_per_gpu, workers_per_gpu, - num_gpus, dist, shuffle, seed, drop_last, - pin_memory, persistent_workers, **kwargs) - - -def get_tensor_from_input(input_data: tuple): - """Get input tensor from input data. - - Args: - input_data (tuple): Input data containing meta info and image tensor. - Returns: - torch.Tensor: An image in `Tensor`. - """ - return input_data['img'][0] diff --git a/mmdeploy/utils/__init__.py b/mmdeploy/utils/__init__.py index ec36ead2f8..b114bc31f7 100644 --- a/mmdeploy/utils/__init__.py +++ b/mmdeploy/utils/__init__.py @@ -1,15 +1,16 @@ from .config_utils import (cfg_apply_marks, get_backend, get_calib_config, get_calib_filename, get_codebase, get_common_config, - get_input_shape, get_mmdet_params, get_model_inputs, - get_onnx_config, get_partition_config, - get_task_type, is_dynamic_batch, is_dynamic_shape, - load_config) + get_input_shape, get_model_inputs, get_onnx_config, + get_partition_config, get_task_type, + is_dynamic_batch, is_dynamic_shape, load_config) from .constants import Backend, Codebase, Task +from .device import parse_cuda_device_id, parse_device_id __all__ = [ 'is_dynamic_batch', 'is_dynamic_shape', 'get_task_type', 'get_codebase', 'get_backend', 'load_config', 'Backend', 'Codebase', 'Task', 'get_onnx_config', 'get_partition_config', 'get_calib_config', 'get_calib_filename', 'get_common_config', 'get_model_inputs', - 'cfg_apply_marks', 'get_mmdet_params', 'get_input_shape' + 'cfg_apply_marks', 'get_input_shape', 'parse_device_id', + 'parse_cuda_device_id' ] diff --git a/mmdeploy/utils/config_utils.py b/mmdeploy/utils/config_utils.py index 82cc8ce392..59c8ab2665 100644 --- a/mmdeploy/utils/config_utils.py +++ b/mmdeploy/utils/config_utils.py @@ -1,11 +1,11 @@ -from typing import Optional, Union +from typing import Dict, List, Optional, Union import mmcv from .constants import Backend, Codebase, Task -def load_config(*args): +def load_config(*args) -> List[mmcv.Config]: """Load the configuration and check the validity. Args: @@ -93,7 +93,7 @@ def get_backend(deploy_cfg: Union[str, mmcv.Config], default=None) -> Backend: return backend -def get_onnx_config(deploy_cfg: Union[str, mmcv.Config]) -> str: +def get_onnx_config(deploy_cfg: Union[str, mmcv.Config]) -> Dict: """Get the onnx parameters in export() from config. Args: @@ -167,14 +167,15 @@ def is_dynamic_shape(deploy_cfg: Union[str, mmcv.Config], return False -def get_input_shape(deploy_cfg: Union[str, mmcv.Config]): +def get_input_shape(deploy_cfg: Union[str, mmcv.Config]) -> List[int]: """Get the input shape for static exporting. Args: deploy_cfg (str | mmcv.Config): The path or content of config. Returns: - List: The input shape for backend model (axis 2 and 3), e.g [512, 512]. + List[int]: The input shape for backend model (axis 2 and 3), + e.g [512, 512]. """ input_shape = get_onnx_config(deploy_cfg)['input_shape'] if input_shape is not None: @@ -182,7 +183,7 @@ def get_input_shape(deploy_cfg: Union[str, mmcv.Config]): return input_shape -def cfg_apply_marks(deploy_cfg: Union[str, mmcv.Config]) -> bool: +def cfg_apply_marks(deploy_cfg: Union[str, mmcv.Config]) -> Union[bool, None]: """Check if the model needs to be partitioned by checking if the config contains 'apply_marks'. @@ -190,7 +191,7 @@ def cfg_apply_marks(deploy_cfg: Union[str, mmcv.Config]) -> bool: deploy_cfg (str | mmcv.Config): The path or content of config. Returns: - bool: Whether config contains 'apply_marks'. + bool or None: Whether config contains 'apply_marks'. 
""" partition_config = deploy_cfg.get('partition_config', None) if partition_config is None: @@ -200,7 +201,7 @@ def cfg_apply_marks(deploy_cfg: Union[str, mmcv.Config]) -> bool: return apply_marks -def get_partition_config(deploy_cfg: Union[str, mmcv.Config]): +def get_partition_config(deploy_cfg: Union[str, mmcv.Config]) -> Dict: """Check if the model needs to be partitioned and get the config of partition. @@ -208,7 +209,7 @@ def get_partition_config(deploy_cfg: Union[str, mmcv.Config]): deploy_cfg (str | mmcv.Config): The path or content of config. Returns: - dict: The config of partition + dict: The config of partition. """ partition_config = deploy_cfg.get('partition_config', None) if partition_config is None: @@ -221,29 +222,28 @@ def get_partition_config(deploy_cfg: Union[str, mmcv.Config]): return partition_config -def get_calib_config(deploy_cfg: Union[str, mmcv.Config]): +def get_calib_config(deploy_cfg: Union[str, mmcv.Config]) -> Dict: """Check if the model has calibration configs. Args: deploy_cfg (str | mmcv.Config): The path or content of config. Returns: - dict: The config of calibration + dict: The config of calibration. """ calib_config = deploy_cfg.get('calib_config', None) return calib_config -def get_calib_filename(deploy_cfg: Union[str, mmcv.Config]): - """Check if the model needs to create calib and get output filename of - calib. +def get_calib_filename(deploy_cfg: Union[str, mmcv.Config]) -> str: + """Check if the model needs to create calib and get filename of calib. Args: deploy_cfg (str | mmcv.Config): The path or content of config. Returns: - str: The filename of output calib file + str: The filename of output calib file. """ calib_config = get_calib_config(deploy_cfg) @@ -257,7 +257,7 @@ def get_calib_filename(deploy_cfg: Union[str, mmcv.Config]): return None -def get_common_config(deploy_cfg: Union[str, mmcv.Config]): +def get_common_config(deploy_cfg: Union[str, mmcv.Config]) -> Dict: """Get common parameters from config. Args: @@ -271,7 +271,7 @@ def get_common_config(deploy_cfg: Union[str, mmcv.Config]): return model_params -def get_model_inputs(deploy_cfg: Union[str, mmcv.Config]): +def get_model_inputs(deploy_cfg: Union[str, mmcv.Config]) -> List[Dict]: """Get model input parameters from config. Args: @@ -283,21 +283,3 @@ def get_model_inputs(deploy_cfg: Union[str, mmcv.Config]): backend_config = deploy_cfg['backend_config'] model_params = backend_config.get('model_inputs', []) return model_params - - -def get_mmdet_params(deploy_cfg: Union[str, mmcv.Config]): - """Get mmdet post-processing parameters from config. - - Args: - deploy_cfg (str | mmcv.Config): The path or content of config. - - Returns: - dict: A dict of parameters for mmdet. - """ - deploy_cfg = load_config(deploy_cfg)[0] - codebase_key = 'codebase_config' - assert codebase_key in deploy_cfg - codebase_config = deploy_cfg[codebase_key] - post_params = codebase_config.get('post_processing', None) - assert post_params is not None, 'Failed to get `post_processing`.' - return post_params diff --git a/mmdeploy/utils/device.py b/mmdeploy/utils/device.py new file mode 100644 index 0000000000..3e851ad728 --- /dev/null +++ b/mmdeploy/utils/device.py @@ -0,0 +1,37 @@ +import torch + + +def parse_device_id(device: str) -> int: + """Parse cuda device index from a string. + + Args: + device (str): The typical style of string specifying cuda device, + e.g.: 'cuda:0'. + + Returns: + int: The parsed device id, defaults to `0`. 
+ """ + if device == 'cpu': + return -1 + device_id = 0 + if len(device) >= 6: + device_id = torch.device(device).index + return device_id + + +def parse_cuda_device_id(device: str) -> int: + """Parse cuda device index from a string. + + Args: + device (str): The typical style of string specifying cuda device, + e.g.: 'cuda:0'. + + Returns: + int: The parsed device id, defaults to `0`. + """ + device = torch.device(device) + assert device.type == 'cuda', 'Not cuda device.' + + device_id = 0 if device.index is None else device.index + + return device_id diff --git a/mmdeploy/utils/test.py b/mmdeploy/utils/test.py index ccd812f9fa..6e41240f4b 100644 --- a/mmdeploy/utils/test.py +++ b/mmdeploy/utils/test.py @@ -33,7 +33,7 @@ def __init__(self, wrapped_function: Callable, **kwargs): self.wrapped_function = wrapped_function self.kwargs = kwargs - def forward(self, *args, **kwargs): + def forward(self, *args, **kwargs) -> Any: """Call the wrapped function.""" kwargs.update(self.kwargs) return self.wrapped_function(*args, **kwargs) @@ -73,11 +73,31 @@ def forward(self, *args, **kwargs): return func(*args, **kwargs) +class DummyModel(torch.nn.Module): + """A dummy model for unit tests. + + Args: + outputs (Any): Predefined output variables. + """ + + def __init__(self, outputs=None, *args, **kwargs): + torch.nn.Module.__init__(self) + self.outputs = outputs + + def forward(self, *args, **kwargs): + """Run forward.""" + return self.outputs + + def __call__(self, *args, **kwds): + """Call the forward method.""" + return self.forward(*args, **kwds) + + class SwitchBackendWrapper: """A switcher for backend wrapper for unit tests. Examples: >>> from mmdeploy.utils.test import SwitchBackendWrapper - >>> from mmdeploy.apis.onnxruntime.onnxruntime_utils import ORTWrapper + >>> from mmdeploy.backend.onnxruntime import ORTWrapper >>> with SwitchBackendWrapper(ORTWrapper) as wrapper: >>> wrapper.set(ORTWrapper, outputs=outputs) >>> ... @@ -89,10 +109,20 @@ class SwitchBackendWrapper: call = None class BackendWrapper(torch.nn.Module): - """A dummy wrapper for unit tests.""" + """A dummy backend wrapper for unit tests. - def __init__(self, *args, **kwargs): - self.output_names = ['dets', 'labels'] + To enable BaseWrapper.output_to_list(), the wrapper needs member + variable `_output_names` that is set in constructor. Therefore, + the dummy BackendWrapper needs a constructor that receives + output_names. + + Args: + output_names (Any): `output_name` of BaseWrapper + """ + + def __init__(self, output_names=['dets', 'labels'], *args, **kwargs): + torch.nn.Module.__init__(self) + self._output_names = output_names def forward(self, *args, **kwargs): """Run forward.""" @@ -165,7 +195,8 @@ def assert_allclose(expected: List[Union[torch.Tensor, np.ndarray]], raise -def get_model_outputs(model: nn.Module, func_name: str, model_inputs: dict): +def get_model_outputs(model: nn.Module, func_name: str, + model_inputs: dict) -> Any: """To get outputs of pytorch model. 
Args: @@ -268,6 +299,7 @@ def get_backend_outputs(onnx_file_path: str, flatten_model_inputs = get_flatten_inputs(model_inputs) input_names = [k for k, v in flatten_model_inputs.items() if k != 'ctx'] output_names = get_onnx_config(deploy_cfg).get('output_names', None) + backend_files = [onnx_file_path] # prepare backend model and input features if backend == Backend.TENSORRT: # convert to engine @@ -281,16 +313,16 @@ def get_backend_outputs(onnx_file_path: str, 0, deploy_cfg=deploy_cfg, onnx_model=onnx_file_path) - backend_model = trt_apis.TRTWrapper(trt_file_path) + backend_files = [trt_file_path] for k, v in model_inputs.items(): model_inputs[k] = model_inputs[k].cuda() backend_feats = model_inputs + device = 'cuda:0' elif backend == Backend.ONNXRUNTIME: import mmdeploy.apis.onnxruntime as ort_apis if not ort_apis.is_available(): return None - backend_model = ort_apis.ORTWrapper(onnx_file_path, 0, None) feature_list = [] backend_feats = {} for k, item in model_inputs.items(): @@ -313,6 +345,7 @@ def get_backend_outputs(onnx_file_path: str, backend_feats[input_names[i]] = feature_list[i] else: backend_feats[str(i)] = feature_list[i] + device = 'cpu' elif backend == Backend.NCNN: return None elif backend == Backend.OPENVINO: @@ -328,17 +361,21 @@ def get_backend_outputs(onnx_file_path: str, } openvino_apis.onnx2openvino(input_info, output_names, onnx_file_path, openvino_work_dir) - backend_model = openvino_apis.OpenVINOWrapper(openvino_file_path) - + backend_files = [openvino_file_path] backend_feats = flatten_model_inputs + device = 'cpu' elif backend == Backend.DEFAULT: return None else: raise NotImplementedError( f'Unimplemented backend type: {backend.value}') + from mmdeploy.codebase.base import BaseBackendModel + backend_model = BaseBackendModel._build_wrapper(backend, backend_files, + device, output_names) with torch.no_grad(): - backend_outputs = backend_model.forward(backend_feats) + backend_outputs = backend_model(backend_feats) + backend_outputs = backend_model.output_to_list(backend_outputs) return backend_outputs @@ -354,7 +391,7 @@ def get_rewrite_outputs(wrapped_model: nn.Module, deploy_cfg (mmcv.Config): Deployment config. Returns: - Any: The outputs of model, decided by the backend wrapper. + List[torch.Tensor]: The outputs of the model. bool: A flag indicating the type of outputs. If the flag is True, the outputs are backend outputs, otherwise they are outputs of the wrapped pytorch model. diff --git a/mmdeploy/utils/timer.py b/mmdeploy/utils/timer.py index b24d002f28..900a35cf73 100644 --- a/mmdeploy/utils/timer.py +++ b/mmdeploy/utils/timer.py @@ -3,6 +3,7 @@ import time import warnings from contextlib import contextmanager +from typing import Union import torch @@ -92,7 +93,7 @@ def activate(cls, warmup: int = 1, log_interval: int = 1, with_sync: bool = False, - file: io.TextIOWrapper = sys.stdout): + file: Union[str, io.TextIOWrapper] = sys.stdout): """Activate the time counter. Args: @@ -102,8 +103,8 @@ def activate(cls, log_interval (int): Interval between each log, default 1. with_sync (bool): Whether to use cuda synchronize for time counting, default False. - file (io.TextIOWrapper): A file or file-like object to save output - messages. The default is `sys.stdout`. + file (str | io.TextIOWrapper): A file or file-like object to save + output messages. The default is `sys.stdout`.
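+ + Example (illustrative; assumes the enclosing class is `TimeCounter` + and that 'my_func' was registered with it beforehand): + >>> TimeCounter.activate(func_name='my_func', warmup=2, + >>> log_interval=10, file='speed_test.log')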
""" assert warmup >= 1 if file != sys.stdout: @@ -112,7 +113,7 @@ def activate(cls, if func_name is not None: warnings.warn('func_name must be globally unique if you call ' 'activate multiple times') - assert func_name in cls.names, '{} must be registried before '\ + assert func_name in cls.names, '{} must be registered before '\ 'setting params'.format(func_name) cls.names[func_name]['warmup'] = warmup cls.names[func_name]['log_interval'] = log_interval diff --git a/mmdeploy/version.py b/mmdeploy/version.py index f74e40ba30..8db80794c7 100644 --- a/mmdeploy/version.py +++ b/mmdeploy/version.py @@ -1,10 +1,11 @@ # Copyright (c) OpenMMLab. All rights reserved. +from typing import Tuple __version__ = '0.1.0' short_version = __version__ -def parse_version_info(version_str: str): +def parse_version_info(version_str: str) -> Tuple: """Parse version from a string. Args: diff --git a/tests/test_mmdet/data/coco_sample.json b/tests/test_codebase/test_mmdet/data/coco_sample.json similarity index 100% rename from tests/test_mmdet/data/coco_sample.json rename to tests/test_codebase/test_mmdet/data/coco_sample.json diff --git a/tests/test_mmdet/data/mask_model.json b/tests/test_codebase/test_mmdet/data/mask_model.json similarity index 100% rename from tests/test_mmdet/data/mask_model.json rename to tests/test_codebase/test_mmdet/data/mask_model.json diff --git a/tests/test_codebase/test_mmdet/data/model.py b/tests/test_codebase/test_mmdet/data/model.py new file mode 100644 index 0000000000..95811df86d --- /dev/null +++ b/tests/test_codebase/test_mmdet/data/model.py @@ -0,0 +1,108 @@ +model = dict( + type='YOLOV3', + backbone=dict( + type='MobileNetV2', + out_indices=(2, 4, 6), + act_cfg=dict(type='LeakyReLU', negative_slope=0.1), + init_cfg=dict( + type='Pretrained', checkpoint='open-mmlab://mmdet/mobilenet_v2')), + neck=dict( + type='YOLOV3Neck', + num_scales=3, + in_channels=[320, 96, 32], + out_channels=[96, 96, 96]), + bbox_head=dict( + type='YOLOV3Head', + num_classes=80, + in_channels=[96, 96, 96], + out_channels=[96, 96, 96], + anchor_generator=dict( + type='YOLOAnchorGenerator', + base_sizes=[[(116, 90), (156, 198), (373, 326)], + [(30, 61), (62, 45), (59, 119)], + [(10, 13), (16, 30), (33, 23)]], + strides=[32, 16, 8]), + bbox_coder=dict(type='YOLOBBoxCoder'), + featmap_strides=[32, 16, 8], + loss_cls=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.0, + reduction='sum'), + loss_conf=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + loss_weight=1.0, + reduction='sum'), + loss_xy=dict( + type='CrossEntropyLoss', + use_sigmoid=True, + loss_weight=2.0, + reduction='sum'), + loss_wh=dict(type='MSELoss', loss_weight=2.0, reduction='sum')), + # training and testing settings + train_cfg=dict( + assigner=dict( + type='GridAssigner', + pos_iou_thr=0.5, + neg_iou_thr=0.5, + min_pos_iou=0)), + test_cfg=dict( + nms_pre=1000, + min_bbox_size=0, + score_thr=0.05, + conf_thr=0.005, + nms=dict(type='nms', iou_threshold=0.45), + max_per_img=100)) +# dataset settings +dataset_type = 'CocoDataset' +data_root = '.' 
+img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) +train_pipeline = [ + dict(type='LoadImageFromFile', to_float32=True), + dict(type='LoadAnnotations', with_bbox=True), + dict(type='PhotoMetricDistortion'), + dict( + type='Expand', + mean=img_norm_cfg['mean'], + to_rgb=img_norm_cfg['to_rgb'], + ratio_range=(1, 2)), + dict( + type='MinIoURandomCrop', + min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9), + min_crop_size=0.3), + dict( + type='Resize', + img_scale=[(320, 320), (416, 416)], + multiscale_mode='range', + keep_ratio=True), + dict(type='RandomFlip', flip_ratio=0.5), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) +] +test_pipeline = [ + dict(type='LoadImageFromFile'), + dict( + type='MultiScaleFlipAug', + img_scale=(416, 416), + flip=False, + transforms=[ + dict(type='Resize', keep_ratio=True), + dict(type='RandomFlip'), + dict(type='Normalize', **img_norm_cfg), + dict(type='Pad', size_divisor=32), + dict(type='DefaultFormatBundle'), + dict(type='Collect', keys=['img']) + ]) +] +data = dict( + samples_per_gpu=24, + workers_per_gpu=4, + test=dict( + type=dataset_type, + ann_file='tests/test_codebase/test_mmdet/data/coco_sample.json', + img_prefix=data_root, + pipeline=test_pipeline)) diff --git a/tests/test_mmdet/data/single_stage_model.json b/tests/test_codebase/test_mmdet/data/single_stage_model.json similarity index 100% rename from tests/test_mmdet/data/single_stage_model.json rename to tests/test_codebase/test_mmdet/data/single_stage_model.json diff --git a/tests/test_mmdet/test_mmdet_core.py b/tests/test_codebase/test_mmdet/test_mmdet_core.py similarity index 89% rename from tests/test_mmdet/test_mmdet_core.py rename to tests/test_codebase/test_mmdet/test_mmdet_core.py index 8dab90fb4d..1db30d0aee 100644 --- a/tests/test_mmdet/test_mmdet_core.py +++ b/tests/test_codebase/test_mmdet/test_mmdet_core.py @@ -15,7 +15,7 @@ def test_multiclass_nms_static(): import tensorrt as trt - from mmdeploy.mmdet.core import multiclass_nms + from mmdeploy.codebase.mmdet.core import multiclass_nms deploy_cfg = mmcv.Config( dict( onnx_config=dict(output_names=None, input_shape=None), @@ -70,7 +70,8 @@ def test_multiclass_nms_static(): @pytest.mark.parametrize('backend_type', ['onnxruntime', 'ncnn']) -def test_delta2bbox(backend_type): +@pytest.mark.parametrize('add_ctr_clamp', [True, False]) +def test_delta2bbox(backend_type, add_ctr_clamp): pytest.importorskip(backend_type, reason=f'requires {backend_type}') deploy_cfg = mmcv.Config( dict( @@ -86,10 +87,10 @@ def delta2bbox(*args, **kwargs): rois = torch.rand(1, 5, 4) deltas = torch.rand(1, 5, 4) - original_outputs = delta2bbox(rois, deltas) + original_outputs = delta2bbox(rois, deltas, add_ctr_clamp=add_ctr_clamp) # wrap function to nn.Module, enable torch.onnx.export - wrapped_func = WrapFunction(delta2bbox) + wrapped_func = WrapFunction(delta2bbox, add_ctr_clamp=add_ctr_clamp) rewrite_outputs, is_backend_output = get_rewrite_outputs( wrapped_func, model_inputs={ @@ -146,7 +147,7 @@ def tblr2bboxes(*args, **kwargs): def test_distance2bbox(): - from mmdeploy.mmdet.core import distance2bbox + from mmdeploy.codebase.mmdet.core import distance2bbox points = torch.rand(3, 2) distance = torch.rand(3, 4) bbox = distance2bbox(points, distance) @@ -155,10 +156,11 @@ def test_distance2bbox(): @pytest.mark.skipif( not importlib.util.find_spec('onnxruntime'), reason='requires onnxruntime') 
-def test_multiclass_nms_with_keep_top_k(): +@pytest.mark.parametrize('pre_top_k', [-1, 1000]) +def test_multiclass_nms_with_keep_top_k(pre_top_k): backend_type = 'onnxruntime' - from mmdeploy.mmdet.core import multiclass_nms + from mmdeploy.codebase.mmdet.core import multiclass_nms max_output_boxes_per_class = 20 keep_top_k = 15 deploy_cfg = mmcv.Config( @@ -186,7 +188,7 @@ def test_multiclass_nms_with_keep_top_k(): score_threshold=0.05, iou_threshold=0.5, max_output_boxes_per_class=max_output_boxes_per_class, - pre_top_k=-1, + pre_top_k=pre_top_k, keep_top_k=keep_top_k, background_label_id=-1, )))) @@ -211,9 +213,11 @@ def test_multiclass_nms_with_keep_top_k(): test_scores = torch.ones(batch_size, num_boxes, num_classes) model_inputs = {'boxes': test_boxes, 'scores': test_scores} - import mmdeploy.apis.onnxruntime as ort_apis - backend_model = ort_apis.ORTWrapper(onnx_model_path, 0, None) - dets, _ = backend_model.forward(model_inputs) + import mmdeploy.backend.onnxruntime as ort_apis + backend_model = ort_apis.ORTWrapper(onnx_model_path, 'cuda:0', None) + output = backend_model.forward(model_inputs) + output = backend_model.output_to_list(output) + dets = output[0] assert dets.shape[1] < keep_top_k, \ 'multiclass_nms returned more values than "keep_top_k"\n' \ diff --git a/tests/test_mmdet/test_mmdet_models.py b/tests/test_codebase/test_mmdet/test_mmdet_models.py similarity index 93% rename from tests/test_mmdet/test_mmdet_models.py rename to tests/test_codebase/test_mmdet/test_mmdet_models.py index f76803642d..931b913cec 100644 --- a/tests/test_mmdet/test_mmdet_models.py +++ b/tests/test_codebase/test_mmdet/test_mmdet_models.py @@ -2,7 +2,6 @@ import importlib import os import random -import tempfile from typing import Dict, List import mmcv @@ -10,7 +9,6 @@ import pytest import torch -from mmdeploy.utils.constants import Backend, Codebase from mmdeploy.utils.test import (WrapModel, get_model_outputs, get_rewrite_outputs) @@ -79,10 +77,10 @@ def get_rpn_head_model(): test_cfg = mmcv.Config( dict( deploy_nms_pre=0, - min_bbox_size=0, - score_thr=0.05, - nms=dict(type='nms', iou_threshold=0.5), - max_per_img=100)) + nms_pre=0, + max_per_img=100, + nms=dict(type='nms', iou_threshold=0.7), + min_bbox_size=0)) from mmdet.models import RPNHead model = RPNHead(in_channels=1, test_cfg=test_cfg) @@ -126,7 +124,7 @@ def test_anchor_head_get_bboxes(backend_type): score_threshold=0.05, iou_threshold=0.5, max_output_boxes_per_class=200, - pre_top_k=-1, + pre_top_k=5000, keep_top_k=100, background_label_id=-1, )))) @@ -205,7 +203,7 @@ def test_get_bboxes_of_fcos_head(backend_type): score_threshold=0.05, iou_threshold=0.5, max_output_boxes_per_class=200, - pre_top_k=-1, + pre_top_k=5000, keep_top_k=100, background_label_id=-1, )))) @@ -270,6 +268,61 @@ def test_get_bboxes_of_fcos_head(backend_type): assert rewrite_outputs is not None +@pytest.mark.parametrize('backend_type', ['onnxruntime', 'ncnn']) +def test_get_bboxes_of_rpn_head(backend_type): + pytest.importorskip(backend_type, reason=f'requires {backend_type}') + head = get_rpn_head_model() + head.cpu().eval() + s = 4 + img_metas = [{ + 'scale_factor': np.ones(4), + 'pad_shape': (s, s, 3), + 'img_shape': (s, s, 3) + }] + + output_names = ['dets'] + deploy_cfg = mmcv.Config( + dict( + backend_config=dict(type=backend_type), + onnx_config=dict(output_names=output_names, input_shape=None), + codebase_config=dict( + type='mmdet', + task='ObjectDetection', + post_processing=dict( + score_threshold=0.05, + iou_threshold=0.5, + 
max_output_boxes_per_class=200, + pre_top_k=5000, + keep_top_k=100, + background_label_id=-1, + )))) + + # the cls_score's size: (1, 9, 32, 32), (1, 9, 16, 16), + # (1, 9, 8, 8), (1, 9, 4, 4), (1, 9, 2, 2). + # the bboxes' size: (1, 36, 32, 32), (1, 36, 16, 16), + # (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2) + seed_everything(1234) + cls_score = [ + torch.rand(1, 9, pow(2, i), pow(2, i)) for i in range(5, 0, -1) + ] + seed_everything(5678) + bboxes = [torch.rand(1, 36, pow(2, i), pow(2, i)) for i in range(5, 0, -1)] + + # to get outputs of onnx model after rewrite + img_metas[0]['img_shape'] = torch.Tensor([s, s]) + wrapped_model = WrapModel( + head, 'get_bboxes', img_metas=img_metas[0], with_nms=True) + rewrite_inputs = { + 'cls_scores': cls_score, + 'bbox_preds': bboxes, + } + rewrite_outputs, is_backend_output = get_rewrite_outputs( + wrapped_model=wrapped_model, + model_inputs=rewrite_inputs, + deploy_cfg=deploy_cfg) + assert rewrite_outputs is not None + + def _replace_r50_with_r18(model): """Replace ResNet50 with ResNet18 in config.""" model = copy.deepcopy(model) @@ -281,12 +334,12 @@ def _replace_r50_with_r18(model): @pytest.mark.parametrize('model_cfg_path', [ - 'tests/test_mmdet/data/single_stage_model.json', - 'tests/test_mmdet/data/mask_model.json' + 'tests/test_codebase/test_mmdet/data/single_stage_model.json', + 'tests/test_codebase/test_mmdet/data/mask_model.json' ]) @pytest.mark.skipif( not importlib.util.find_spec('onnxruntime'), reason='requires onnxruntime') -def test_forward_of_base_detector_and_visualize(model_cfg_path): +def test_forward_of_base_detector(model_cfg_path): deploy_cfg = mmcv.Config( dict( backend_config=dict(type='onnxruntime'), @@ -316,22 +369,10 @@ def test_forward_of_base_detector_and_visualize(model_cfg_path): model_inputs=rewrite_inputs, deploy_cfg=deploy_cfg) - from mmdeploy.apis.utils import visualize - output_file = tempfile.NamedTemporaryFile(suffix='.jpg').name - model.CLASSES = [''] * 80 - visualize( - Codebase.MMDET, - img.squeeze().permute(1, 2, 0).numpy(), - result=[torch.rand(0, 5).numpy()] * 80, - model=model, - output_file=output_file, - backend=Backend.ONNXRUNTIME, - show_result=False) - assert rewrite_outputs is not None -@pytest.mark.parametrize('backend_type', ['openvino']) +@pytest.mark.parametrize('backend_type', ['onnxruntime', 'openvino']) def test_single_roi_extractor(backend_type): pytest.importorskip(backend_type, reason=f'requires {backend_type}') @@ -485,7 +526,8 @@ def test_cascade_roi_head(backend_type): model_outputs = get_model_outputs(cascade_roi_head, 'simple_test', model_inputs) processed_model_outputs = [] - for output in model_outputs[0]: + outputs = model_outputs[0] + for output in outputs: if output.shape == (0, 5): processed_model_outputs.append(np.zeros((1, 5))) else: diff --git a/tests/test_codebase/test_mmdet/test_mmdet_utils.py b/tests/test_codebase/test_mmdet/test_mmdet_utils.py new file mode 100644 index 0000000000..2c24a9a8f2 --- /dev/null +++ b/tests/test_codebase/test_mmdet/test_mmdet_utils.py @@ -0,0 +1,49 @@ +import mmcv +import numpy as np +import torch + +from mmdeploy.codebase.mmdet import (clip_bboxes, get_post_processing_params, + pad_with_value) + + +def test_clip_bboxes(): + x1 = torch.rand(3, 2) * 224 + y1 = torch.rand(3, 2) * 224 + x2 = x1 * 2 + y2 = y1 * 2 + outs = clip_bboxes(x1, y1, x2, y2, [224, 224]) + for out in outs: + assert int(out.max()) <= 224 + + +def test_pad_with_value(): + x = torch.rand(3, 2) + padded_x = pad_with_value(x, pad_dim=1, pad_size=4, pad_value=0) + assert
np.allclose( + padded_x.shape, torch.Size([3, 6]), rtol=1e-03, atol=1e-05) + assert np.allclose(padded_x.sum(), x.sum(), rtol=1e-03, atol=1e-05) + + +config_with_mmdet_params = mmcv.Config( + dict( + codebase_config=dict( + type='mmdet', + task='ObjectDetection', + post_processing=dict( + score_threshold=0.05, + iou_threshold=0.5, + max_output_boxes_per_class=200, + pre_top_k=-1, + keep_top_k=100, + background_label_id=-1, + )))) + + +def test_get_mmdet_params(): + assert get_post_processing_params(config_with_mmdet_params) == dict( + score_threshold=0.05, + iou_threshold=0.5, + max_output_boxes_per_class=200, + pre_top_k=-1, + keep_top_k=100, + background_label_id=-1) diff --git a/tests/test_codebase/test_mmdet/test_object_detection.py b/tests/test_codebase/test_mmdet/test_object_detection.py new file mode 100644 index 0000000000..99b7364d9e --- /dev/null +++ b/tests/test_codebase/test_mmdet/test_object_detection.py @@ -0,0 +1,155 @@ +import os +from tempfile import NamedTemporaryFile, TemporaryDirectory + +import mmcv +import numpy as np +import pytest +import torch +from torch.utils.data import DataLoader +from torch.utils.data.dataset import Dataset + +import mmdeploy.backend.onnxruntime as ort_apis +from mmdeploy.apis import build_task_processor +from mmdeploy.utils import load_config +from mmdeploy.utils.test import DummyModel, SwitchBackendWrapper + +model_cfg_path = 'tests/test_codebase/test_mmdet/data/model.py' +model_cfg = load_config(model_cfg_path)[0] +deploy_cfg = mmcv.Config( + dict( + backend_config=dict(type='onnxruntime'), + codebase_config=dict( + type='mmdet', + task='ObjectDetection', + post_processing=dict( + score_threshold=0.05, + confidence_threshold=0.005, # for YOLOv3 + iou_threshold=0.5, + max_output_boxes_per_class=200, + pre_top_k=5000, + keep_top_k=100, + background_label_id=-1, + )), + onnx_config=dict( + type='onnx', + export_params=True, + keep_initializers_as_inputs=False, + opset_version=11, + input_shape=None, + input_names=['input'], + output_names=['dets', 'labels']))) +onnx_file = NamedTemporaryFile(suffix='.onnx').name +task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu') +img_shape = (32, 32) +img = np.random.rand(*img_shape, 3) + + +def test_init_pytorch_model(): + from mmdet.models import BaseDetector + model = task_processor.init_pytorch_model(None) + assert isinstance(model, BaseDetector) + + +@pytest.fixture +def backend_model(): + from mmdeploy.backend.onnxruntime import ORTWrapper + ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) + wrapper = SwitchBackendWrapper(ORTWrapper) + wrapper.set(outputs={ + 'dets': torch.rand(1, 10, 5), + 'labels': torch.rand(1, 10) + }) + + yield task_processor.init_backend_model(['']) + + wrapper.recover() + + +def test_init_backend_model(backend_model): + from mmdeploy.codebase.mmdet.deploy.object_detection_model \ + import End2EndModel + assert isinstance(backend_model, End2EndModel) + + +@pytest.mark.parametrize('device', ['cpu', 'cuda:0']) +def test_create_input(device): + original_device = task_processor.device + task_processor.device = device + inputs = task_processor.create_input(img, input_shape=img_shape) + assert len(inputs) == 2 + task_processor.device = original_device + + +def test_run_inference(backend_model): + torch_model = task_processor.init_pytorch_model(None) + input_dict, _ = task_processor.create_input(img, input_shape=img_shape) + torch_results = task_processor.run_inference(torch_model, input_dict) + backend_results = task_processor.run_inference(backend_model, 
input_dict) + assert torch_results is not None + assert backend_results is not None + assert len(torch_results[0]) == len(backend_results[0]) + + +def test_visualize(backend_model): + input_dict, _ = task_processor.create_input(img, input_shape=img_shape) + results = task_processor.run_inference(backend_model, input_dict) + with TemporaryDirectory() as dir: + filename = dir + 'tmp.jpg' + task_processor.visualize(backend_model, img, results[0], filename, '') + assert os.path.exists(filename) + + +@pytest.mark.parametrize('partition_type', ['single_stage', 'two_stage']) +# Currently only mmdet implements get_partition_cfg +def test_get_partition_cfg(partition_type): + from mmdeploy.codebase.mmdet.deploy.model_partition_cfg import \ + MMDET_PARTITION_CFG + partition_cfg = task_processor.get_partition_cfg( + partition_type=partition_type) + assert partition_cfg == MMDET_PARTITION_CFG[partition_type] + + +def test_get_tensor_from_input(): + input_data = {'img': [torch.ones(3, 4, 5)]} + inputs = task_processor.get_tensor_from_input(input_data) + assert torch.equal(inputs, torch.ones(3, 4, 5)) + + +def test_build_dataset_and_dataloader(): + dataset = task_processor.build_dataset( + dataset_cfg=model_cfg, dataset_type='test') + assert isinstance(dataset, Dataset), 'Failed to build dataset' + dataloader = task_processor.build_dataloader(dataset, 1, 1) + assert isinstance(dataloader, DataLoader), 'Failed to build dataloader' + + +def test_single_gpu_test_and_evaluate(): + from mmcv.parallel import MMDataParallel + + class DummyDataset(Dataset): + + def __getitem__(self, index): + return 0 + + def __len__(self): + return 0 + + def evaluate(self, *args, **kwargs): + return 0 + + def format_results(self, *args, **kwargs): + return 0 + + dataset = DummyDataset() + # Prepare dataloader + dataloader = DataLoader(dataset) + + # Prepare dummy model + model = DummyModel(outputs=[torch.rand([1, 10, 5]), torch.rand([1, 10])]) + model = MMDataParallel(model, device_ids=[0]) + # Run test + outputs = task_processor.single_gpu_test(model, dataloader) + assert isinstance(outputs, list) + output_file = NamedTemporaryFile(suffix='.pkl').name + task_processor.evaluate_outputs( + model_cfg, outputs, dataset, 'bbox', out=output_file, format_only=True) diff --git a/tests/test_codebase/test_mmdet/test_object_detection_model.py b/tests/test_codebase/test_mmdet/test_object_detection_model.py new file mode 100644 index 0000000000..bb16f8970b --- /dev/null +++ b/tests/test_codebase/test_mmdet/test_object_detection_model.py @@ -0,0 +1,446 @@ +import importlib +import os.path as osp +from tempfile import NamedTemporaryFile + +import mmcv +import numpy as np +import pytest +import torch + +import mmdeploy.backend.onnxruntime as ort_apis +from mmdeploy.codebase.mmdet.deploy.object_detection_model import End2EndModel +from mmdeploy.utils import Backend +from mmdeploy.utils.test import SwitchBackendWrapper + + +def assert_det_results(results, module_name: str = 'model'): + assert results is not None, f'failed to get output using {module_name}' + assert isinstance(results, list) + assert len(results) == 2 + assert results[0].shape[0] == results[1].shape[0] + assert results[0].shape[1] == results[1].shape[1] + + +def assert_forward_results(results, module_name: str = 'model'): + assert results is not None, f'failed to get output using {module_name}' + assert isinstance(results, list) + assert len(results) == 1 + if isinstance(results[0], tuple): # mask + assert len(results[0][0]) == 80 + else: + assert len(results[0]) == 80 + 
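+# NOTE: the length-80 checks above assume COCO's 80 object classes; the tests +# below construct dummy detectors with CLASSES = ['' for i in range(80)] to match.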
+@pytest.mark.skipif( + not importlib.util.find_spec('onnxruntime'), reason='requires onnxruntime') +class TestEnd2EndModel: + + @classmethod + def setup_class(cls): + # force add backend wrapper regardless of plugins + # make sure ONNXRuntimeDetector can use ORTWrapper inside itself + from mmdeploy.backend.onnxruntime import ORTWrapper + ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) + + # simplify backend inference + cls.wrapper = SwitchBackendWrapper(ORTWrapper) + cls.outputs = { + 'dets': torch.rand(1, 10, 5), + 'labels': torch.rand(1, 10) + } + cls.wrapper.set(outputs=cls.outputs) + deploy_cfg = mmcv.Config( + {'onnx_config': { + 'output_names': ['dets', 'labels'] + }}) + + from mmdeploy.codebase.mmdet.deploy.object_detection_model \ + import End2EndModel + cls.end2end_model = End2EndModel(Backend.ONNXRUNTIME, [''], 'cpu', + ['' for i in range(80)], deploy_cfg) + + @classmethod + def teardown_class(cls): + cls.wrapper.recover() + + def test_forward(self): + imgs = [torch.rand(1, 3, 64, 64)] + img_metas = [[{ + 'ori_shape': [64, 64, 3], + 'img_shape': [64, 64, 3], + 'scale_factor': [1, 1, 1, 1], + 'border': [0, 0, 0] + }]] + results = self.end2end_model.forward(imgs, img_metas) + assert_forward_results(results, 'End2EndModel') + + def test_show_result(self): + input_img = np.zeros([64, 64, 3]) + img_path = NamedTemporaryFile(suffix='.jpg').name + + result = (torch.rand(1, 10, 5), torch.rand(1, 10)) + self.end2end_model.show_result( + input_img, result, '', show=False, out_file=img_path) + assert osp.exists(img_path) + + +@pytest.mark.skipif( + not importlib.util.find_spec('onnxruntime'), reason='requires onnxruntime') +class TestMaskEnd2EndModel: + + @classmethod + def setup_class(cls): + # force add backend wrapper regardless of plugins + # make sure ONNXRuntimeDetector can use ORTWrapper inside itself + from mmdeploy.backend.onnxruntime import ORTWrapper + ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) + + # simplify backend inference + num_classes = 80 + num_dets = 10 + cls.wrapper = SwitchBackendWrapper(ORTWrapper) + cls.outputs = { + 'dets': torch.rand(1, num_dets, 5), + 'labels': torch.randint(num_classes, (1, num_dets)), + 'masks': torch.rand(1, num_dets, 28, 28) + } + cls.wrapper.set(outputs=cls.outputs) + deploy_cfg = mmcv.Config({ + 'onnx_config': { + 'output_names': ['dets', 'labels', 'masks'] + }, + 'codebase_config': { + 'post_processing': { + 'export_postprocess_mask': False + } + } + }) + + from mmdeploy.codebase.mmdet.deploy.object_detection_model \ + import End2EndModel + cls.end2end_model = End2EndModel(Backend.ONNXRUNTIME, [''], 'cpu', + ['' for i in range(80)], deploy_cfg) + + @classmethod + def teardown_class(cls): + cls.wrapper.recover() + + def test_forward(self): + imgs = [torch.rand(1, 3, 64, 64)] + img_metas = [[{ + 'ori_shape': [64, 64, 3], + 'img_shape': [64, 64, 3], + 'scale_factor': [1, 1, 1, 1], + }]] + results = self.end2end_model.forward(imgs, img_metas) + assert_forward_results(results, 'mask End2EndModel') + + +def get_test_cfg_and_post_processing(): + test_cfg = { + 'nms_pre': 100, + 'min_bbox_size': 0, + 'score_thr': 0.05, + 'nms': { + 'type': 'nms', + 'iou_threshold': 0.5 + }, + 'max_per_img': 10 + } + post_processing = { + 'score_threshold': 0.05, + 'iou_threshold': 0.5, + 'max_output_boxes_per_class': 20, + 'pre_top_k': -1, + 'keep_top_k': 10, + 'background_label_id': -1 + } + return test_cfg, post_processing + + +@pytest.mark.skipif( + not importlib.util.find_spec('onnxruntime'), reason='requires onnxruntime') +class 
TestPartitionSingleStageModel: + + @classmethod + def setup_class(cls): + # force add backend wrapper regardless of plugins + # make sure ONNXRuntimeDetector can use ORTWrapper inside itself + from mmdeploy.backend.onnxruntime import ORTWrapper + ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) + + # simplify backend inference + cls.wrapper = SwitchBackendWrapper(ORTWrapper) + cls.outputs = { + 'scores': torch.rand(1, 10, 80), + 'boxes': torch.rand(1, 10, 4) + } + cls.wrapper.set(outputs=cls.outputs) + + test_cfg, post_processing = get_test_cfg_and_post_processing() + model_cfg = mmcv.Config(dict(model=dict(test_cfg=test_cfg))) + deploy_cfg = mmcv.Config( + dict(codebase_config=dict(post_processing=post_processing))) + + from mmdeploy.codebase.mmdet.deploy.object_detection_model \ + import PartitionSingleStageModel + cls.model = PartitionSingleStageModel( + Backend.ONNXRUNTIME, [''], + 'cpu', ['' for i in range(80)], + model_cfg=model_cfg, + deploy_cfg=deploy_cfg) + + @classmethod + def teardown_class(cls): + cls.wrapper.recover() + + def test_forward_test(self): + imgs = [torch.rand(1, 3, 64, 64)] + img_metas = [[{ + 'ori_shape': [64, 64, 3], + 'img_shape': [64, 64, 3], + 'scale_factor': [1, 1, 1, 1], + }]] + results = self.model.forward_test(imgs, img_metas) + assert_det_results(results, 'PartitionSingleStageModel') + + def test_postprocess(self): + scores = torch.rand(1, 120, 80) + bboxes = torch.rand(1, 120, 4) + + results = self.model.partition0_postprocess( + scores=scores, bboxes=bboxes) + assert_det_results( + results, 'partition0_postprocess of ' + 'PartitionSingleStageModel') + + +def prepare_model_deploy_cfgs(): + test_cfg, post_processing = get_test_cfg_and_post_processing() + bbox_roi_extractor = { + 'type': 'SingleRoIExtractor', + 'roi_layer': { + 'type': 'RoIAlign', + 'output_size': 7, + 'sampling_ratio': 0 + }, + 'out_channels': 8, + 'featmap_strides': [4] + } + bbox_head = { + 'type': 'Shared2FCBBoxHead', + 'in_channels': 8, + 'fc_out_channels': 1024, + 'roi_feat_size': 7, + 'num_classes': 80, + 'bbox_coder': { + 'type': 'DeltaXYWHBBoxCoder', + 'target_means': [0.0, 0.0, 0.0, 0.0], + 'target_stds': [0.1, 0.1, 0.2, 0.2] + }, + 'reg_class_agnostic': False, + 'loss_cls': { + 'type': 'CrossEntropyLoss', + 'use_sigmoid': False, + 'loss_weight': 1.0 + }, + 'loss_bbox': { + 'type': 'L1Loss', + 'loss_weight': 1.0 + } + } + roi_head = dict(bbox_roi_extractor=bbox_roi_extractor, bbox_head=bbox_head) + model_cfg = mmcv.Config( + dict( + model=dict( + neck=dict(num_outs=0), + test_cfg=dict(rpn=test_cfg, rcnn=test_cfg), + roi_head=roi_head))) + deploy_cfg = mmcv.Config( + dict(codebase_config=dict(post_processing=post_processing))) + return model_cfg, deploy_cfg + + +class DummyWrapper(torch.nn.Module): + + def __init__(self, outputs): + torch.nn.Module.__init__(self) + self.outputs = outputs + + def __call__(self, *arg, **kwargs): + return 0 + + def output_to_list(self, *arg, **kwargs): + return self.outputs + + +@pytest.mark.skipif( + not importlib.util.find_spec('onnxruntime'), reason='requires onnxruntime') +class TestPartitionTwoStageModel: + + @classmethod + def setup_class(cls): + # force add backend wrapper regardless of plugins + # make sure ONNXRuntimeDetector can use ORTWrapper inside itself + from mmdeploy.backend.onnxruntime import ORTWrapper + ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) + + # simplify backend inference + cls.wrapper = SwitchBackendWrapper(ORTWrapper) + outputs = [ + np.random.rand(1, 12, 80).astype(np.float32), + np.random.rand(1, 12, 4).astype(np.float32), + ] * 2 + 
model_cfg, deploy_cfg = prepare_model_deploy_cfgs() + + cls.wrapper.set( + outputs=outputs, model_cfg=model_cfg, deploy_cfg=deploy_cfg) + + # replace original function in PartitionTwoStageModel + from mmdeploy.codebase.mmdet.deploy.object_detection_model \ + import PartitionTwoStageModel + + cls.model = PartitionTwoStageModel( + Backend.ONNXRUNTIME, ['', ''], + 'cpu', ['' for i in range(80)], + model_cfg=model_cfg, + deploy_cfg=deploy_cfg) + feats = [torch.randn(1, 8, 14, 14) for i in range(5)] + scores = torch.rand(1, 10, 1) + bboxes = torch.rand(1, 10, 4) + bboxes[..., 2:4] = 2 * bboxes[..., :2] + + cls_score = torch.rand(10, 81) + bbox_pred = torch.rand(10, 320) + + cls.model.device = 'cpu' + cls.model.CLASSES = ['' for i in range(80)] + cls.model.first_wrapper = DummyWrapper([*feats, scores, bboxes]) + cls.model.second_wrapper = DummyWrapper([cls_score, bbox_pred]) + + @classmethod + def teardown_class(cls): + cls.wrapper.recover() + + def test_postprocess(self): + feats = [torch.randn(1, 8, 14, 14) for i in range(5)] + scores = torch.rand(1, 50, 1) + bboxes = torch.rand(1, 50, 4) + bboxes[..., 2:4] = 2 * bboxes[..., :2] + + results = self.model.partition0_postprocess( + x=feats, scores=scores, bboxes=bboxes) + assert results is not None, 'failed to get output using '\ + 'partition0_postprocess of PartitionTwoStageDetector' + assert isinstance(results, tuple) + assert len(results) == 2 + + rois = torch.rand(1, 10, 5) + cls_score = torch.rand(10, 81) + bbox_pred = torch.rand(10, 320) + img_metas = [[{ + 'ori_shape': [32, 32, 3], + 'img_shape': [32, 32, 3], + 'scale_factor': [1, 1, 1, 1], + }]] + results = self.model.partition1_postprocess( + rois=rois, + cls_score=cls_score, + bbox_pred=bbox_pred, + img_metas=img_metas) + assert results is not None, 'failed to get output using '\ + 'partition1_postprocess of PartitionTwoStageDetector' + assert isinstance(results, tuple) + assert len(results) == 2 + + def test_forward(self): + + class DummyPTSDetector(torch.nn.Module): + """A dummy wrapper for unit tests.""" + + def __init__(self, *args, **kwargs): + self.output_names = ['dets', 'labels'] + + def partition0_postprocess(self, *args, **kwargs): + return self.outputs0 + + def partition1_postprocess(self, *args, **kwargs): + return self.outputs1 + + import types + self.model.partition0_postprocess = types.MethodType( + DummyPTSDetector.partition0_postprocess, self.model) + self.model.partition1_postprocess = types.MethodType( + DummyPTSDetector.partition1_postprocess, self.model) + self.model.outputs0 = [torch.rand(2, 3).cuda()] * 2 + self.model.outputs1 = [ + torch.rand(1, 9, 5).cuda(), + torch.rand(1, 9).cuda() + ] + + imgs = [torch.rand(1, 3, 32, 32)] + img_metas = [[{ + 'ori_shape': [32, 32, 3], + 'img_shape': [32, 32, 3], + 'scale_factor': [1, 1, 1, 1], + }]] + results = self.model.forward(imgs, img_metas) + assert_forward_results(results, 'PartitionTwoStageModel') + + +data_cfg1 = mmcv.Config( + dict( + data=dict( + test=dict(type='CocoDataset'), + val=dict(type='CityscapesDataset'), + train=dict(type='CityscapesDataset')))) +data_cfg2 = mmcv.Config( + dict( + data=dict( + val=dict(type='CocoDataset'), train=dict( + type='CityscapesDataset')))) +data_cfg3 = mmcv.Config(dict(data=dict(train=dict(type='CocoDataset')))) +data_cfg4 = mmcv.Config(dict(data=dict(error=dict(type='CocoDataset')))) + + +@pytest.mark.parametrize('cfg', [data_cfg1, data_cfg2, data_cfg3, data_cfg4]) +def test_get_classes_from_cfg(cfg): + from mmdet.datasets import DATASETS + from 
mmdeploy.codebase.mmdet.deploy.object_detection_model import \ + get_classes_from_config + + if 'error' in cfg.data: + with pytest.raises(RuntimeError): + get_classes_from_config(cfg) + else: + assert get_classes_from_config( + cfg) == DATASETS.module_dict['CocoDataset'].CLASSES + + +@pytest.mark.skipif( + not importlib.util.find_spec('onnxruntime'), reason='requires onnxruntime') +@pytest.mark.parametrize('partition_type', [None, 'end2end']) +def test_build_object_detection_model(partition_type): + _, post_processing = get_test_cfg_and_post_processing() + model_cfg = mmcv.Config(dict(data=dict(test={'type': 'CocoDataset'}))) + deploy_cfg = mmcv.Config( + dict( + backend_config=dict(type='onnxruntime'), + onnx_config=dict(output_names=['dets', 'labels']), + codebase_config=dict( + type='mmdet', post_processing=post_processing))) + if partition_type: + deploy_cfg.partition_config = dict( + apply_marks=True, type=partition_type) + + from mmdeploy.backend.onnxruntime import ORTWrapper + ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) + + # simplify backend inference + with SwitchBackendWrapper(ORTWrapper) as wrapper: + wrapper.set(model_cfg=model_cfg, deploy_cfg=deploy_cfg) + from mmdeploy.codebase.mmdet.deploy.object_detection_model import \ + build_object_detection_model + detector = build_object_detection_model([''], model_cfg, deploy_cfg, + 'cpu') + assert isinstance(detector, End2EndModel) diff --git a/tests/test_mmedit/data/imgs/blank.jpg b/tests/test_codebase/test_mmedit/data/imgs/blank.jpg similarity index 100% rename from tests/test_mmedit/data/imgs/blank.jpg rename to tests/test_codebase/test_mmedit/data/imgs/blank.jpg diff --git a/tests/test_mmedit/data/model.py b/tests/test_codebase/test_mmedit/data/model.py similarity index 96% rename from tests/test_mmedit/data/model.py rename to tests/test_codebase/test_mmedit/data/model.py index 289ece5728..eecc3f7fb3 100644 --- a/tests/test_mmedit/data/model.py +++ b/tests/test_codebase/test_mmedit/data/model.py @@ -73,8 +73,8 @@ test_dataloader=dict(samples_per_gpu=1), test=dict( type=val_dataset_type, - lq_folder='tests/test_mmedit/data/imgs', - gt_folder='tests/test_mmedit/data/imgs', + lq_folder='tests/test_codebase/test_mmedit/data/imgs', + gt_folder='tests/test_codebase/test_mmedit/data/imgs', pipeline=test_pipeline, scale=scale, filename_tmpl='{}')) diff --git a/tests/test_mmedit/test_mmedit_models.py b/tests/test_codebase/test_mmedit/test_mmedit_models.py similarity index 100% rename from tests/test_mmedit/test_mmedit_models.py rename to tests/test_codebase/test_mmedit/test_mmedit_models.py diff --git a/tests/test_codebase/test_mmedit/test_super_resolution.py b/tests/test_codebase/test_mmedit/test_super_resolution.py new file mode 100644 index 0000000000..1839dee317 --- /dev/null +++ b/tests/test_codebase/test_mmedit/test_super_resolution.py @@ -0,0 +1,117 @@ +import os +import tempfile +from tempfile import NamedTemporaryFile + +import mmcv +import numpy as np +import pytest +import torch + +import mmdeploy.apis.onnxruntime as ort_apis +from mmdeploy.apis import build_task_processor +from mmdeploy.utils import load_config +from mmdeploy.utils.test import SwitchBackendWrapper + +model_cfg = 'tests/test_codebase/test_mmedit/data/model.py' +model_cfg = load_config(model_cfg)[0] +deploy_cfg = mmcv.Config( + dict( + backend_config=dict(type='onnxruntime'), + codebase_config=dict(type='mmedit', task='SuperResolution'), + onnx_config=dict( + type='onnx', + export_params=True, + keep_initializers_as_inputs=False, + opset_version=11, + 
input_shape=None, + input_names=['input'], + output_names=['output']))) +input_img = np.random.rand(32, 32, 3) +img_shape = [32, 32] +input = {'lq': input_img} +onnx_file = NamedTemporaryFile(suffix='.onnx').name +task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu') + + +def test_init_pytorch_model(): + torch_model = task_processor.init_pytorch_model(None) + assert torch_model is not None + + +@pytest.fixture +def backend_model(): + from mmdeploy.backend.onnxruntime import ORTWrapper + ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) + wrapper = SwitchBackendWrapper(ORTWrapper) + wrapper.set(outputs={ + 'output': torch.rand(3, 50, 50), + }) + + yield task_processor.init_backend_model(['']) + + wrapper.recover() + + +def test_init_backend_model(backend_model): + assert backend_model is not None + + +def test_create_input(): + inputs = task_processor.create_input(input_img, img_shape=img_shape) + assert inputs is not None + + +def test_visualize(backend_model): + result = task_processor.run_inference(backend_model, input) + with tempfile.TemporaryDirectory() as dir: + filename = dir + 'tmp.jpg' + task_processor.visualize(backend_model, input_img, result[0], filename, + 'onnxruntime') + assert os.path.exists(filename) + + +def test_run_inference(backend_model): + results = task_processor.run_inference(backend_model, input) + assert results is not None + + +def test_get_tensor_from_input(): + assert type(task_processor.get_tensor_from_input(input)) is not dict + + +def test_get_partition_cfg(): + with pytest.raises(NotImplementedError): + task_processor.get_partition_cfg(None) + + +def test_build_dataset(): + data = dict( + test={ + 'type': 'SRFolderDataset', + 'lq_folder': 'tests/test_codebase/test_mmedit/data/imgs', + 'gt_folder': 'tests/test_codebase/test_mmedit/data/imgs', + 'scale': 1, + 'filename_tmpl': '{}', + 'pipeline': [ + { + 'type': 'LoadImageFromFile' + }, + ] + }) + dataset_cfg = mmcv.Config(dict(data=data)) + dataset = task_processor.build_dataset( + dataset_cfg=dataset_cfg, dataset_type='test') + assert dataset is not None, 'Failed to build dataset' + dataloader = task_processor.build_dataloader(dataset, 1, 1) + assert dataloader is not None, 'Failed to build dataloader' + + +def test_single_gpu_test(backend_model): + from mmcv.parallel import MMDataParallel + dataset = task_processor.build_dataset(model_cfg, dataset_type='test') + assert dataset is not None, 'Failed to build dataset' + dataloader = task_processor.build_dataloader(dataset, 1, 1) + assert dataloader is not None, 'Failed to build dataloader' + backend_model = MMDataParallel(backend_model, device_ids=[0]) + outputs = task_processor.single_gpu_test(backend_model, dataloader) + assert outputs is not None, 'Failed to test model' diff --git a/tests/test_codebase/test_mmedit/test_super_resolution_model.py b/tests/test_codebase/test_mmedit/test_super_resolution_model.py new file mode 100644 index 0000000000..0c12260a3c --- /dev/null +++ b/tests/test_codebase/test_mmedit/test_super_resolution_model.py @@ -0,0 +1,53 @@ +import importlib + +import mmcv +import numpy as np +import pytest +import torch + +import mmdeploy.backend.onnxruntime as ort_apis +from mmdeploy.utils import Backend, load_config +from mmdeploy.utils.test import SwitchBackendWrapper + + +@pytest.mark.skipif( + not importlib.util.find_spec('onnxruntime'), reason='requires onnxruntime') +class TestEnd2EndModel: + + @classmethod + def setup_class(cls): + # force add backend wrapper regardless of plugins + # make sure ONNXRuntimeEditor can 
use ORTWrapper inside itself + from mmdeploy.backend.onnxruntime import ORTWrapper + ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) + + # simplify backend inference + cls.wrapper = SwitchBackendWrapper(ORTWrapper) + cls.outputs = { + 'outputs': torch.rand(3, 64, 64), + } + cls.wrapper.set(outputs=cls.outputs) + deploy_cfg = mmcv.Config( + {'onnx_config': { + 'output_names': ['outputs'] + }}) + model_cfg = 'tests/test_codebase/test_mmedit/data/model.py' + model_cfg = load_config(model_cfg)[0] + from mmdeploy.codebase.mmedit.deploy.super_resolution_model\ + import End2EndModel + cls.end2end_model = End2EndModel(Backend.ONNXRUNTIME, [''], 'cpu', + model_cfg, deploy_cfg) + + @classmethod + def teardown_class(cls): + cls.wrapper.recover() + + def test_forward(self): + input_img = np.random.rand(3, 32, 32) + + results = self.end2end_model.forward(input_img, test_mode=False) + assert results is not None + + results = self.end2end_model.forward( + input_img, test_mode=True, gt=torch.tensor(results[0])) + assert results is not None diff --git a/tests/test_mmseg/data/model.py b/tests/test_codebase/test_mmseg/data/model.py similarity index 50% rename from tests/test_mmseg/data/model.py rename to tests/test_codebase/test_mmseg/data/model.py index 925c36db5f..d839a24fa0 100644 --- a/tests/test_mmseg/data/model.py +++ b/tests/test_codebase/test_mmseg/data/model.py @@ -1,15 +1,15 @@ # dataset settings dataset_type = 'CityscapesDataset' -data_root = 'data/cityscapes/' +data_root = '.' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) -crop_size = (512, 1024) +crop_size = (128, 128) test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', - img_scale=(2048, 1024), + img_scale=(128, 128), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), @@ -25,58 +25,44 @@ val=dict( type=dataset_type, data_root=data_root, - img_dir='leftImg8bit/val', - ann_dir='gtFine/val', + img_dir='', + ann_dir='', pipeline=test_pipeline), test=dict( type=dataset_type, data_root=data_root, - img_dir='leftImg8bit/val', - ann_dir='gtFine/val', + img_dir='', + ann_dir='', pipeline=test_pipeline)) # model settings -norm_cfg = dict(type='SyncBN', requires_grad=True) +norm_cfg = dict(type='SyncBN', requires_grad=True, momentum=0.01) model = dict( type='EncoderDecoder', - pretrained='open-mmlab://resnet50_v1c', backbone=dict( - type='ResNetV1c', - depth=50, - num_stages=4, - out_indices=(0, 1, 2, 3), - dilations=(1, 1, 2, 4), - strides=(1, 2, 1, 1), + type='FastSCNN', + downsample_dw_channels=(32, 48), + global_in_channels=64, + global_block_channels=(64, 96, 128), + global_block_strides=(2, 2, 1), + global_out_channels=128, + higher_in_channels=64, + lower_in_channels=128, + fusion_out_channels=128, + out_indices=(0, 1, 2), norm_cfg=norm_cfg, - norm_eval=False, - style='pytorch', - contract_dilation=True), + align_corners=False), decode_head=dict( - type='FCNHead', - in_channels=2048, - in_index=3, - channels=512, - num_convs=2, - concat_input=True, - dropout_ratio=0.1, - num_classes=19, - norm_cfg=norm_cfg, - align_corners=False, - loss_decode=dict( - type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), - auxiliary_head=dict( - type='FCNHead', - in_channels=1024, - in_index=2, - channels=256, - num_convs=1, + type='DepthwiseSeparableFCNHead', + in_channels=128, + channels=128, concat_input=False, - dropout_ratio=0.1, num_classes=19, + in_index=-1, norm_cfg=norm_cfg, align_corners=False, loss_decode=dict( - type='CrossEntropyLoss', 
use_sigmoid=False, loss_weight=0.4)), + type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1)), # model training and testing settings train_cfg=dict(), test_cfg=dict(mode='whole')) diff --git a/tests/test_mmseg/test_mmseg_models.py b/tests/test_codebase/test_mmseg/test_mmseg_models.py similarity index 88% rename from tests/test_mmseg/test_mmseg_models.py rename to tests/test_codebase/test_mmseg/test_mmseg_models.py index a6338aa723..de40e81041 100644 --- a/tests/test_mmseg/test_mmseg_models.py +++ b/tests/test_codebase/test_mmseg/test_mmseg_models.py @@ -1,5 +1,6 @@ import mmcv import numpy as np +import pytest import torch import torch.nn as nn from mmcv import ConfigDict @@ -87,13 +88,14 @@ def _demo_mm_inputs(input_shape=(1, 3, 8, 16), num_classes=10): return mm_inputs -def test_encoderdecoder_simple_test(): +@pytest.mark.parametrize('backend_type', ['onnxruntime', 'ncnn']) +def test_encoderdecoder_simple_test(backend_type): segmentor = get_model() segmentor.cpu().eval() deploy_cfg = mmcv.Config( dict( - backend_config=dict(type='onnxruntime'), + backend_config=dict(type=backend_type), onnx_config=dict(output_names=['result'], input_shape=None), codebase_config=dict(type='mmseg', task='Segmentation'))) @@ -119,22 +121,24 @@ def test_encoderdecoder_simple_test(): wrapped_model=wrapped_model, model_inputs=rewrite_inputs, deploy_cfg=deploy_cfg) + if is_backend_output: - rewrite_outputs = torch.tensor(rewrite_outputs) - model_outputs = torch.tensor(model_outputs) + rewrite_outputs = rewrite_outputs[0] + model_outputs = torch.tensor(model_outputs[0]) model_outputs = model_outputs.unsqueeze(0).unsqueeze(0) assert torch.allclose(rewrite_outputs, model_outputs) else: assert rewrite_outputs is not None -def test_basesegmentor_forward(): +@pytest.mark.parametrize('backend_type', ['onnxruntime', 'ncnn']) +def test_basesegmentor_forward(backend_type): segmentor = get_model() segmentor.cpu().eval() deploy_cfg = mmcv.Config( dict( - backend_config=dict(type='onnxruntime'), + backend_config=dict(type=backend_type), onnx_config=dict(output_names=['result'], input_shape=None), codebase_config=dict(type='mmseg', task='Segmentation'))) @@ -159,15 +163,16 @@ def test_basesegmentor_forward(): model_inputs=rewrite_inputs, deploy_cfg=deploy_cfg) if is_backend_output: - rewrite_outputs = torch.tensor(rewrite_outputs) - model_outputs = torch.tensor(model_outputs) + rewrite_outputs = torch.tensor(rewrite_outputs[0]) + model_outputs = torch.tensor(model_outputs[0]) model_outputs = model_outputs.unsqueeze(0).unsqueeze(0) assert torch.allclose(rewrite_outputs, model_outputs) else: assert rewrite_outputs is not None -def test_aspphead_forward(): +@pytest.mark.parametrize('backend_type', ['onnxruntime', 'ncnn']) +def test_aspphead_forward(backend_type): from mmseg.models.decode_heads import ASPPHead head = ASPPHead( in_channels=32, channels=16, num_classes=19, @@ -175,7 +180,7 @@ def test_aspphead_forward(): deploy_cfg = mmcv.Config( dict( - backend_config=dict(type='onnxruntime'), + backend_config=dict(type=backend_type), onnx_config=dict( output_names=['result'], input_shape=(1, 32, 45, 45)), codebase_config=dict(type='mmseg', task='Segmentation'))) @@ -197,13 +202,14 @@ def test_aspphead_forward(): assert rewrite_outputs is not None -def test_psphead_forward(): +@pytest.mark.parametrize('backend_type', ['onnxruntime', 'ncnn']) +def test_psphead_forward(backend_type): from mmseg.models.decode_heads import PSPHead head = PSPHead(in_channels=32, channels=16, num_classes=19).eval() deploy_cfg = mmcv.Config( dict( 
- backend_config=dict(type='onnxruntime'), + backend_config=dict(type=backend_type), onnx_config=dict(output_names=['result'], input_shape=None), codebase_config=dict(type='mmseg', task='Segmentation'))) inputs = [torch.randn(1, 32, 45, 45)] diff --git a/tests/test_codebase/test_mmseg/test_segmentation.py b/tests/test_codebase/test_mmseg/test_segmentation.py new file mode 100644 index 0000000000..b0c987ad99 --- /dev/null +++ b/tests/test_codebase/test_mmseg/test_segmentation.py @@ -0,0 +1,115 @@ +import os +from tempfile import NamedTemporaryFile, TemporaryDirectory + +import mmcv +import numpy as np +import pytest +import torch +from torch.utils.data import DataLoader + +import mmdeploy.backend.onnxruntime as ort_apis +from mmdeploy.apis import build_task_processor +from mmdeploy.utils import load_config +from mmdeploy.utils.test import DummyModel, SwitchBackendWrapper + +model_cfg_path = 'tests/test_codebase/test_mmseg/data/model.py' +model_cfg = load_config(model_cfg_path)[0] +deploy_cfg = mmcv.Config( + dict( + backend_config=dict(type='onnxruntime'), + codebase_config=dict(type='mmseg', task='Segmentation'), + onnx_config=dict( + type='onnx', + export_params=True, + keep_initializers_as_inputs=False, + opset_version=11, + input_shape=None, + input_names=['input'], + output_names=['output']))) + +onnx_file = NamedTemporaryFile(suffix='.onnx').name +task_processor = build_task_processor(model_cfg, deploy_cfg, 'cpu') +img_shape = (32, 32) +img = np.random.rand(*img_shape, 3) + + +def test_init_pytorch_model(): + from mmseg.models.segmentors.base import BaseSegmentor + model = task_processor.init_pytorch_model(None) + assert isinstance(model, BaseSegmentor) + + +@pytest.fixture +def backend_model(): + from mmdeploy.backend.onnxruntime import ORTWrapper + ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) + wrapper = SwitchBackendWrapper(ORTWrapper) + wrapper.set(outputs={ + 'output': torch.rand(1, 1, *img_shape), + }) + + yield task_processor.init_backend_model(['']) + + wrapper.recover() + + +def test_init_backend_model(backend_model): + assert isinstance(backend_model, torch.nn.Module) + + +def test_create_input(): + inputs = task_processor.create_input(img, input_shape=img_shape) + assert isinstance(inputs, tuple) and len(inputs) == 2 + + +def test_run_inference(backend_model): + input_dict, _ = task_processor.create_input(img, input_shape=img_shape) + results = task_processor.run_inference(backend_model, input_dict) + assert results is not None + + +def test_visualize(backend_model): + input_dict, _ = task_processor.create_input(img, input_shape=img_shape) + results = task_processor.run_inference(backend_model, input_dict) + with TemporaryDirectory() as dir: + filename = dir + 'tmp.jpg' + task_processor.visualize(backend_model, img, results[0], filename, '') + assert os.path.exists(filename) + + +def test_get_tensor_from_input(): + input_data = {'img': [torch.ones(3, 4, 5)]} + inputs = task_processor.get_tensor_from_input(input_data) + assert torch.equal(inputs, torch.ones(3, 4, 5)) + + +def test_get_partition_cfg(): + try: + _ = task_processor.get_partition_cfg(partition_type='') + except NotImplementedError: + pass + + +def test_build_dataset_and_dataloader(): + from torch.utils.data import Dataset, DataLoader + dataset = task_processor.build_dataset( + dataset_cfg=model_cfg, dataset_type='test') + assert isinstance(dataset, Dataset), 'Failed to build dataset' + dataloader = task_processor.build_dataloader(dataset, 1, 1) + assert isinstance(dataloader, DataLoader), 'Failed to
build dataloader' + + +def test_single_gpu_test_and_evaluate(): + from mmcv.parallel import MMDataParallel + + # Prepare dataloader + dataloader = DataLoader([]) + + # Prepare dummy model + model = DummyModel(outputs=[torch.rand([1, 1, *img_shape])]) + model = MMDataParallel(model, device_ids=[0]) + assert model is not None + # Run test + outputs = task_processor.single_gpu_test(model, dataloader) + assert outputs is not None + task_processor.evaluate_outputs(model_cfg, outputs, []) diff --git a/tests/test_codebase/test_mmseg/test_segmentation_model.py b/tests/test_codebase/test_mmseg/test_segmentation_model.py new file mode 100644 index 0000000000..b8a5c61bc8 --- /dev/null +++ b/tests/test_codebase/test_mmseg/test_segmentation_model.py @@ -0,0 +1,137 @@ +import importlib +import os.path as osp +from tempfile import NamedTemporaryFile + +import mmcv +import numpy as np +import pytest +import torch + +import mmdeploy.backend.onnxruntime as ort_apis +from mmdeploy.utils import Backend +from mmdeploy.utils.test import SwitchBackendWrapper + +NUM_CLASS = 19 +IMAGE_SIZE = 32 + + +@pytest.mark.skipif( + not importlib.util.find_spec('onnxruntime'), reason='requires onnxruntime') +class TestEnd2EndModel: + + @classmethod + def setup_class(cls): + # force add backend wrapper regardless of plugins + from mmdeploy.backend.onnxruntime import ORTWrapper + ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) + + # simplify backend inference + cls.wrapper = SwitchBackendWrapper(ORTWrapper) + cls.outputs = { + 'outputs': torch.rand(1, 1, IMAGE_SIZE, IMAGE_SIZE), + } + cls.wrapper.set(outputs=cls.outputs) + deploy_cfg = mmcv.Config( + {'onnx_config': { + 'output_names': ['outputs'] + }}) + + from mmdeploy.codebase.mmseg.deploy.segmentation_model \ + import End2EndModel + class_names = ['' for i in range(NUM_CLASS)] + palette = np.random.randint(0, 255, size=(NUM_CLASS, 3)) + cls.end2end_model = End2EndModel( + Backend.ONNXRUNTIME, [''], + device='cpu', + class_names=class_names, + palette=palette, + deploy_cfg=deploy_cfg) + + @classmethod + def teardown_class(cls): + cls.wrapper.recover() + + @pytest.mark.parametrize( + 'ori_shape', + [[IMAGE_SIZE, IMAGE_SIZE, 3], [2 * IMAGE_SIZE, 2 * IMAGE_SIZE, 3]]) + def test_forward(self, ori_shape): + imgs = [torch.rand(1, 3, IMAGE_SIZE, IMAGE_SIZE)] + img_metas = [[{ + 'ori_shape': ori_shape, + 'img_shape': [IMAGE_SIZE, IMAGE_SIZE, 3], + 'scale_factor': [1., 1., 1., 1.], + }]] + results = self.end2end_model.forward(imgs, img_metas) + assert results is not None, 'failed to get output using '\ + 'End2EndModel' + + def test_forward_test(self): + imgs = torch.rand(2, 3, IMAGE_SIZE, IMAGE_SIZE) + results = self.end2end_model.forward_test(imgs) + assert isinstance(results[0], np.ndarray) + + def test_show_result(self): + input_img = np.zeros([IMAGE_SIZE, IMAGE_SIZE, 3]) + img_path = NamedTemporaryFile(suffix='.jpg').name + + result = [torch.rand(IMAGE_SIZE, IMAGE_SIZE)] + self.end2end_model.show_result( + input_img, result, '', show=False, out_file=img_path) + assert osp.exists(img_path), 'Fails to create drawn image.' 
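+ # The random palette from setup_class has shape (NUM_CLASS, 3), one RGB color per class, which is presumably what show_result uses to colorize the predicted label map.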
+ + +@pytest.mark.parametrize('from_file', [True, False]) +@pytest.mark.parametrize('data_type', ['train', 'val', 'test']) +def test_get_classes_palette_from_config(from_file, data_type): + from mmseg.datasets import DATASETS + from mmdeploy.codebase.mmseg.deploy.segmentation_model \ + import get_classes_palette_from_config + dataset_type = 'CityscapesDataset' + data_cfg = mmcv.Config({ + 'data': { + data_type: + dict( + type=dataset_type, + data_root='', + img_dir='', + ann_dir='', + pipeline=None) + } + }) + + if from_file: + config_path = NamedTemporaryFile(suffix='.py').name + with open(config_path, 'w') as file: + file.write(data_cfg.pretty_text) + data_cfg = config_path + + classes, palette = get_classes_palette_from_config(data_cfg) + module = DATASETS.module_dict[dataset_type] + assert classes == module.CLASSES, \ + f'fail to get CLASSES of dataset: {dataset_type}' + assert palette == module.PALETTE, \ + f'fail to get PALETTE of dataset: {dataset_type}' + + +@pytest.mark.skipif( + not importlib.util.find_spec('onnxruntime'), reason='requires onnxruntime') +def test_build_segmentation_model(): + model_cfg = mmcv.Config( + dict(data=dict(test={'type': 'CityscapesDataset'}))) + deploy_cfg = mmcv.Config( + dict( + backend_config=dict(type='onnxruntime'), + onnx_config=dict(output_names=['outputs']), + codebase_config=dict(type='mmseg'))) + + from mmdeploy.backend.onnxruntime import ORTWrapper + ort_apis.__dict__.update({'ORTWrapper': ORTWrapper}) + + # simplify backend inference + with SwitchBackendWrapper(ORTWrapper) as wrapper: + wrapper.set(model_cfg=model_cfg, deploy_cfg=deploy_cfg) + from mmdeploy.codebase.mmseg.deploy.segmentation_model import \ + build_segmentation_model, End2EndModel + segmentor = build_segmentation_model([''], model_cfg, deploy_cfg, + 'cpu') + assert isinstance(segmentor, End2EndModel) diff --git a/tests/test_codebase/test_mmseg/test_utils.py b/tests/test_codebase/test_mmseg/test_utils.py new file mode 100644 index 0000000000..38e8e47e9b --- /dev/null +++ b/tests/test_codebase/test_mmseg/test_utils.py @@ -0,0 +1,25 @@ +import torch +import torch.nn as nn + +from mmdeploy.codebase.mmseg.deploy import convert_syncbatchnorm + + +def test_convert_syncbatchnorm(): + + class ExampleModel(nn.Module): + + def __init__(self): + super(ExampleModel, self).__init__() + self.model = nn.Sequential( + nn.Linear(2, 4), nn.SyncBatchNorm(4), nn.Sigmoid(), + nn.Linear(4, 6), nn.SyncBatchNorm(6), nn.Sigmoid()) + + def forward(self, x): + return self.model(x) + + model = ExampleModel() + out_model = convert_syncbatchnorm(model) + assert isinstance(out_model.model[1], + torch.nn.modules.batchnorm.BatchNorm2d) and isinstance( + out_model.model[4], + torch.nn.modules.batchnorm.BatchNorm2d) diff --git a/tests/test_mmdet/data/imgs/000000000139.jpg b/tests/test_mmdet/data/imgs/000000000139.jpg deleted file mode 100755 index 19023f718333c56c70776c79201dc03d742c1ed3..0000000000000000000000000000000000000000 GIT binary patch (binary data of the deleted 161811-byte JPEG omitted)
zWD?bqY@oQWFC-z_b7T(Y%QA{jB}cE=*X^}4;2UXGJ4FkjAjOx2lpJqJ;i~7^jAlXL zuecj-D6C>jYBVWfGbGI-0al%vPMEe7&ZZa#LHm$jZ{W3c%RV0S{HguH4diBTPSBeL~0t3V+dsX_8XIf6QYuq=)GOS$a5lk#-kwmAd^#cWq%0j}*~ zwKgAb$si56rNK`g5%pMXY>3~hi)yfINcgR6wq7kR87!*i6-u;!x}|GNV4)9go8^_>R zatq?Sxvb~uqd&3-`H+wnw=Cjn2!R2&3l3L3YEoit$Z`Vs{huWIY}Jw+Jf;!&Qx{Rq z8n`crMLlot4nypd*VF4LQGB(9-H&4~5p%{v8*<6F#zcq7?yYD)mFclvyG8$c%Koy~ z)6<2G@n7QpwV83UH(+|~0P7!ux?uc1?OU)Is`}-*KMf- z{^KEU0)POSP!c_rIw8UJ{ct@T!aFoL5)D_5+Nk5-k0#)m+o*13t0|9qs#m)j8bM0Q zOf{Qi*aCnFc-x&yNrXr%?GhGArHNK|1XC-adJhY>K)8CxEbya1W-tPu*C_qxP9DDv zPn4}^e5G7wF;9NNhXw=sTKcAXpx-s`p`(*|zmsPs0{z&qf%Ey3)lyl@+i1pt>o3@k zA8hj?MYBW0EH@2xjUbKh)09p>p9&Lny<3@`g)Bh$aEH*kJ+-|q_^nqrR_3VO^6vDE z26Dqm{@CjIuU;x{1cp5UiF})HNTgQ zS7!CyUJY>D@A}I-wp9YG-BD1rjVu&PFIy!S$Mb2%GERNJvQ2(9IW1D#QyZF^oY*{v zT#Kiy1l9cPn?O9?cq-j^bA7IbPSy+ynQz<>?j6=|_H-pbaG94*FH`m;WXJjq{H@&X z+x0Hc@<$-pbDW#o{CrxIzW3Qq`I}-)Y~lt;mlStvRpY3oaFyn zcC|c^0vWJ0+vG6*5gvG{j=l!4O~h*TGM##M>D3O&MNKNZtdZ9cKZMbhSlOG?7J2fs ziPN6Wp)0+ee_H#fuxoJxqFo}Z9e;N*UpQmesB;YGGcy?EjKo zg{_yW*h{=VUPH!uqJ)xCSQI2S?+>}K{b-5;tY=0XROUn`b%hfGN-s_QOR=-(@WFhP z1DT8i5d#$8gdNu#eHsd{kF55O7l+=+w&j zSa`qQ*2IVW2ktN^Lu2Wn*WwdmLLY{SrGs$;qAEIIE{EyNTi=g-ce*K-k*l@|VbJW< z)CV0-@WL-$rTnKHw~z$FDc7my>UCC@RX4kiP9=!FtSi){jf}fo5sIr+-Td%=ymmjP z3wop2W>z3SFq#PJc}5jg6g<{xVAjapZs*h%E6xy63buJ z3)Y1eFe2#_7u8#z6C-+b#i^>4@8N;>WE9mfnYn2-<#?MFnN#w9u|05dOQ5UvAmE3v%6VKAMnf(mC zAwn-%@d18`=k@KKJ9wzz;}N+5%VIJG?Z{wv&UwP6P8~*-kUZW6lILS6$fUWqpeIEJSJGR79dpGMfL2ElE zDneHQJL4+8^IP$8o*f?fa(Vf9h!q-`idLK#C9mJq?itg;wN#fHYs=c8bl$f*JVW=g z&+oQ|RqsBjHX55cOQB?YpQqQYR?%^cd>n$IZaW}T2TN%?X@DgS-8E1zARoL_W&>B~xu`gle z(C%J$N34F&u#orxKj$N%BZWwgP+)T{vo~&M=`n4hQGZVy7 z&lQCD4KI8RgR+}S%8B$no`qq|C7k-i(+Y0`m9NtiQ7*uoOcC7Y_BkJRfp@EHpprV~ z;Lnwo>-t{i7|jO=kP8t!E_D&{A9!>$__Wy zROBexvUsdtpZp8Jbrc?8p=4y{tI{H{?|#fO+gpE6Z{oZ>_d!xk<@;IRd9MOPukswT z;;6&+W>;#;^@1PY@k-jk1K(6sdCjS}g3p@1P)cE1%7}qOqnOx}?=;l>z&{aR$FZPh?+Sqo}z=&JkM#7y0ltzSmie)eT_w-+(hbSny%V~~p zcXD?mRMvxsKEG4%%+QjOUDypKSs@nlUw^RoZ_Fk8p*-&And-1<(luKbx_*7ggq*@R zsw*q2Y@T|N-MrMG%#>DYDjoLB^~LAQhKfP+0_z9(OSecc=u7B$G3b9TCWFMv-Me0g z)1Hm_LCHP~P)CIc4O}%GuU1N>eK-+>!dO=zT_G5>0VUVL_j=`=H9?DKh21ydqNp|v z_6$Ya)^Yn@ki~@Uk5Hvg&edaVpXcFUn8oN?284f3s&A8Tb=kmSwTw+p9ANy$`i~e> zX5hgy z>z7JEJQILa+9tA{*Z^7yM^@pctvnW0B(#NB( zygU~?@qsAenuTZ#EA?%f}xTJtY07kcUV zaFObAsf!&+wL=MiQZNmec-C=4yC&xxeiXd!9;aW@h-;tQlW2HMc*mW5<$8$o7$8*t z;N%jaK2x$^6C-7Ou8vMB5z)+MqnDiEuu1*AbJ|&7QAS=kfpRb7XP$@H!IHGWOy|W5 z742}o1|V#^F**L=E@?_DY@%Nv>#!)#U^}DtU{RItQC(0IC%-a900+ftx3efLr*VqnT{@RQs~q`2w%uNw8rLr5oiFO$+2D1QBcYcs$56I z!>d@0*F}X7?F&Gu72@y+RBZXhYDKbF>b8*kQEHnIXdy21r0X8VZ5=;NG&bXZlS3EOvq;e*oQr;_;9i2`NjCDj* zGi5bXJQ%->d597Otv#kYlW18om04A{^eBU;mw{13x?ivnjd@|XDjJBU!}5V3*^CMl2&w@lh1i!c=uH^e*q583v;U8 zR*-;=bFVxhRt@ty?Ud3*5Q%`;DG&ME`?!7#f@)aZriI49qf%JBtYDnzhftV(NWEq+ zp*y_HJsRw2eZ{XfJc|bOcqD-Iiy3mayx9{?YZGj?(O8Q57l7TrC-dPh?B!^jq1@K= zmfSsyv5JcF3#kSDO8rUoud5ug@CQ8q-2kI#sgKBS^7y{c6629Hcc00m)093wDH=`m z#^J?9)&jO@@2R2#0b{iKX1_`eq%X@t8!|nDU=dv;rFI1ibYFsqdfM5-2ds(&JOyU8 z)KdNSWwl7bq+(H4SFG3?pX88^xB5pvmdTCsB`})}gMWu^9u^t`doh4lNxgglKe3=J z$&qc^`;B)!ZSnqwilQvp1Ol$dhqBl6X^^R2mWm04lx20)mbUfP9rLwI4T zI2kDvdvN#g7eMlG+X_kawha5m#}-Y4M)CSJFR$zVEabyIf2maHWHaJM8fL;KMzO5x zy3_dO_AgE|ec~eW$#In|g&@M{a+Y-)koU@pKjmwkW$vITrpywA7Y$^KWC?MnCUAyp zX<;l?|946q))Up}OOc?6`RP`I`enET-ihwFcN@-+CIvK1{#I?QNb|8N%IfrsDMGb@ zJPdDIKYjhN!c(O-ZgJPkv9oXqV|dW0lgmm=4gV#cvv}=&{Cb=jmuPx)8AA}045ZQ1 z2nBA01Rr|8b?*meYgi>L0dEW#_icI{m%0cwlG98X)BXZJZap&qzpl0;**Wc}{M6?D>n{v9l(m3K_6}^T(9!*3hNE z^_{1GyQU^OZPDuY_x~_X<^sn9;tvVTDd(4H7Awo8kseF=-b9(g57LcHpB{)^>|u6N z3$#Ps&Y(8jbhM*RW62+_gOKC 
z?Qv14NX;*O{hSmVd1*H6XDsYjv4EUJqI9)WbcnZq>|FQiiwEUK)itG!KknAu=kf6Q zc;0j_GJVHx4A+X?Y`^d={AnEFc)sqz&*?Xtm<#)oRh&4f`RP;ZF9&;*(&KD403yy0 z`(>$lcQ`=IbRi|7WxOL1lu3=M{a@1V!Q+V}qQI|RxL=!q)^_sW42ZuLN5`}U_+JG4 z9vsR_)#u-LF5#`C8gA|ycz>PWU6x~ zH|sz}!Y&$%i6qt3P%Hg!P3+S62G1MxawNYm>nSxyc+j*qpLsijAM}9^>7rgr^S4Ej zl9OdN25WQkhqqdEpg+LX4_gOc*~z}@ckq`#+YT|a#;JrpU^KIZPVwI}J_%?=XUeCF zmiIJ1Hv4=7tr==s8Kp?EJ3!<-QZh9Q#wcMgJYVb&VULyI@dl#N%=51&6)5A7vdb<7 zzN7SJIzed#@42+V7evt!$}9R$*efRL*tP|w1wGBn9Nd=9Z(jp0hkPLLFP9FFu%d{2Q+g}#^GZhoM0WCw?c2rCjc}FPa zrbLXFAc=-b4X+L(|4_xoB(o)S5B~t$E(#%L<8zc`kU)b~;!V$^vHsMnmkf^tH-$Zd zt@QRnF_1u=W4=x`#TlFS+K!4re~w-pzUwT8#Nmfd{#oW54A{i6K&%)VPTePK8?VIa ziRxgMX8H63WNqc3WFdBwvauW@KxcE0NFuq>8(xr*8X)wgKcO-c)bg~T>R*y?|) zYmh5Ps2yTX0UY_`Q1qF`%2)RS>GaApPdjP)A5@;z>FaLhUcMF;o^i{h`;l#Zx^ zzs6C-MKbS7kNGNmEQ_h7)V6wzlO|bgAVfsCX$79Nsdru_;T`ZhQm(OnpA-I&>lHFeqyE?3<`+>noj)gJDt%=w{E zyx{d&mu0f}Hks`ou8It@{L6hNkHTBy;h?T4N zsCGNaXHf^1Lb!$m>iO(X&FZ0XF^jzTE0`ZkF2BFa_G~=-!?52srU`W_0%~Sm+BA;A zbQ7S!O=Met^=g2CXh+g?E?b>l7`dPw%jd6EByo6o5gM$LNmcR)ou7}!1Y-9)DAR(I zjJR)%==$yaRn<>dQ*}iUQ@>m8ZOSikCoGMMKkU)r4b{C4XnUQHPYsXC?|8#Sj`?@A z*7L@CPhDFzXWCm_^8P>`Nx_{@EGj!u4tR3tZg+{=;2W6Df-MQ7`{N&MUQW?jtF@kW9mi0O~0a~J~N3F>mDF7?0^4!sly3e>~741Da;-# zCRWT=%ao9sJv`rGRa{_(mRyl)hiKo@l_L5G{)PR z*7}hrxsUTEFlr^orky&Gs&d*#K3+P5KQc3!+aE@p_(&=0lb6d5%PW;*OM9U)Do^YY z`zY;a##xZaOD5^@usB_ZJf-NZM5b0xO_aYo0qkUPqyMs;DE~hImq2L0_n!^4Ik@72 z5&k6*FcZU$YEqhw?{I4ybJQ+;lwnxng&H_habI?z=>3?@l?(p>ZB|dp_+xRm%&SEX z$kfTsc-=X8m@oLH4>!2r^kfZ{kPA2yL+}r{nLfFa4U5l%2LPrXBQrUm*eJ{ z64tO_jHydgaawj$w%si@Tp;<` zBMukt=Hq1Zq%9Y51HQ>sjGLGx=qrMl2=kEWNgv%8SKv8evxvmrH|+W7hJG&tKU(Lj z=b~MW!=w^DzNqt`)Qs3b{{RxTAHZRQj2l^v@j$<5ZJcZ!r{pc_oRKnSUWlZrx>R?a z8-++E;y=f5JoXqB_>9iTyTc_JHaArz!F_?~%hNwJ=V~ZRS^?0|*d%P9mmz@*$<;$d&rla~JE%g-ZUaKa+i|&47{unW!xf@_bhNxEqPn976q$=ZKVat`t_>P#s2+chk zEG&~G#zapkoWugV8#+o zNSK?tE|GiWX-I53=KG5e75N+ci1{B}73|Lx=IU93sg9CTR25;&8*=JCd*PRW5Oc-= zCAz1?2HOcr6i7+tqHajN&GxrX&&vQvNkPqR-1Sd-dshn5615$`{o}X9^B9kn)=|We zZ^0_1lq@YkrD`d-S3Z{O)1LTg&&dZPdB>r2D~?Lh!1SX_H4VVQ|t0n&8aPZ z)wPFB_`MpvWMu?*bV_`z_vh}$gE*`sCC(3|(z7Bb3iw4tZ57!pPtHnp*!21BMRnh1fcuEQ zd*O5?ZY5#6j}S_~WMidrYfbGcWp_84&(;Kt+yT=|^y{xmMJ-h{H7zwQ-WGMUBRdT8 zYmS8II@Pr7?!%Ed3Bf3?Dp++nR+v@agr(9{$XV|KNlv@0-rHXG#UC6Y)zcr}cd6-8 z`1=|5*#k7Vw_s=kqwB#?7G9I+-ujxDk?Q^PC!$Duf=SIuD=oHycI$=_At?iao!_DgOXR zRN^`vXOHm$e9+QR(>!yd9oc$WQ!MfHQSg$ZT7DM~xRb<;ir=uNM|C&Z>kSo`R)CwR zFz8K@f9VuSzq=OtK3cH|+x_MiNuHwYHUs&GmvUkIH%|7KG?w*bdisx_daK!k=&s}5 zR+PG*4)ga6Gc1G~wBlcvLzsNLdK^Gk{iq(ex6|_1hECsSTMOFLs3Ed9!r=b^g#i7T zm4|*A1^dDA&($EB57KkNhU_WaaI4Z<7W)Sy=9rcr)doty+sI%!sJ06g_nQk`dWzYQam&RV%+w8E<8s@>WkOcEjw)o7`bE{xaoS zRK?f`^`P}Fz)1f9=1_!R+_Evwv0g~_mXiDQZg3n~X$Sa8K0~otDK@_im~OUE4?zW| z&*rSXHmOdkL5)0!itS2WvvMo8+I){D1ysPR21*sP71qpMvLGs&KWjhmF)a)=&U{4S54E`tWC_uzPU zgS{;8HBuOyE*6oNzd<9*FoPA0`$8cyDaw=+%*K*#f9#(hlJH!3&LK{BU2)%kCuja7 zlSxS~Z8klY^KM>Aavv8xkx6c>!_XY^lET>k0Ka86f5nNaUAlX6mAyE ze$4*>A0+<(rf~|9Jt%bxDsT6bXA}M;52|uBL2^j$%fwtLnXt)PZEXAxfw4}A zwfbDaW+pP=t<%moG#)0WlMHOS%nSt9o~YaMOpRm5R>sq#VZro93rTa_ybQ%Zo}o5 zlC@#`V~Nv^)=Eg|b(}oK{M1ER?DrDub1!DO5tYp>)FX!z?(ZAYM@`BP#~l9v66AU- z?@~@jQ^ow-Z@R9O9I3lXs6p##vG8@rm{9&=;huEMOiGh&6Z1C#0wkV%xxY!2*2|WPj3G6C9QW8l? 
z18uiBs4GBm2&s$}zdf(zg|*gB36BIAiExloZO?IQVZ!by!BB5>3zXC>1t}Ib7qG&c zGH_Lqu4n~OnovJxl&j9z*r>8`wd81$exg)&`}nUeGu;;2idFL%C+#GQ6llal+q(S6 zLCrX!<}BAn(xxRUN(S~=<~`&1-wTSMwB3fqL{wmuH5U$TiRe%2PZ;JgrK*fq+gDWq zaD8^)@Whljw9MmmRIlXqhVF&_KUPv*CyVsyDNc!i4!hrPTU?Kb7?q1?h|S8OU9j~L z`_0Kb;qf|>@VJ)SpmQi#P#-Kvw`p&xug$X)Fy_)ey3)gW_5t8K+uX|{fX9NY`Y>C?bidZY!?AC~vDRA$Pk zNb0woD?b8B{5-HDs(6Ji+P20wB4}vp%h4G{DGibXelI?_L8vEG$*XbB>t#YpKuxcH zX86G&NHqmyLP#HU6X}I-o(q22%kO4LJEr5M#lPc>4k;eg_n{LVB_xX|^03AdR*bpk zu-hH$iD8D7A@jC|l2)&fy|AH{sZMzVc;q@tUt=lP-`Hn~_zQZahH`}gfRJg) zl(G9#R9yTIhtH-ObzG;7zPwP9V)YLFYj$Pz;+B&gwo#$9rG|&>Bm}461%??d&gBcd z=Np}VyHU7wN{v9MK&Q==4<#Y9qIHP1L)5r>t zs4$b{r!}Z=ena4$S4>@E3XdStw*px~nGLs&g6%G&DT6L@DtKhh(Vs4H2sOhm9i4Q&I z-X4iTR#*Tk$^0$u!A_3)AjU`)kP~kg#N&I53g?e&_EK}sQ*}t((RNTg$dKiQzBsU8 z#M#}-hqmOOWZtKGg;7=#kb>XzgbNS|7Zw4Yj zxIqLaRLBbsTqA9?-*N@c)*GHf5^>3rFMD)cAj}7&p=v;HNg(PcmqG`hOK;`njfaO6 zW%q^cWPu^KR93) zMYX`3(o=BkLQ3~HxwhQ}ueJU*!>*Mr)&|MP20N%x7D7^afR29u0K)^y@IoGWLkBrL z;k5RoDQHPTltPqKVv%n>?SNiI@<2;tVsuWJi&09D1&yt9VR3tN{yv`k2=FyQOmCn{ zY^5Z?XXR+Z=NXXXZgGp4YzexTS|s3N|GpZO1Y0!?MI|N@63LPLX>B5*4>U zKYjw4r&L^SNHWxZR~(Dq5MO zEhBZTxd&W>oxBGfEPA*rwvq`_w1d62zB%A%Lf+!*Leon~T9SpUm_2X$7(>_=!;S2c zYcx7mWq_)7V5Zaa4K+z2QR*?#Eut;UL!n9hFqi|QcKjr;jv$bHMT&FUo+sn#s?Kig zH`z})ckfhq9h0e3R<9JAAa;=HK6i+=(|>wSE;xTEc=a&mRXC1^S>yabp8^mw;k2?2 zd`<2aK2}vfKK&KkB#@r-BJm@M*)hebDN7Sq8m$~$`8bB1x2Je7u z+KAX+Zoi&9l<2ZuK%?kzovcq#dK@t3TQu2VOHB*d0nlJ_(t~rjqo_gTH~8a8vi>U3 zaApc!%5v4DDJrPD&(XHVJI5v1Tc^XG;rnONtB&pMMXOtH*dDG@qO!zkCsUEEPwgP$ z{U*o)>zt2Lrna=8UdwZk3S!jWOJnvQ8q@y(dic}u8py?>mgAw>k2|HdrH9@g2?AbQ zngWcPBpHb!#ZlB+QTPLI!x1y#)eX*A1BXj;^uMAdl_LDdTeUG~WL&Kw)jZ@0)M=di z-sZ|5_KlRQ5bvX--rjcfZtEWw*!f*Zvl6Hcu_436cpIW8CDxE{M$IdlIVGo-m+j;% z;0yU2Ex2T_&Iv>N#rz#-C4Xo|q>wZceqfJ9v@R=4VCk+mGLGf+gvg9+f9()dkHlhri)slU zojzacJkt|kPCugOb`j`oe3PP=oZ7CXxe#4$Tt^ZX+pVlE4kYp^P}q)p`eIHjSW0Zv z4Ac&fCxyQd$^QUn8PKUZN7b&_d2TQ+)SKZoJsXZv8!eVAPCtAqQb;<5l59>quITg4 zdP7CCY$T9L0B(04m~4cXNo1``qsgh%DfFmGXv>oNBeEP=N=nph0+rKyZ-OXjq^X&( zMejJ+T-(hFS%^mrPM$-}Xt}pSU8->VIMI{dRC3)}rU1Ehw!`g@T}|o($Z!4y%T4{QMRE4O0;0^8X^7!Hc>Sx*=cr8R! 
zD4*Ito)^89mV0Yvf^rzO^rF~PU%;g{qdbyRwB!)0whr96;qc-SvDP>yjary3-Y`~` zbUcXo^udtH#HrdU)0Om56@IOl=xbZiB!^Uys42^p5AT8%x4#xz2;r8&%G*UlNl*ZL zZS~Qjjqao0UYiu)G6`K2=8vOxx#@w6Ta5NdD#!Hl)W1x z=9ysxQQd}v$#Rq#A0y)VCkk`JbwJCG(lF-mdo%nz()%1UeFIfStlD~%Mrf;VElaol z95m{Yz#t#A&A;ZEHIxy+-{sC9|S9?MLa;!o`qXh+Rf7(Sl}sg!*;i~#zd;RbxV~38hU_Bs<*2}ZaovfyS_bCwUFG;+x-=hJ(XU`;}0sJW{uCCW#VJ8k4LN0#AUkWI+*Jg`%`(Cugi6mk2L zq^kZy3_Zf>=Cu@Y(03{aix!rr2Srx zvkth~CuLtjxCh;izsXe5MeXF7#ksItU-ypye9*M)oYgeK=QIJCXud@B2LeW1FW9=z zmgAT@>_FH6f<_vM&KKMSi;*a6{tV50#d6GZR$E>07)&l%@ZU9 z?WH6rpLWQPOc$t$?0IQFY(O4o7yRFnKLwluizg#9qHmk<7o1r^NWjYYstwkWOM z-Bjeo@AzY{eo{aNQN}uC^k7@$sin-Qap`aQmt9zseK#iEJn`jsM!I|BI;Eyx3Msm- zqM`8^VbPmhCUdpZqB@BoBazi{ZVOEjA68CCt(cHMk+}zjYotoc@oEURgb`V2?*4oDS><)Ny zxu)XWGxSu346{lDyCGpf5|OpJ;X#7SZFSHkmXz3Dl9T2?&ked3k#wN)fK)`NU0NJZ zk=%YbW%P}9P8aiA-y=ksCofZ^f|sB)vE~z}5cNATNiV$^zgAy|l6|#uH$svh%Ego@ zK%YJ?*3uM8eq61H_iWSS{{V-Ov~+4dyOW}@AAPDB&h&Xv+$~N_Nh#7+m7$cOLuFp% z0e*g6F$q;iBS~z)TyX;2I+T?qK>bojdQIVFAx-7VDwf+}eZp=MERb$_Vn+m}<_8Wa zjB=*)zqLE~j==u_XSvC}_op~N6yTk)X=V{pN9-Sh*2#*AU?|bi$ zDLY)3qun4mlsj#yBwufi5<+S6CzeYgyx)~wHwkW_!4f@w%KohOAnuCcm_2!KZtN5BD6~AZY~h!z8H3PHu)o1f^l5B&p>exCPX& zhck@_gOEvdx6G$Z4j$&ZmX{c!NsPf8IV#L2;j44-2M0xm&iunox=voIHhglLJe=@P zK+n@JH0_1K*(!L#+Vo3b-*laTYW zbXLaCdEXyJsxq7I4N@gVVJD&iTEBq&@ikAd)4TMq4INqhmK=65lL0 zrKN}TF^2bib1FV+iKWTzC&g~L1w`3ItCh*Zq?+lr#BZ7^xA_f9BhL63xS|ALB8!rg z-s?f~-we6J$-v$L7F^bpKczrdP4L5?!8jRhP!3IPBV~d>`CxZRozsnC0aF^?Dq2Ck z`HVBXoeWWqeB!M;Bf6N0%ej*>)C^_J4MpzSZ7ii}K~Z_bZIra^l&alrad+wI{iCg? zaXoqkIfdXe)r-bj1?`!(dfMHIiNcQ1luj_UnqHt~>1=&fEx2+N-AX*R)U}11ca4Fi zL%TOhfFj_6Z?f@>HcCy+``O~-B0YL|CE}Yr`bP#yBS|;V-%W3ju~On{3wUi6#_m69 zLimZj_$a4B0>YkA7YMhgKgSAavcRc-YIP{rGD6goycsAKH~#>h2bVld;bS)r&x+>U zp*Xd$pW^=jDyiLB;SRO;)>G~uGjS|`j zxj;D;2?YyLBn>vb$oN}*&tKz(2W4biU%D=MB1())u9pjH2G`hfw)+ zy5!voh{-~fE)ueBr1|pLem4BCj51sRm}wxB#U5!I15S$tC!d|S2W#@fMc2tBJ=>z? 
z3SDQ}$tqT&Pwf$4N9*y%u5V=2gPXjNs+sF6(c5X-=3=1InJLnfGEy7f+uKp~{B47& z@T!?jp^eV$1pE@|SY8G6QRIHmd@QMEdS+6j=GuK`uQAT$fd#clY`Jl3`PlRBi=990@$7nrZft<-g}!8b*Egf6 z-r7lCbl;oFuU@X{YS9gp^C33pj#)&}xpJuWC!KJQIOqu$K3tDIu(=z>A~Vp4zMxRC zeXY=X;8}tuGLR-N?kjzU*7E6y*;|HzYy-rPO04Yg%ml?QkWh9Z*gJ2I(y|sbc}Dal4$6Sx8o; ztn4rhQD<_Fk>^BhK#-+vvD_Wn=e@qVO^G>woV!1WMnp_&^(vQN4Z@WWw zRW6B0QW{j(T9k`Js5}Z;&|7&wYkO)0ZhW!SD5ZRRBQq@PJ?_i^N4(8WqeONMg~<7* ztZVXP2qO9B&@{LNbF(?va9uJ|6dEZ>7D|5dzh3-t$49Dm`K1w_@pcc)++T;EWgjM=UyfGdsv07d%+Ib)0S39&OMtnKLv_<&E z$mfXHv!z2!a=KoQgsgykHu5-Y*{J8f%JA@eE>UJ=I$G*Tc?w(dlpJNk4ZG3@nA*pu zmOaDC9>!4A?9>mDoaYykLD!z)*`Qu^+k9?Vv1hjV0Nx!+L|#CMfBcV|CB!6bhuTWO zKMPysiR%7IDPH3EWOE($#1K?GTSUW?fGCUEPl!;pihRl~X{|P}e72>07A!GepOf5j zpZ*r4JaiHd-yalXF)Bylu4l1hGFa;AgzzD}vO!bQuBqr6Ye2bZm}5j;CXOPut< z%^W~K7o=%{&I3FA6V$RFX=0gtK8S|$tq_fgirM*$ z1A)V8W*($wCm}Ps{VN@ri`IFa-AYQM={Z$NNB$q$pJ1Ws*|Lf4!tmb7SQUb z!-iK-xa~`sNh5}!d{pB8PkoeuP0SS-$+pbPB12=`9R}Y840Jv61%vyYpRdd=UdqN3 z&tSh+N6X}imb0ZS6)H4&%>MjG)XBH@7v3Y}p@UmjQ66?S=?CY$Jl4x;-fRzu_>JF0 zVVwIa2(>{np@IJZ=~0xFkNZZoqvCCYA=*bbkBB4FczPzEga>=M_8a*f(w#Ky^BP#z z+EXC|a=7j#Ol)nI{I~3X00tQB^qO>Y*nM} zYv@L^@FanX`C;O@J!F$GUv*3LypNWWmfVokx{9m~op6K`{gmnaaVbkh8O_1(NJ?53^;X}B#fAs6A*>l&T(<^@<=rd_m!#O_qAlq zbRE$h3V4(68-6%uY+gMJz9`Ck5~=>{Y&z^cN{(w>VQG|<(lYz<4@Jv-HlKc!1rzfa z0r;3c=vF!{k#n+^KnGC2$uOPc4q#bQr9z7tzGvA*h#vLd=W&7CSmJMzo;w!>xsNyN z{1cLkpXN@B?TonMn=4axUdK?eKbYikCrgdKBai`C@g1KLKiR)tr0$QN(xav8h4tTY z@H^saJ)v>;lq9$%vI$wd)fL@}&aQjl=3|X;%%L5IJRw(Jj7`eCHs*1&iZqnip&=Dr zX@wi1UcE5es~U7qHkgf4Oq%+UeF`N0SYh?H(GeXXDYYjvF8TtA{umL}goJZ2{If}J zS4zq8s>kuePN*a&n?}%zs;ajR%5aaK8!{4|5CFbJr_@0UC{I97EMU1>ENB^}CT5|L zbvT_sa>J})L&y|e#f+fu5%R@WnGZYW%!wgz-HQC0NnomUGPd7xBj|YxN}5}dW2;hh ztqN=us9i}^-V2MqhC?G79dTr z@SBC1PlXh`!q#0=((2XPn)3yAd#y0bl{wBKDJv~UZMIrMQjikcXh_{kI+o(pa~{Vh z#3gK#+nC+ffuI2O+?(o2*QfvxGs(X4>ub_U(!geOHUifp{r0N{y2thxTYO& zzAHc*kO$8S49_HzswZkT`^kAB=9U74d1(WE#n3q@1)(J+Ve}^$_S!lLT5}v{+9DA%h?2;Jy4>T1N z)jMABW}B$QlbEaPwTfX=)Rf-ac%9HZhrYT4#{6vBR>iW5WpL_qcwuNgHqXS}7Aa!)NFJ&^ zRD$D*b=?bD=B10M-}=hm6aMJ1Pc`IbGE=W-ISKy&YuQap3IR!yk-o$1&5y%QDO{j5 z{{UslZm(4bw1)KG%->b&qqLn#x@{vXNq<7yZOLxwC+{dFpa-~>W5Ihb0g^{Fakr^n z+}JqpG`8q(sXqnh{{S^*J}q$5jSJ$Z51O4^swS@eas;+xB4U_z#Vtxh>=C`Y#ix-9 zO^w0WdmjbHaLU{*v=xLh%?%)(+?#6_oizil{wjx$>`pC3h`zFghE}+^g3-Ng>)oyz z@gNX09d}mNi}b8^s>(gv8P|opHq0!e?=9M0MwI2e^&!=ue}dT4HWLhxe|D%p)W>`Q z^IZ0(g0>@h;d6Uf;pV1%yY$b|+%2`7I0;u_ce>90^m&g+;$L zI6uVFJi#hGwaf;_-aj=w<^KSsv$FkPLThojualIkYBMtUl!X5Pej~1*j+{VL@_ULm z+zfYPt@nULikMn1?7q8H!9n^!dm-j`(%eow`5)5#>$&gqoqkuRnVm_;b~fvi(UbbK8QXiFgiEt z2+-yM7t>#&l|eh=sAR0xA%^T(V++XyZN4=OGYgkNkeX62D4(8$F;6G4ldmQb+uA3+#%DwD% zAaDBXjy_marusHOLPhKd*q=*u#`i3nEN?B?NUDE#mMvt25)K!6d zl#ZhQf2(88Z))gLUiVA!i7&Nv9muu$;B%s3?#P7qg|fCF`5WK|HA2o!la?B8K{xo0 zf32~muwKeCQ_`=e3D8lHGWb11>K@|q>~Zuvk=XZkW|OQq_`S=^J9_)#(x2h`p-9A= z-<`$2IO$eZ5pD}`;@cw5DL!BvX6Q?Et)ep3R&ys(Bq$GE@ZYg-1ff*$E(#Uuu$GXD zj2eAK9!|EEeq#gALmYz70o|YD6#XfHvBPL ztk`TaUw#(%W$2|M$L1QJicXc5DAAcN(dzYvU)F>rwGf~+C?}a6FML$%xiFxN86YlU z>OU(LAy1DP9lAg%lbcZ~5hg-~%UD8CX?PDaeyfftURWB#?gK=Odn z+!myt*&lhTPh}iGw4v?z6;Rt@I!QJ=^f>Z~C3lBU(1uGXDi`OTUaZ;bn7Pl0 zi*aecu6}A(9A(-ETy_0?6n$LFlIW2oE)7bfCPEu7G|Yq-6i6JBrC)Q(+P604h)~z> z?uTYNMt*-aD@|7$+&!a3y0ZK{mNHYb>eR|@LQ~3dPcVg>3k&HN*k0c2?TdXBXLJ%Z zLSoHqQo1(&sUbe|GZxA@ofaWRn<+bbw4wZQA&N70ic|Zp^e7x5{{Y`%Kfd3*6&{Bb zQQVOUWl3pgSSe8iE0x8!#|U$;m|mBfpg0KzqtPruO%5qLI?XSpGO| z`=nT)s${2;mT57#m~{wp>F${I2+}FcZ#oL)00pg)Dh`0lmSW?;kL;>A@ah2AY`N2J<==6 zlNOkMZZ}=bJa6>1)srH7HNmZJIM1WH8{m@7@{>kU?z6*|iw6jXmIu?n|3QCF` zE;7EMLwpd|m7|C88H8;W!tW&m)bG9Uvh%?*X|1SPO!nLsUXncRg>R($K85n+#ZAtD 
ze7D1vS-RwzDzuh~2u{PG{uoSTXj0hDuu&hqIvI!6J{)s zQtE*aXQ+iONsj5aAgdtkPfsi3Z5JSPrPIJnjU6O_VYLyHe(CnRQ=}_g@>ly5LMf> zGpUlsQE)rSa?)+?Xy5o^PYrdPdjjTMqr?PpU!~rGRb>?1i;MlLA`b|0wUTjFsLTRY| z+h}!2NGnQ8)U6;~m0zAcU9y`Eg#CgVg9vk+)|0$R%!{0fx01JWx` zS&79xm~$0!w&6EYrb%`4@>+<+@{{IGrc zR?CUiVL*)qM1n@d_4VB20SjQ;GO~38VhVi$=Y)fGrz{2Bg#~LQ@3qMM@qlm(Libq# z9Rbs^+*_B&(;Fa+ia~D>hiwFq3J{AC>-k{>rG^zXa1f?H7V=%(Pj+5?Dj;k@WzRfR zHs!a%rnd`vg$=Y|eV0BCZ>V0U`&Q(P@0jQrYN?eeb1Ttl>yhoS;x=560>U)Bgbmh| z_i9R#0V7aVhz8yk*t+U?rE@1crPi@JakxA)>t5B^Tuzz^p2~x5@GMS3;`w!6_ZP`> z>Z<$n=Un>U6-u1wB#|aFjTD}H>jQuE$6$gm@p#B1sDirVKy|ls?9C)1y35MZ9-xH{ z_Dh6*0Q>OcsYunS)=cr0^6yCXDqRw$mHlNzqSVr>-A_VkWc*Z;aAXxzMfx|ny@Zd7 z0;YnRd;4gZ?$)&Wgu|V}&Kl1>13kSV{Ldm40}Zhi>cIdE2i3Ux*{oR%$sL z!DKwm_I5u10C*I_sCI6ncZhnInO$w$n;A6LmZAOhDF@?;%6?Ji(pZOckHi&OtmHBc zY3fM+CIjd}O0;~f$DEL~9Erx9$3+RYEtZ=z z7-w_a=xm{<PrCMjwS=&D>0&`YJ$yuGSd~1=HB}G!VzV1bPK>q*@ zy)e<(ObT!KdV=5QY4UKtnuV*v@rridqLtu(1CN-=N>m@FHl-C!v7R^v!c;7ks+XA5 z+KKdPRB`ev_~3cDBXh24>TlZLi%-EpKN@QdrDo~t%hUxr(0-RL&-I3oivCaH_Em6= z{{Y?br?#Jq50w%9+(K3ITZy;aa~=G}zODG8;KixjZe7k{{W~5Xd~feVDiTwxG9}d zKazY~;hthr#lj)i;y9!Iz~-;R5j9v{R2feT+uGL;g_5It9xlfN-V}_9ZM@HV$#K?5 zzNDb3CvJpXf5c)O%EOh#(Ly(I3bp0V2GC%3f#Mfv7xQjos8(tqQ{&OX!XUs)Q5cG+ zYzZ#7q_Uz3(i7e*We^CMsm7fp1atw74PhsG+~aaiyas-msXA)eVWyIr{MiU6Jb(t^ z0c@^DX7?j2DcHJ@0)~@ooj3LUeDPek;F}qvE3i;a^nF z3^qlwTWPu=Rti?2KvlkfJU08=B(O78&$R^64}kn>m3<)@Mz6RYaa>RSVT%sP$9zhh zANq{-NURp7ke}HroZm+)9?ilm?)XRr+|uH2{fJSA{$q(gU!8nb2A&amz27Bj05EQ zvJaR^-^&fWf))lF-6qzSw&^mK&w8hO$lwH>&Ez&V_!IKL7=ZlA9iFN~49Cy{*N5<6^nW|JFDQWRPM0{tu{Dp%3Pq+Qg=Pde<6sCkc4{{gV2q|UHR8v zr9C*WLWbr;%Q9CbCTZqDS&)`)9eLd2Y z6MZ0`hCDvK1lL_*3j`;7AARqPvQm<`UY$AGga8{U^Zx+qT*cp$f&Tl0`M%?oUu*vW z`h1<=cq^q8r@KM$-$)~WOnR<1@myxeZ6i(`K~hKx0NfG{h`!rmiM4nb z4HYBxek3V8HmC_VN{zUGNDXCr07wKT8#~)_G!6d%JTr!2`_M+?&^-~Dy-9UMgz+cU zIrBU)GR9%>pZNrf{{Z&YhcWtaC?@^~dKBIg;ws`lyx&l%la#=x0{-b3^D|`0cjneO z3k?#4XnI0!okSaqB)JVN*EuU2m*m&!v`RO18OLFWt!OQ|`#MFglX1%4Sn60dFHr?M z0QW#2Tt}khc%%@?Q6_osA5`Xzp8KVDQ(048KG7u>{KhSmTWaS1kX(In7YZ&+FzV8s zP3D?p_B)A;i4ln14k0eKejAUN#f6(sM^eqno4nJ&fZ^eoLAI7y-F6!utdGGqC*-ak z)`HZ;7U{2^Yk=`<{^7bm?&7^$vbYUU-i&)Q(!*~fapb0{c8?H**t>Xs@V}nEYCNm( zal#7dd4@aVm~LZ52mEN%5BNqY)!dvAZ%H_JQ-fj$H?N3U4~6dC$gMb-Jt8(o~*L|3J7YUhER`V0r=dZ6B=p>4K^GjvXpp2+6c9pAw4Jo51p)9s z!w)g!Xp3b=dE}5Dr>Uzc0oF)7&~*dpW545q8#hWMPkz4CK~GI+04*Tg{nNk5@A=@C zxE)gWSmag8>Qb`PgF^a+B!WH|AV++{SJ+9+ElpL2HzcQP6Vnd?WZS7;5PsI$)%Vhy z^SXubpa?5QCk3arqxM#ke=YFvJt;qDCsZ8Df)xU5YEf3G?u&!Y={Q_X=Z?^qPfkRQ zBcytwnKq=@${9ZDN>pg@E)OsHD_C~5quW2C=hUI9AaQwjS@lkYRKT)QzI=|X54sTk z#nwY6QCdg__Uq^H#=~rzb9z)a#>IEEjJoaoLR-oAU8JAS@h_tO+~v7V`Ixb{&l7rPu)%99IE2 z-yP`uVB%vhIJXollN3qlO2ONgnK<^o$4DK7)1TaLqP$cNa3hEwU?{+)M|%kYn5Zq`K%Y-0{w23+ws74SgkZ7^GMeAxDQgccUXDF zOH_W%JYA`!Dv!hJ>?VKdCigK?2dQyD`}{bpRPFu|7EYWK`?qoO9~8uukTBiO3QDB* zGvZ|B=$|hp(K`L4)xMMlSb3BIe1?mX|07>0%FMr(A}s={ib6q#C_C#uc=00Yks#_ivCB_#zzqxTJz zSz6&%O{x`|o>9tGTlIdK9m0Mk7yNN44l`39xg&>TkIhZfwmzXs#kBP!SKqe9N7MTv@UBo-vAdmV3g}s> z2SE2F=GXb+QZ7&{+wX3k>v|BR#@Ekl5D$0AsQ#ze#+a_~(YT9ViVxjPx?@g(zf#)U zK7BDM7b!2&NI~pz9-yNcwcoVN`j6mLeOByso#SdW%;l6SAbrsD5+f1^os!_htURJp zpVCI*-qZ3&$Fz_*Sb{tXjq1IaxX}YqVuwMiZ*5U3?2>=F?vGK4m^R}LcCRh%Y4}_s z8H`f9M_ai5DT`3x?-r>^)jNiH!$2FUh@JXJt=cOXl!s{W`7&(n@wcs!EUkgVn(^ah z9nPlJsOl)F(rc83fxopxb&vdMPv~M^Iir&{I6JU|@hR2-4cX=RBqt?Dm#MY85^HB| zD_UImSlk*3ri}jpC?1wgnOq{say9ofX|(fue>R%(K0uWZ!xC_9h9J#e z4`?3?D&1El_$0YD63^j#hp8jUGHRRv%5t@-s+D1--){M~BmjNtKtGNqDfarQzVjM; zElx_X*N+&={wF2HRRU!#$)x9{ivCj3;BN$uI&hb;h#wJ$5?_EXJL zGW%qxU#j9fkNh|h`xUb@evVSZ{AbbaC<$=AqlBMnOTE|u`-Mt0-=%|zbfSc3t^(24 
z2i{aVl(=d>eizwrbp7lG{z&Ph&X%tIp|1039*Q1ZYN7*bnrAfjIC_hDse3Q|EqolF z5Y}Sh&l0lphQUgXxO32-@H&Nge$FB)xg5lrNs2}|b?-OS5$3fP*3uJahYths3UHu( zBR!QXfC(HypO#!wKnbf+<;My4tRYDM0KJOkPbRqBdO^MXE@|~3sW|bvH-F5xXd~cL zSG2Eao(pG=&s=GXl6a+%^E~(R%&6=!5~#}Fb!#b3kk+8GY);m`#{_Jq&R0Q-)0&7L z&>juTO}O#3lhxIbx{gB08tk|c(l~0M-=o?plfb&1gle<7uA;5Ks?1d1@WdZ465kqV z=#8Fb`$ICV!_mRABn~+UjeW;K4a4ux4dm- z@J~zKKKvw%vrB7+h^AtSlx1~EDN~OwTr_}@$a_m%zZ3JsULxy8d%aYq9?Q5pif+|F zGzp{)y^!_)0IMJ8ialt(JCE-C6@nvY1l!@z(@3WeyY@-^h9b5^eq|epo5)mw#WE4Y z{Lyd{dz1seKeR>xyBJHBY z8!OKKc*L@#{uiM~Inq^NNKAuo`xXBHrY~HYXN|*thyB8;n__?6@3-##<#1s*uHgjj zbdCNP^)xTwR~mRn5~ST1Hz%eZ4hb8k_D2f5BH#hf--X?5^nkS^Pl%3Vnd^-#_JudI zVsAon+gzLP%a-`BZ797ft@W5jQV-6k`ysHAl(O}}D)t!v0Qd$N!6F8VEB?szPvS4V zL&)hqsP<|={!Qi^0D1;GCRt5yNpX#W7+^Zx(?y$W{-ar<0f#63c&DQflwBVn-p zd~xK)oR@Vj0UB+QenZQbJYhCWm)%?pwLysHSj&zSy3@u4a;3INx$e`xJq?3wbTZIG zFuPplG?ED#g~sda&)Os(ynRX@URDnSrReeplgQuvLwq$;v9pZhcz0;A@fP#CZI!_@ zXXI2TQ;3NoJ-I1`1p(`Ajw=Yyt@AFLHg@+;dsLj+Gx#RLI=WHMmK1N<8Xh2sDCQK#bFI zneh>3kbr~>l1hz|E_$8t?Ofr`FFZ*nw40RVlE!(=(K=(pk#Vi|QU@y~O25S00hxf% zAtZZkqH@N7#-AImujD_>;Ra%(oJG`I#^w;#TcN48)&Qs|6RVfltNgsMei-s(H|Siq zl^B7dkE#Cv^DI(s%Sim86ULrr564@{5d3m$ohb z5F6$N{L@dwr+2z?QiU%g#$r^XD+W6t^aUpvhr!lr9DO?Tkw|L?4FQ?sSb_c-V0X z#=Rpu002gxGw;H;(G8U*xd@`^BKzTB-9Fh;W3lWmK|bzyRr62BFG5UmI&J|%NFJM8 z(+g{gHIF1BONPz-EdY7yw&x1=iOuqfxXRXrqrP-DcFWDoh_~J?cWZ6vLLxkNR-C{+-z6T@`xwwvrJ7EC%&}E%lPUubf$u0&XYjr~rH^lM+O7edRsi^TfOyrBfWFiNmmi@=miA zjEj&$j_O^O_{UGwV^cEKU|QqZs_mqoaC9K~;$oj~aL3q}IlZkv3#Zvt7MJk~KdEp} zj1;|Um&1J2;s@<8!eono*r{ixCZO7!GEZ`Lj?-_wWVbno-8WI?N|~5wq_tS1W-37& zC`g3+Ab*JC)1Kj#{L@)XWvZ%*XTi&}*T{!hWy9{=Tw+K|RS>*x zb9)IMg#j)bM>bQ$$9A=+)FYJc4dndMB`~P@6}i;|zKUvHLdU7@)B<^p)nSCmwl%cl zdRgqz`&J{&DET=z!6t6;rD*GI+vW(emoonV(f0>2Ei!AhO14a5_t4CgO^`h-w3H+K zNf_|Zqr>VgG=90Qs!M+e>P7UO0W=F~lPc*h1 zc1y3zbU6IC+#tB~iU18p$p9n-r71`QZTJq>ZB14!SnArcH;sjwEC3n-a!rR>B$7&y zpu?e}fwZ*3%x_MloXIDo{1nK`DM%*7j<&Wc#G(mHF%Wy^)NC#5d^Y_M^Ehsl=8A7% zEp3M0{4}CoI44y#0J_*bt}p9{q4%z$YaM2l&>WE8b(8YK0dGix*WDzGk$!(4;g6&{ z7z;3ZMs!Bcx)+nSnb*W;KfF_x+%X%nqR|e-Mk4FHr`Hw|p^$SS zy}T`tf7+)J9IZe904~*Ap}&2o&6ywH;_dhq&$%f&&8=>j@aSHJh~$DoY@uOe%bqrz zE(%-WEk0cQS>s;aTJM@B$+YtDLDH$NMyZrW7G z9SgmtgtygM$0@nmoRY|Gmu;hcTbXhP`TplAfuBUaVbf*xMbD>P}BRsx71tB4<#Ip_kvzS?Y>8H>u)EiLV>@_7APvC zmpEJ;I*2}qV!|~^d80|9DB8fN+wjCT(2`*ztKtzfg3k$okyeqEvhD4EU|n{pR|GQ_ulc zpawj7dw{!gIKnk31cT{g`t-($%HLWfyx1$Rm5`L>thfQjs~3PtR3J?w$hAcsl_$3afw-s}%03~tDTu=1kZ%S4Qv-%pYd z6!3)dl5fjW+X}Grw%7OhyEF1UH8LXW*FY*Z@Y`?TRXyHaQ|P z979NP=SWHrg*c<+03@G~IKntBhTMkqt`hdhj*&J_Xy7mnh1;)f)@5?ijL3OS| zvF#MnK-N-Hoz@EnI9lb$^+mzAHsob;NE{W{>?PV-wb~DcwYp}0lNv`Aa=gSqth$Dh z!m2Uc3o+%Xlz^7qJ=3hZ5(0bHRG<9NdRRq(2(ygE5WoYQZHePc%}wzLvQGJV+Q3R$jAI<%$5Or;V^5S!UuHp1g>@v=S^ zZx9q#yyqFTsJ>R%7rpsnOi_|@Q=i!e2q6pgOJ|} zvXZ3VksW<7jnWfb_NSgx%1zf_c0{7HPif1kDOT?r3ny1Fo~GOM>wG^=5qatgKED+( zw{^bp@=_I6nt<40hLV=johV2a2^|0(1|EAXY@unVzGS8($`DZTHdL;*>HfIeNm3Ei z7W<_oH3`-LLy06`WgIgscFhte-^gl~BWWq|4UMdP@X~F{EaOr`xnh%|#HOSaMz8v% z{{WUGFv3KJ{T~&~xlq6MdkEn1=v7ma**CIv19fhz5=Fed*s(BxxLj^JgpBb@0@j2A zwI~uT&wum7;9S`s9HWRyE~uN?0SOl;%$}Vv`*csWh zDXk||l8c>1LZLCPP#Cd+NhuQ}e=%>0Sw-%9oMNNe0DkA>?FFr@`!>wmZB*Jnb z7n1TxONuK<1cC@4f;#zM*V7AYa$0;`PZSzn0a_H|bonG$0yeoP$YW)hTqi_U7Cce5 zH@(%4*fQ<>DLK!P0(S4T&>!7bda;L02?1ZewP=<$mmQsnq=0RGtPUIpr99G$DI^j+K=a@5 z#?tOlS3uKZE_e9hEbB{SxD-uk7AZWo8=NSoL9$MLwDT4h{IRf8%sC{-*-q&q#Pi?r z!hue)#_1W>fF|i6^7rAujAV$5jRh5Lx#fPR8$>okYh4sl3aoVyIRod7hgm;xfbPdN zX*cxS-Hj8_m%qVeHUL$wZ}{Uxq`CblCm1%kDgX|>akYX#imo^|JvwcTCHqi{I~_cg zweQm#1f}oaM4syiT1rLAPWByew!b@+^BiuH-HMIKN^W;24+oU!dE$vW+>#0s?2z96 
z5a9#mdtps*;t^5LWZIjNs1?Opvq7NMl6m_~7G2xkq?6R(=y7S`7LD$2VI$C@KeUTR zNy|pa*?Nry6#6|DnMsN_?xHZ3QF1?ZAOqI{)naqZ{{U$-%a2Jn^+HXG%=qXq` zgU|K(;kT5`ma$80DI(yNU&{@k9NsC{OI6m+t^E35d_MrW%j+c%UM$nlx(}4u_I$6F znbf7}fgf@-li&xJPI&rJvyI)usoT*TKImRa$4%>HKfLc$=1NL#Nf?dOW zqbhF?7q@?+XjTrKt1n_dnT@}=1}T_{B9-66@KdnGUw5>f6$>ayf$g@P|f!b>A?3RJA_Q%_dI(j9v#%h#u|nPsBDhYC>Ba?va)v&f#(_Mr3g4QuOtVc4d|~ z>HHHPBdJF%QCk;MQ5mFslZN(r?li0x&<{jLCHiL`vXt{u7IMyG*-8FkAOq$0YQ}9V zPxgI5Kd|q*Qt*@g>M!DNs8ww&>A3L$y@xz`01>*oU~)Sh9v}e+rC&s8Dj= zT{a{b40QgW$FufKpX|zTb@pugp>VuZe|VGlkE(MZBqZM8Z+uu#4Rx|vN^f#T-gv~I z;CVe~XY(ht`UJ^R2=M6x;f{NCFW8bN#WG>e zj*_I8S#_{M-sJ=gZ$orVJiD%Ix(}k@cWErWD=&7YsS1@j=o2b5s%=7(E!Gn9B@bm` z4jBm7W>Ue3_J=}lNhl{_k4@}_K$BDsqk&qL4P>(+ZIQY_S3UgkGHFA#3RuF|YMnG({{U2o zq=iW>U+zduKhInoo+mEjW&as`6s&zx8YYUme*3#TYqdui6M9%Habr1b*OP3bJfC_Xg zH_aVzo@v?$YJ=}E0Cwl( znX{I-vCy{4cRENQ5Jj;EP6M4~F<|3m1ZVuk^@}nnD~UNSM_hSocr-np$_^kl7kl@X zAzE!8+3mUnR)2Q40{{Yh)Xu5h?0k9zix10x6$yl|`i9Id2 z=5V%w#cp{EnUc$H6xf1NkV;4d?_qwNzg||x8xv$X1Td6&r8u`1035=G= z!t)91=87uEa-I6$8gU~cl#4lS`T1{!3zfEultFuweYtIZSWjIK#mw zIs%5kjkmvCEu}b;-U7-5*xvi$#Pn`Dq}NJ?zy{y0c-kR{EYTIJB>Jv;xrF3vySdJYvLIYd1F|ept&V_dv-KaeJN8M@#j_)6HnI zDxgUsz#DYwj7~JzB90{LAZ(jkd?-4}%iLWC7*Fnt^&=U+Y_okBZLhm> zlCud)Kv#$oExsCY%?&9$5aj_#zUsK?jRJ??H(CS&2P5Qhf!0cVmdq^Z(sf*&{P3;y zp3v|@wv%wKTMOT(@xlsBju)~}X(Mo*Jgv_d3Kj0SB{}HpCBzF6xCZ>W`eEp8d#GGM zU8F9JW!9A4287!C&0fPC9b23CY3FWu`d@?q+BjR##{JN|kBVngDd}^)N?Nk7eZ}!3 zycBq4mZ~bS2YcgpNjD{FxxYMO&BMig%z9qf#F9xw>XKIil~tUL-DWGUq!DqbAe8Op zs@TyfakLizx6azw%am4>p7GDDVZeVpLtkz$jma9pP z8bgi6XfpD`ZW65tRlZ6eBk78DSTClKG-L(66x3Nf5|i?iUD9EpT7U&TISsJR&9>2} zD;;+doqS_SMnJ9&fbSU}{72oV%RGsJCU&%#ff&1%Yr3Y-2@}qmXJ>?+m->W zIax?hwBGy^Dpr?aIHatt1vW~QHzSz-7@^|88=tLJmYmwkG~vTx+`mdB59m|N-HJxV z#dts?MJtP+&bzfsb=o93nL4$pvfHMgBo(Kb`!!nQt}XjbK^yR@mfFYJ$@dD5!{;rj zVIQq$KzD4pr?#V-QdQ8E-!u2m5;%3Oinim!5tQV@QD z-J{}`oL!JCO|5K6%$jSmP>y~#t;W5F^HlWyy7A|}Q_b-PpWGg$TsC}O0#8Zw367aa>HJ*VlcI%nFvHLIQUMSS)b?zTiskKQlT(DqDL_Yo+ z-)-mNKIOu+m9wpgXr|NWnXFQxp-Ee+ zFOUMBea6{QLN12T!V(loRkzA!+|%No%94iu#w(kVx?JOs=I{*{weuvBZK8pW7l{m# z+j-p=Aav$)KMaRTyabdg;Xn`$IWY=AHFFQPcW5ln!WkKMpFyM1s%EIUVfQ92&{+)z zY!Vw#zOMM*=k`UqiZv|>W>#6F zpLbvoTse9wH#jsQn>KyLZABB~w8iAG8xd(~-L(F_v;hie6o9feO(z6XRJh+2H zO-N;O+e*;1wv^o}Ng~H>&Nqr+6fn@uByj-X#}IQQ5Yj=+m6)Wmv92*2SQF&BXIrIZ zh*cV_ccaIE<8t0>Tq$LxBqe1a7S-M)n+saWzot0EE|I`nJh~NfpB$=a4!-wj_=^=C z8eK8^gc^MZH!^B6BqgRJ)FL??y5ZR<4WW=#E(oIt8kDiYx7ZN&!c`a^3%R+R-dRbZqo3L1dAR0!CEi0oGb zVRq|=>&H%BM#*lM4a6E9O)E({aOyp|<=m}SNzK&E*-L7rOMDiTN*0AC%56Z{gYm@% zzK%#F+>=nK_@x8jkg@7Yf;AMI{Es{WpyjYjKUS`)gB7q!!b-)DmA}stxGfv!n6ZO9 z5~J#B#4X=FD!b=wxXf`;s;S9_avnqn8}HN^D8G0W-Tl(#z)rqlB&U(YyQFgogxf6i z7rvVGb#&&cJgU@tTpl|NgPKQu!So+RLbV1I0V+r;|(EI)vA~z$VEwXRRXo9U;RGaKdd7m%W zmJ4I9jBNHuE=F60#UzJPPRIh}jk$gpD|i9co@{_-PN=ulQm$9*uc44N3$|_N)LZ^N zHpc$Y-7d5$*d1pZCjClfB9etFtz4i}ia;mYCNke%M_Pfu>i9aIo@o!}nB&+9^b2CF zTH2Jwoy9H@(zPmg6LKp~+;~w@Y0^BvOJ!e(>5001nZ;y0%6I<&NglRMJ~>`4#*^Gr zT4!kP14KgGIqc85*h8kue^&c)Xb m4}nuZRaR$+UAdn+o>wu83DFEqvo6Pdtq>L z5&|MZ&()Rn?&)o`lkV>wPfPT{@$ylMyz|Q&{KWb+r((kDbbY*jNufWs-dhJvCx*F| z;01!ERM=!%{zyxQ60mYRQE$#vGcmlI zmU?GugIN;p~hKl6?H>v5Kmv%#~ZTUl6$K~?m6QV={eT`Ci~o+ zEuks8q_nL_}Y1m%@$jL+Q5;| z7>H|nQF5_sl0C#@2u={bO0JZG0)ZRj7F~S1Y1k2Mv7iMGe>`r<1)1ct-Vo?Qu9S7# zo-`7g&;|8F18p`Jw*2vzNFytw?Vk`=isP6VNf{v=I430~%PeXs(k^xv_~H0YY976} zYUza;3HqXt3KH4&Har1W3Hv1Ne0?&b7KMZNTj&>&>ob-8r_`k)gpWgq>=3(wXj>K~ zJ8gvr5u!lOo7k-~S% z$LOaH4>SJ&z0uTMC{iLxAvXK>KoKPmSDfqd%82TvXm*Zwsp?8cEKtqOI;UTvtc`_`>PO^G8JCh8 zDEZV<$qDLp$AP!1+k^wldUt4YvDA4Y}SVpR=(F7_3(chE&38SQ_EH z4(?++Tc=ee2x*1erurq*AgKfopDW^OQ*m@Hj*1@7#YU|iT)&$S{A$Mo%s;q3 
z^OfIb^MH=OfKl9si%pWEuge_hfUIIBrlh*#NK$}a<7;1hI3jX=wA~3g%Ihn*QBS(} z0~#@u=dwHzcdIQFD?-3F^R^o-%_*@CDYm-MD^M!ZY(d8DV4wC&jekh=lsQo0%C-bQ zQ5%oyMnA(Ih2%-L6ZSX!%I^m@iM^|tN|u8i4V`y$WO)!yp45q|31;>o-*7!Rd!(nC}J7Z`UX&C@L z0v>}Fno5=#9$nf=;nr1Z?Fp0IbBo#9OKdu1cd>$GT|vbJ!j6mgpLmm}&iLWnpw@2V zQ;Yu3+-mLn1y~z|K-1CwVxayKRIAk9ETWwzt5uT;a3pGJ*OauCg*RirG67cm^Z8OX zk|^ImBn<%X4-D5woCztr7eeXIg`q0t%b!eh5~*_w8!0a$HkWG;q=BSFc&D!SPC7Rw z><+}-)B6tfYN>5o%V~ihv^@%;9j`d~lXwA0GV^6OG*uT^tvMPCRK^={Yh{!;zV%Kx zfPtlHx8AwF!xOkp#nodtgz-|jt@e46;zp*z)&~AcA8o1Rp9hLK^(DIGIG(l4%IDG} zmI|##;YMedW8pJutTTUjLR=%}anR{G3932EQqkDgA5zuE-Ln%DoP)rq!l(3uX2*)f zCxy9IC_9BpslM?Z@RT+@xng#okh*dDT9+Qiw0e?=hh@MwbceVPnvQCnxj5A@pJS)+ z?v5>Mw@Y#3^Ks%2%M+CRjNvTF?vdZj2cuDK+2~vE5_|{2Fe+WFJ3v%<=kZMkq#9gC ze-he{Up!9J@?C;6F%I>=Y0f@ZvNf1pI2kw0{L>1j#~v}&ZkmS_^Yg%s(wdUmPsy!5 zSeS!jaG1TyW_Pq7Tc?E-BQ3$|m@}*Oy0X-|t$MvxaPu;14?A(|t!EREQ9&2znAY|* z4?v%6Oc{U&a7e|5E*ueASsi<{-`{&+!uM%(@o_?=s0$<%?Z_K`SV;?4y4dbKMJm7b zhBaWZ2rNPI04-n+GV4|gH%e4)NEi9xC4lCYam9O;TF*ma<S|@e)jxRT{1p zqz})_3v5KEqo*kfMP&&jl>u+V_5OHs*rMa5NiJ5~x%*3zzdShFbW>?h0E;TLf;5sZ z(|cnK1w7hP7N)4#u-1!n=g$fDWpCJ>R(`Na(n7%;hW`MT5a`m3XmL3z=xWD&o12>-JYrK?Znf`iVhOiBjxlELYdX?wE&%*3j9B1?4vj!`^4!}RDZ3#% z(r;^@dF728EC4L2!azEZn_H>(VK*0A4FMQJ!L`(+*mM1SaDq}?5rixiC;@TOx@YuYzHE23B9q4OHu~mKp*3c zB<_TjQAVj07J#rbgQQ!SZHJbB5S(F3Yi)kSM~JVcJtruzw5a!$9Vs`ql_o5Oi~j(C zi()Gob)h}}g-OwiJa#P4s(n#Pep)nlsYcp`!0C$JWlyk)(hdq*Hg7lti$;?oQHYiR zam`679=Eu|SX9PFU#%)u*x7H!7F8F9Zbwi$lKfPw(A<;w?T&xTlZe^5e~Lf6r?xKt z0NPCKK8lUIWUSi9o;*qypd)S3h^e_7r_SDZL(c?>K?-r;<4QTXq889dhbK^1HG-7d z-nX&$Umf@IZ2ti3(tqhY?=f-Fi6au?-rxI)Q68}3Kvgx=w7 z8!Ty2*aLC-;TN`x2SRo;;iAh{{{ZrJ+vS0321;I+MHRQDIcc?ogcBcG7U`f7vBeJ+ zJCVDmKd&V{00qgF!^ZonJu$uRmVo(fir&WG*^jO${98E*Q$17zlk0aDPq0Y`{{XhE zTDGCq!m5e?0Hr6~a*f0P0OBM5Ci;a$9%sZ(l|!@LR1c$Z+*RR>xM-- z8+$nMS0^kbKgpcBQoivX!5o9<_G#;nr<)&^5l9F9DE{MDm+?V*sY(5(ed3%=G~h{R+fINzC{9DVlcL*HB(@j*qS>E|8r{aM=2I zFZkOW!)V~Nt#IN;s8$*$^BvfplzMDwLBz;ttsT;o05-7zV#`N{M(myxbdwR7P}dk> zQ}X<{)8x!*Ein;wE!Vs$N7^V@k<9PNZ|RN;%H9A)Y(^n)S>b0LPg=U{wC;Ng0J$el z;Heg9?G?ewLY`uyn;e3KgLmA3`d9~jyTY4X=}`jR4mqqFBV8c8ZNE@CuB8Ud*V}lq z!>qUFk)O;kYVl)LDRU!Ksghd{N07sTZN7zaQdBk|{{WryG0rBt1h>;sC*G>ZU%= z`9nEpT-O$6P06*)%{G%dnL0$6?JZHMOo=H|k4jilP=?UkF1Sj~v?0T5AdvyJRn*W^ z80usV5(hL3*tqVS28&#hNEvY>q$pQn6E-(G9NYk~weNBP1l(I9NXV0IEt>8RXI?G# zzs*_SBEip2*LDddsPIhEifF0F+>)(MEu?wNO%}uK>R8Wwh zgEj&I)p9?d$LC-#$Zw0jGzp`STT$wS!!n!`(C{{YIWJ6J!u zkK!K{#V@7BDn?Y{o2jr;b9F@`-n8U@=*MZ~b-?2_w@CDI;d8FJ zby#S*^2egUzDu5)@I+L#s2jP<%~r_JTkmE+|}{R>s^GfLZRz0E7#AoM0D9LZOgFz#ROq{{ReX z>p^U$5zRoPj(ctM#+KXy5Cz*xG?ah_HqNz+Z3|J;dmkZuBk zK33g&AS%IH?RnXvi(RT^n$H`!b4stnc*m0w`ow13Wx--ntGQroM%UW*Ha7z0P08*# zG)_pb@_^4q{!)9)*J_SlU4X9?ZxOWsvUS)v! zA&QkL@_%wuO1b`;?qQZ--YhXi&~QBwku z->W{IN4j{QEhl60!=fNjjinkCmhF8gHd6H15H2?#JSvrlb4S>&g%RRGR{P(r_QMZ_GU_z6)m$uW;Xa)QrUi6hAstQ7g*7DuyQGeH z2Mzl)flh6EEmcyr!E=8@%wao)C{wLfJx5KKrruc3W>T9>7Cy5&NJ@zJ9(d7@r7PGi zMMR_@7`fY@ENDg4x9kT5Dr#5ol>$Jv=kdY}VI;teH#83o>_3hhEg>QCkc^3!84TNfLwEM4PW>$o zNx1d@05gbeYxD$;zeOcO`a$)?Cf0k^A1zcAKJZPy?_!&@uic}0`qfO(i^|Zp$s@-F zM5y?azn=JWX5DyhlUyM_p zAT5Ma5BpL5#;-8qt^V~d@jLGn@}?vSi38V{LyG~?2@^cLCZ~wyf>{5B5QBNGLoiJ;InLl6+*RbHNonFmSan~irH}NH9)hT5uQLpAg+iOKCRzK3M5ojl;U? 
zmh^7?RYR1AmeNC(k9XY)g0(G8G=!^CER)Y~KA7z!5vOt+GM}nIO-Jm1^CcaV6m*Rxd|Fb_$Z>PsFM*fdpDVdh3{|8 zhPL2XUc~Uo9-P$!Y$2$kduSX%BwY2~$Il_&1=n+PdveVQ&{Z6u&w+Fy2IkXadXF!| z7R+tLQoh|^cq+yg+srSeo%c$F_}bIW@dq?7P_Z$rPU7Ck&-&xfyrWzDW@~=$+-m9D zDDUo2-=pGHIH0X(j8tS1sXomsuH$4A{69k!EL{HpaB=Hz#ddrw{{VJ!zBHhopK_~GoYa?)mwd!ixdtSGxAsBDN#v#9?{WMO^a`uBWBaG`H{!U<^tiONvyEFEYiUt*Gl8*X{q8n+d&w@ra05C=jBENHgt7E!AA)->`y>?vXuHdf_V z`0tHa4Hgx;hQh>=YgqOF06ZbA&oq)WTW!m5I^X!=TelgloA5&Yo`Nrbh4G-l!EWma zZ6N_#l9FycN52Uz=~`$OUjo=ruu_KL?~MlBB)QgJ-jM23CK; z)K{X<61;wPm3hfIqK`6QWpx>1t10(`2t$vBROBVT^raxIrL-j>O1ZEb?r-#iWgJB) z1mpt;gl+k+KI1^M5uKa?`TW&akg9QcXnHhKQlc(f2Y*9sT^c0<;GYx$&nsk#gG@!# zhGVj^)hz`4_QING(39eucoxtjja6NYQexDm#y@xLNvcFPuKVx~^_6YF%p7mqwQ8Yr9->-PpD2vy{dadLhi#{Q_RXEU>shdZ+6%MUQRGKX6kmM zW935}Z~X5Tx8xJH7j3@Luo4mcxJ;Dt0och`!uP~v+>qcBYYn>!ly59J$QhO&K_FA@ex1d`-HpPmE*h=Q+aBsKcuH^x}4$a3VK8OQ8TJf`^Sa3VnTX` zWw@RF1;!#~{G90zxl-=%6og#0M|--I_dI-A@lKe3N-|VZDm)`_e)n@w{{Yl|aTzz{ z;P2F`@oGJsv{4^=o#VZSoAPn4Z|Le6dqGFV z%Kj8iY>8Jc>pG6UeEAHJ=jXc{sWghM9ujy(4# z(-FjHlF@HMKN>-~@dYc?-v0b0Aji^{T3>`;V|~28TVbamoXqn_nqHL=WdaW@B@E`} zno()XtczJ(uY5G%5YA!IJLnDs$!w0Kethsn0sQlAt;ec@#aqlPswg0&D*M=5<&UQ_ zd&cX>hg%_11?_S--)*pqt&LFD78`nRg*2P3*=z;K8-ZbkgT-xb0&bu@L=m<5ZH+kJ zMTsE}#D2c8+@&sC5C*2wwCN~uPA~BO_ynPY$UjG2sNKK9;jTv5~PUpk_0E-l@t#zoLezjAWaQyQ^ zHIfnHfnY(@lYfp3*@5|hn6CH8L-E;G$mDIj<}Y!%*?8wXzsLs95z{1p=2k6*ZSeuv zehQAPf~AWK`|;v{a$V*&V0?x>vvtF)2v{u=PM`_*<7-ZB^jawi z+*l8B#*#oFrvu}yNG7wO3viIKc)rMl0D zCguQaf8Foi-~A(r?X&~?7PJ2VN>8{@SYZD1AMp=WMpm1xL@M7LeB_HjyUHlzU^qzx zfO*@d4<6}Gwf!o`Cx~=Bvz6%zmf?{6Xi*t`xZt{^I;PMAdQ)?(>Q3D+y|MI{0L7q( z3Z@S?A~@~`TN{^B^VHZplT^rbxpC;q=QQf1aT_l=R)tPWue~+tEyL2oYIrj8loAr+ zk_u8aC?@3E{^N=Gv1(Zzxf`x{+dFoVk``PdSb&kj!>=p$C&hA7(H>F?R#|Oqk#Fk) zEI$l4DZ7a3_#`tR3XU^-dW|7mz*svDd!%BAv_GlZJ_=@(Ep^z*7#}bpK`qH zRFWBYBh+R;h-c}Sdf;Db9Fv;sS*jLYq*J@v)h;L3GG|F~w%LlvOG{LVFE~P5Xa^fM zRg|*fLIRaxh`o-d+-uC5J1b{=@8*777h1*f*dnH5!Lcsp@ys0uO|!oYy3|M6ql&bS z9cL<2-&a!0QeT!F?@-M6+8=cxXDfj;*n^`ZXN|PPHGTGci7y^y)TTa-4?LxXLl& z&?6*}bdhH0nzY;>+6LD3!-o!ITV}?<95mc$*RUUww$O4_M=AD?sV)HC{{WtMh30Rf zuQ`{B)AOJ$GYc}(1FBJXPGp+ka2uT=w5g_&kpo>hNBzekasZ=1T)&~lsu^w)rAQS?X?EPdq)VXNt&PSXWmDae zBHe@GWjkGg(m9^Z!NeYy7k&Ni2<(crQ#3OYja!c&Fvj+n%%!}=0u^zFgReHyl$1)1 z-lTxL_+1w%7dU(SSki}07DmM7cHecRi`!e;+4p9-&Ogbm8>wZL!r_hHwYdKPRlJqo z&Ob#KNt^8nqC()z?sm}%wU%B{{@f{(Bai<8#Rw=tnAv_HeN3bbGurb}7rV zH)n1ui5iL*2sW#0DjQc*U+m)4<;DZ z1T2WLkKG5|eYo^E3#ikEl^T|8;=~$@6sbt3Gag2_d#MOYO42mlFEZHz;ERo0n{~wl zYNCb;BF4%bNg8!`m29ViYIsX37+8A9K7_7@z+p*k2?X-K*yKgos=TJ2!~)hnX4Cu|{-Qz4_{<$RGJ}+<75= zwvM!Z=lR;Bv5|ZwpTytuE(f}uw>nev$EI~$_}@T^mfKKLwJe@pMmK!Zgs+kt<_*Uy zbH>iOCYx;6!8RtzHw1xvVo`TO-&h3g$Zjvo3fwCCjMYdK8|cA4+y&Eyzw<+rTk{)p_3H`troo-RZZ z)G2;>BFb}6u>c)JfHx-?+ud#MwyQwsBn`ng8=K$P3%09Q1T8g=wj%!khwAvy2K`G! 
zf*0Dqsz|v9Zg^5qBSapvainc~*lmnRzE>+9>bA0!Itw0n#DUPXFRw@%+&1&Z!*g44 zzDw_DUH4kY#~QNM@gl`%+8wpRm)85^YY$S-wYZ&vkDnpFFf!``%4IonN$Jkqv4%bi zch2i^F=YPD@x9Zk*mbm98LF~Uq%;qoo-^6pEnFh&l%*X+6#4IkwU%1C_()eER+H~L zVOybQ@kg0-uIg2kkU%%y*Z%;967Ge&+~&O%b{mkEyDjjkuegM_IadR36xWsi0Gl2J z<)x(UHB*j$@7!wkCnxZ3;B=N6vi|_%a=1;(DkPFNus-~9mom!QSQ5=XL0+;6x8;H5 zYGzgdoUxXj2;B1ga4Uvr8JbdqR=6Qvg2$c`g_9tZ8c+czZ8rIOVTUFQZk`!zt;&p6 zfC^IFN=1(QkUx$J!+|w1v}Q}2>J%0<&>?Z^E34KZ*;fP`5zfSa!yis%_s?lwG&VXT z2||dt7e07xY#`SA46O)yOlDj=q%G8>tKdz}Hb@CpHo9v|&J5G%$&WS*iH_R8rD4F~ z3H{|GoxM^{IKD5-Oc-`2E**$dIjv!^b4YO`Q0>A2-6JqsU6)=+#!#0qJUDhE@hcR` z6XVT+HZ;hSSq+7>twcK3r6egMa7q^F7a#LPW2j);Lq&+dDyU@=MtF_~pp8N5ASMhh z0?r>_9th+%JBiixGQ(uDmXgr}#ImN!$_ercQULTP5{~BG!|Uj$jLwm*Y0)5OiE(7r zvP=N}sfbM3N=i~nN=!!^xj(WATI>$zyI<~;h5!Co9hY z+!Y`Te`sQvwg>x*`@K}w8Jz6}DrA%o=4RKjl7AdX?ABD_4O(vGPmgOU$xYhUGupqH z@HviQ`9aBS1*4``dOcb;382^K`zPS3F(%|*=X3GLi!QsfTg0uIZACyRTwl)^N>iLH ztd|1X2ER}`iX0FY5-seJ69s*KyOnK$)?}q>YS*~{6LLW7?!p7XSYzl*a}jDZ z^rvIo5I6of9&^nqMghS@ZBD1kqD-ez>Xd2osYn#r?KJva5Z_BkDrwM@bSxyLRyH6M zn^@wPJg`ji;NV))LAIdlWAW%2=%%0?;2aH)qCv`XvoxyaWn(kBm%goXT4Yg;G`<#K z7TZe1l{jo)?|Z{`#*Gd&VSQM9R>>r;g~V9e#B?6tkVjXxHnav_{BLb@=;5Zhr%{bk z%lt2_&CJ=ZZf1Ro9GB%x`nfD?Pj}1 zbrWnH!Z%N1AV-pbc)e6}Kjw_D!#u5?myvT6>X+KN4qD^SnGQP$O2QdZJU}ht3WAc5 zg(<*PQnZhCVN7(C(8vYG>qU+2?lcSep0RVqQ84hx#Lsr4J_LKjo}QthH_1x8JyhNt zXWko9a^7T(M#<=!%vr3eko3vb7c?Y>SzBr)ZK~Qn;3*{_6>c#l(k*LWk^UH`?F&E*?FvGR^}7?ZE{iHX z$Q}i3eb}>5o5mD0^8&guq9&8@)(ng>VFK)HLKGga8Zf&OVLbp_*wMLsyOdPo&GO}8B zu_Tw409hqUYAR%lsHnCIQk@}7Qtp@r5B6Ba^?x|=*B~#^)!)GfisB8LxI+VrqK|km zoQ~+?MwfB&=qr%cn^GJL2^Rn>P&Oj`u1>?x7W!In_U@F;RZFAi4jl^3 zRhb#-=e1GRc@ zl6|s{W;&8@e^&LIXXC@N%diQg0d`no%;N~c;;KiR$b0F zDk~M+98M~y2FmLWypuyZ@P{aA2}Ld&BoYFMN&vS1031bqx$v6@5*5p5_PV-E_sR26 zK5OB92A4^#RcY*?xfH>4XmltGON?(})0x1d9drKxHs@*_Kf>kF-;2~uMI>+iX1j+` z`nIdK5)o(KCuHk5QPzVc%4X-_b2eCzkv1!KXabaYO0O^*G)!*tc`+Bi>2F`(Az5u!a)n$$RT25OR^ zExAt*3q|%`Zoz6rfL+t9f%|*iA;e~iavo0Yx=6LVK1!91(NOA9fvc?uMz7+j)}=r?TttLFZ-odwxPe=&H7vFxfArbC>|K)HCthw zkU4cU>3HtEkq~y8w13P6S=+hJH7X@ zu*NJeAgy;cB{uWe?}fJDtvFe3*tsaOfxgDwaFW{#m7r`Fz=9N11<#qW{cVLfXJwn* z0^Jt`Cr!8HH|hA}ZXGE=*cI?z`bak)juKmBm+NJnM@b-DpHYORxFK~|4+tGe2Kd+) zNe&}bol3HG0-N0UPa%!&tbNu&wbU+tJn);O-FsB4$SlO1>WBdm-B{n1n^FE4`CAKa z;5tb43(`m2J%`bFug>vfMyj_ViS5o*v?_C{LwbFg#Xjq0r5l|@C6q5$D>^)l{+rDqQo8OhhXGjP zPHS5NJT|x+50*0PKmtY9iVEBjr=78Ytv^hbw*^X__8 z8lpYhC#+{lLR4%~VX*%IJV&_Zqa<#5BgkFCRmdDA4E7_4di#(FC(h#t-OUf|v$AyM z+LGecX-=diYfPp91a5BPcEOl1{`&zRw$Hp2PAg5nPVxJv(5{rl;ta`kgj2q&gxZ{O z!L2SWT(upOZy*mnuj#+gnysSUB<_pdc}2T219zP4tP_D5s2K^1?NR&UH%o2mDfieL zd~gHyifoQe2dn*k7RCz@X*MR^X%8{Sq172hOEp2Kt|K=}Or(NSdjc$-u5E22Q9p(u z>#+^@Z^&}#)63}egl!cmV|N(b`ZfCN^-cQ3nsq-fnMMUOD&=^oRE0{ML{vv$YEzB@ za2Z8_S9r6wyOiPwX?7|MW|#Zb*}6HeI)SPFUZ)=pIt((*+In`GBzdq6%#w5h*^_*L z0Oz8GDLJ|{@lVbCJIpXMEeGpX;>no|_0|Zvh`;W zrXon}McPj6fPSeO-uaA;z2Hc?kjHjfw+)-@ScEQ$T%1_(XIbsLPk)Y}fI^dLyib)) zr#l-lJh{^!SS*{VGO8+RrnM~vAOM7cy3d3Hl1SejzY^twmGJ&;#J6>?E&!6-o7meR z;gHNsML_;x8=B~&PNY8wo=+lXmA3iKspg? 
z*eA@WnOp;uoD&g^u)#PIyY*Z*NjKqsY0rRhJxFsC+WP>OxSXOg%z?GbjR0@`VToO` zKipl~JyBRlyv_RHm-1pjF##Her0hJeiJhDO0EP&9%WouZ&D%5;kJ^1l~cbe{H*;y&ZZ6u(P^BpaQ%hC>t z#PB%?O>(AuKk_94Z)<(4c=Vs-!Y_(dJE9@?!sPs>=LAFdz6DxDF(G488g!$~B#=Hh z_L*s1ao{8h`lLr~Eq5-U>{JR$6b1HJxZ1>X^}zGVV6cn1CQ(+>14Or4p1Vo$OF=WZ@Y7J0prhzyNSJ>oy|)01pK#O6NJv4&h_MZxY@~$SO4XvPx9TRMJGy5Kvk@CsCEbK3HaN#p9KP^c@DNx#qvh;x4Do1?FmjEB}y#2YR$^Za+u%aY;xwgoAjx`3s!;P5Ugbs=cyD@`^i) zWx8u}Aje@SASgH)ZEGjUS;mL0iQBE3E$#U)ps*dgx%!3A$ZqR;PtPQqn_E83OFB)v zg8#Xyb3%KXp9e;weU=^}b(@Rkiv)h;>P;FpG zUqbV-#Z0fCxVVlXWz3(Fn}L+_o^Gt6YIVY?S%R`vX%3Xw3$_n*gaV^=teY1!BFB#H zMJ)`G#XGF$jpNYJbiZiRpeugC>=Wy0rK@Cve)ecTtzmPY49%`}An3U-o#Q#*w-|sW zxg;eZro`#FJ04nXy~fAa7mPYww}yZ!zAH}VpJd@@Ue@ax_*%st;m&!kstFl@K|E+Ui3JseCg41;OaQ9>wZb7ZCEaZWBe$ zXruSEDHOJ6gur1-o*a9-Dq##11o%tAAct;zL?mc8^Q1+Qwc+FdTyq5E11*pvbOP-C z?D%G{xJCyK6wDxV^QJhXt1(28Bj(5LEDi>s7fA-ZYWe4Bm zeiy#IABq}U$A0)FAQuSdZSl+S`k&3P4uR+tDTtjuvs_gAR0wN5a6#g9l~Sa(P4=*q z>E9lhW@*wt~T zYx5(|A3m7y(YiP;`ftS9!Io(ahs+6BM~=YDs_#Dxm&Q~v-Wll-ZK zH@RX*A*<%IZ1O^6!+aFdc<#j;wJEQJ7)1@2DxTS-k^ z-EJ?nfIIT|V`0H4p*4>0NH+4lzlItBmA&f=FNhy{+O7u1`(Z_+iCR4*t-x%NYbXwO zSFSWhc3A6OZc2Xe7ge|MV`AXhChOV8Z;&Hp+|MIW32v7)&vuyZ8~zyiWklP}80m9< z@LrU5Og)Rm1%3BbK4zt;{{T31nJ4zPwmzWI#tRxchobYXbBUSc{?olmW;#`=P6Ogm zHUNGYkhWG#I9#nsy}Uq^anI8U0>w5!(S3VaHz@dix5mg#?i(Q0xCZu7H@&&zd{$;M zC^AZ{R1;!(d2-(hP8Nh3m8Ve`BbeLei~%=mWKcTy3AMf0R^eK;iqzbjB<+4!LQSkz zqnYR~Eo>>m%LSLja`^l)jk3{oxGgPY+m~E8S}%2rCuFE@e~vSX+Q9_rI+bDOIb&-U zz-a&jwloh)&ZG&`ax4xMwRMz{&ufl$#x127swzmo@1$++!ZbN28g6r4@9diUk-I+d zuqA1~u&CS5#(&cu8s$-~IQ2>W?|@$E*fw7Rq6B}+XT@50lL#Oc1JcCvAFi0@@aABD|1Rnlt3q#9Yz~Wxt%C< zKCMLF%D(+Rc)+knaGbevS`nBi3o0GbY7taPI|PT&NjCJpB(ON|UkCBC?*$o)MQT}G za^^ekmC#mc`6=0}nI|&S<~$UoAPFIq1fHNB&ctp{D`VTU!9M-%Yh8I*SAxLa#NmmW zGW1JBqSUhMO{rEcxcXAi+xA8EI7Zz--<}$69M2~__>;hQW_to<)vRy5pJrZsJ5!Q0 z3N+VPsLH6ps!wccW#vgtna6zI4@KsOko(`?*+(X>?ire?Q!yXW?axhC0fX3|BDR^YGO zNwgTo*5gjCr~u6<0?iw(4P$Z*9O3`~yNS6cil#XWJ>^Q{$*g2d=LWYa_uWJ7Hd|9~ zC2xh6HK9dpp|@Q_VRq1^tpuj$yd2V_llrPiqb(xfX29TD<8yKa#^Lu`97bZ-Ix7|{ zvla9d+t{QnYY6}ryL&wbb{pC_+fkY7SIO#jMdA|~UbB^5RSC(hN^$pMH{8C<=uC5^ z))nE_J)o5k0af-{8{)-@@_Vzm{TQsLZsYDja|Y=S1P3+j(|M;N2nVc;&mx=J!zqqY ze~92g=sXXI34c4WE}N0*&?+@GRwgvK?YSvU=u`eZiAW~K+D9|X2IAB*QDBkMO(~Vk z-Dzewk9AO_<{H0@w311!9~`4$+y}CcgnPc4r-t0 zF0ye}6y1eL|>>0Gk88JYjX)?2k8KQEOk$_QtNX0Cf-;)0T{{T^|KlaDp3zPDm-^|26c=~=TpFOrwp6ETM=u#*$ z^D?5;SWQX{DDOx^a~)~}ZLdO5l_fwe3)XiC0F{6VIP~5>GvjlEN#}0)WJv58ly@go zn0-zj=2R950e8J@t~t8&lWlzzxAb`Cn)Yqs?r*AQO5SeFN)IrUyHk?m%8cu!)t1m! zq7IhO&`=6CDm@e~Y&C55@|L)kWCo_hd3biH&QtI@S~*~fjxffMUBcuOkvx2D(Phzi zK@k+whRx|tpsfSJZTjuzZ_^TSPT2Y6+S%W+a;L;yqJwh$+B`lZXSS$y7Eu|89z>|G z5{FP<2}tgNzj%)INd)bq!cDJ_F6^!sPe-+K;y84Gz}G#lZHPOs$ZqSr1esqe00~wH z#!M|~9O64~^b-A7GscTW%&@C7*+nTdh8%1>ZFDKF<Yx( z)oo>J0V@hnR)T@1!^O^d!LWv!H)k_cYt<l;68rf{G7OepdH-aPWf{x(8oY0`5k3q?XyuI~@Coi>TZ+_S`tr1f~g zsy7av%|e9ldlq5uRGkkn+KN=9DP<&(O|5&B1z@IDxvm$>w^bDtXZMyCk~0I(=q}|= zOHHv;4kZnxRXcRMnpA`WZO`tcU)_nP5Yq0FPkvTiF2>*vq|weVsVz|(xU-A#^uCY? 
z{7xvlRa9H-m_JzhDV#bmLyuB)Wz|_M>_`Kq{XD)nwqb6(K}OYBlA}I0LsW`YUG=VG z5!s{5dFL2Tr&(+vK&b!_OMqN}2sa$@McP(8MBR#1}E)MOod(+gI&#W4e4ubb7Fetz8Ci4&G`XfPsudetZ8xE zYNXrK1cWNeQoIRMDAX=N2H@^NzA05(8xws7ADx<+eY@1d9QWB z=m&Phb&xug%R9F>3Tj*-;>QkPaNfC=GX`ahV@6Fqxaow$&Z(xtmK5fofI^AWeTlMe zP2zCvjhdCte3AzXJ3*hia{PEIrX|T%EX+a-N%M&tPA+%cOUN77(Y7ZsYwN`1x)GWK z@@bWl#Dy`brxkwfD>f;;tajXOew@ZU*M`&(TiNhld4@K%z&Kw%&MS{UN{dTgWZ*i` z2?CQSSbz1Daq7-OYJrNZo&~;$iWm#&gKhr+GyJL1k|9(Vl`?;`<_QsF(;>x?J{wK8 zrmJuQk>1nfg$(B#w+c^MJnsPNSraKtk2ix=4dq;@2Ax z;h6++{CF;qa8o#CsBGo-cz2R%96HMEEppqU!cY;=qVnH&NeC7$Elw>*P4y{9nZ@$} z+9?f<&A5VZ?u@nUvgcTp33TV$o#zvo=_LB9g%^yOIryH`GcI3LRI}c#*5@*G05u!< zu1`I=o$=)__G+r(zRlMsP1DD@dLmpI+s54~ONP8<$=Uw^FIB5_itGx*@arq4N`VN6 zB~q54lEYFO*+It=d%<8q3bpQ)S(M3qy&A2E2LLlbac_iDr zkT?GT=&Kzcl9@l>kzb+z0C1|-)DP}1J!92!ujyuOU+i)ty?>VU;kMTv#XB9>l1G2$ z+OPdLedZ{I3zXVg+&6Q;kwR)Uv8ylpClp0t_ zxl)tABfZWR>)Ojk>pBt%DIo7~Y)%jg*b5!X*Q(b&c^^D0crAvKazTci7rJ~L&9=T5 zajI2~(28kEAJGt_z4>i~iwwb9b--U~>!B*!=Z&zW?wef-jfaR_+iYO~rHr*h$0;c( z)2M|V{IQQ5R@hPWqzeS5906T-QdM`NC5?<@;bLvw~^J1WH zpqty{`uwn6(@(e;K&4hHAc22}=wmFC#_M4AueoINu+?q;c-HN;T6wzFM34b0QRlYA zZOmh9R?^EwD4j%-uZGyfgM82~odG(Eugo7jDVnX3Y>)~70dS)ohC`LA}*` z9(EW@o0`xIU1=AwKBnCJ~RAPOgI6;UzRssdnp4mF7-D!l0&KU*AG#NRz~OZG)oF0M&)0I(s9+odUWZhxxVP) z9C{B`&o8AvG9f$yr3e!#zRgx;QDmf{1g#9XhmsS25)|9afwi=qNh5QPi3STzSxnqQ zQTgcF-O>$>oNVTi#kpJqqlWO#>(HEsV6e{0*`^16otw-zITr^o-=sOGzalgWhW!>a zRaY{<8S75RVxZhebU0{Wp(|9f?QNGA9VDLuokSwoH`u5V(-hovv{pTq1UP5WZaCwbF-@_6cZ#zvO1B;v1vjmL$ZsX{m*%i>C4JV(M>*0%T%s3iQ zqU&4FP3}IpKauJ)1`_IsBiz(D`YA#FBhu$_Gla`aUGvR-c&40FTd-9gG6+cuHq;y- z+n5$N81+fpx+guavF6{0Nc(QvE<&JFV$!K)+KcT+ zWxWlcuA?PNR*|t%j^y>*$P7qQV-)+lj2UOb+ME1Q+eJe;t8sa`vqys(6Q_u^mC>>m zaH~aq5nhi0PNhlQr%4Bq7CgMS-($WhJgeg`g8`mebGq}m+w~W;A1_oU4Tcxhv5yzI z`Koo8GNoT7W*RpZvqXm%QLWP3a8jB|%dnhYg*xNQR@#)KV2~`4y02g{P2}%mU5CQz z`t!W^fY)Y<5i zszmxr4C*o&SZ&3r(AY{!)Rp)MwfO*TVQR-Im9ke)Nai)bGyn#Ao03MP+*Bd}ed z!rMk_Fv8&C$1(fXv$^dS1YW?mjrwof%nZ@Q-1yd4rA5dRy^@YVMP* z#1$1I+8T6%cS_Qro7fyp;Bf9UK4@*P!#;!B2+Ze@BSl%43-*s_n4Zq7W`70fFNBmS z*>iyuYCP=HbF60QghQy9p3Gc{pm!s?u&~qO+y!{|xdlq;yIT(C%GI*_-i53D=?JO;2DN0t7qIwICewM?| zknWUR<2!}G`tM|3*2jvxOU-dOJ=!8Y0->6!^+=K+)1)Rl>^(W>8+Elb?aEYzAwcL* zAdR|X>E6%RG~m>1YXhweTrZ?;>LGa>X`qry$!!hB^S@UA0IYhxE2N#3@*f-N`J#iC zE14p_K&Ck7wA3VKDlI{UwwAy*5>Uet%ZFs9?S-h22_O@S^?oh(ILy+P&Wd(uY^)9Mt+%0G~B!U38Yo>Jx&gJaOo2rb|X_=y`=XCnw$)1*i30pcIdzI`G|d+HyFkkJ z$ey#qUi9kDvYRQ(YC^7+D5zh^p(pihOzovhL!cvxC&{R+B0IJHfjzRy(xBF(;nIFq z>x=FT`@#rN)tNZiMciJ^)cB@>lvdz148TKx`^||KNrT>>eylj zH=7QoHsq81L}k_3tav^NBk8BA(Xlii=5Y;Z^CffNGUkXBXY-d3GZV>UF4AL6ug+OW zvRP;?^;-U601q=^k09+Fvf>quq=Pri{n6U@TgvOxH0_Fxfv1Z=9nBf9Bo%zoCcjs! zW(w29)F;S#p(GMiqVuj77L^}8OIG;$B-$p!6CjJ-U=1CO8=~F|&91HXlDb!%+)42y z{8JJ6pU+$=aOu9*Jg`ey%u&yf2?-0RJ&9=ajQPV#>s2<(wF*6OvVt5bJ;ThU)TECs zed)iy90!$Iq@}HEqd7gEZ6tbGdNYyTr>KU8N_HM=UQdW05T|_UkeF^l*@T|#1(0|0 z^!H+ghvMefdi6%g<1c$ZI-u!vXCcc&y&HZxKH&N>W;H?4p0IxDR{U*&)RK|=4)iP0 z-E|~VDjbvc3KzfiapEdBSdPzv`Q+PsTlM@`A^wygRb1eK)2sc_DK_&x^0v2LXC9p7 z9$UdyKY6}^OJGd=%fEFz4?;HtI9SG67RB!Pc921GOnzxJI zDgbp>|%(d2T#7 z*#XKdthrOERO4o68GZ}#ie`q;i41qnq$z#bj>%~%Q3^||(l#R=ZRHl0rSAJ)_d36X{C?8kHYw+xgRbf8N5ZdJ-DabXxsmNdOU3q$bCI=PmyL>c)ua z3C6^ATCS%|1zKu!y*I!7p}+lD-s!T-bOrFJKvnHfr;~0;kc<9*t7A)T4nm2LQU~+s zHrN0jP5%G`zBP2=k1YKb-TP`H&Y?r6ivIwXG}#AA*RwqcRHP|DAULDXWGLf)&~&U3 zxbQ{|8jlcLO zxqQYxW$e>$h|`bzjQhcQ7QEV>_Rk``VowEA-zor(_?z3Gh~w$hSul1lJh`XZ7dxdk z9$vL9?5=$O0A84O$)_9Sfl(?X0bmWSw%FKOAcPq_#*#@F=YQ8cY$TT5VM;bPzfsSY zFmAl&&g04h(Nu78=Pb0X!nXDo{DMj?3zg#qC}H>@*D4kiy;F^Mq|cQ zfVGurNZ4(AoHxBksC4;0#py(U(1#gk=1W-tX)HzR8Kxw*zH=~EA3Va2X*_pV`ME`iuoe;ww`?PgRN`? 
z5l*0QanGMjDQRSUkCDbKN13=q&+lCMa>k0-E4%%i)!lGghD{4yG^BOknLp=`4{iSd zI~l1xE-UCp#zF=QNDuue{^(XZGrEFR=yt#B$bOC}FJ}cbSfre?zviwe+^D%=Nc6-d zaOWVZfw;Pp8CQWe3fKUBPAkrCstk2rEP;-eAltqqh;%u-m9{-3uxt z1P?vA^ZYP%6&JN66Ke#1A4Vm1>y4K^X+KE}S89=!D}`_kF}Z6CAIbnr$Zc0CAhH0| zosTfuO55@e^522^TEy7 zDOxo*?RT!eNExuX-|p40j<9d&etk*ZBk4%&(;_2Kn#debqefEDmz?&9Yg!r93ewtB zBn@frvUeZ?#Cs7qB!Wrpcdy`ulV)lx9BN|kHva%B=4WY+HB|WVpE!?D$~;z= zM&X5KxT?fVyHIKU5!R+q*;3Mlx`OSJpjMN01HX{y*d8gowk1 z_s8oSe^;2#uY0bP#d7o@z zLd^O!#c2pwQ3<(SuWmz_JnN_Tk(1e2?g>C1cLl5*vb(PGq;@L}?Y zxsP+4GZq`R8iB=1Y)2&+?Hxp}tcPhxa_$cI>8n;(Ot0&9(r3gcG!`9dq)ni zayHQ~zbOqgrKwg(1xh^j{R{_m3{e+205dAQr?me74ysi-)NG}m;Wt%=^Q_H7l;>Q~ z8r7^>NVU^zk`E#4j?{c zLSLhHfy-_2FNO7qyhh3`(_t}L_PH-YTWv*g(FHA~X}BuZ8g|5NJF{{%3-nr3RrYB> zb9aelfd@goem#RwpeuPH#zaqRXeQ3qv1#e+@W`n{ggv0LeGi7YI+a%9_X;WXMk6df z%%r#-X-bgmnpdIFFJ}iMZ)oI-rB;I`PF&)j0xMM*$?uqot4fgK8go;s0F|=Y7xGenYY$>L(V9PH zBci>*7v4PPo#vYporns$c{If%kO`>j7!Ft5IRlYC?tmMWU#k7HDZF5*<;JUgG|rhl z*y&7ps+^5XN0r`dFGrG)q`e$~0ad{kAm3#Y<=ZyLMI=%d83-hEfg##w$GF(o@v`pR zmvT=&8%0^YD@3hvu(1K`+_yIN7rpejzPTGIcY|DF6fCJn&GmZ5P=h|1N@_G#B~=<~ zOqZM@T=#=O>q1L~5s;DrvkN^zHCmmZbZ*~_HO4V!i|2A~<6(b$A$+vI70P*0**DbV z^-QN^j*0RT9M(C_+;1`<97Tx@8^qq%J;R;6AkUf5cD2uxI;9<)%`YrwVN#?pRHq@X zM7q;`!^qL83P41@^%|^;scT*LIF^mBg|yX?=f2`ye8s~_I^QsK%b*;TCM}1-_9+GT z4(I~;IE!X1ZIJ@U%b|B+{NmBO#_`K&!6P zD>U{hl&SHjKy|db+l{CjYDiU-2i;Hv0+YBm2bMl@)YHBh9TbAjX(0B4W9AodWfYLR z*~-$_4hQQo&_+jsO^-38rEjg&G}zp!N&zY9a!)KaZ=!$Mmt+QNd7{@Uaw)Kp8!Ojd zU^I;{Fu2Q0fFxN-A>eFww%6Z$d$(Y*x3>+aZ?bs;HV@U{(fjxE@LXdRql8TT=~@lW zx63}n*W$K+%o!JlIhQpmiJPgu%$6BL@*mf+V_Ep#}!!|MlW^>qGn7d1fHn{ze(cg8_jO)dY9lbsaO3yhlhG5EEX{vIo83qID zLR5yH8tmCA)JCE{=@z}PR@KxtqDXgK8X8VU;OH!D21Cd$8-VPFB87w*1ZJ_O`|{{Ut=_KjYDihO6S zwNTG5)0~vdNb9o{gaF{~^$!lf6oew_7dI+Axt23eUqOpgn#T`l_DLbm0+R#FTr2#D9S#x52F;vDXkUs*FBL0t>yhAr-W$B`-PC_7c(A6U6s-53 zm&M1?z3b0RiK<(g&%Nqg2R{zDVdEs^V(wjH`Y)hVGe%|O zc2lLyqDOIxJY2$AmoK|<^hYlx?%^6#r&bW)Iub5?Hs_8p%D&IdTG$-{8^e5NcW;n& zuhY#drfeqFQ(`vY$1ivfSx$037)=WvV3e!q(3*V1WCa$K$s$Z4fGTZHNf z1D2;8BmV#nJsrqJKx=A&I+5rCv~1Nsw1wi@5$LF;Pd;%oEm4`LLX`YETk%$%A&_;n z;!sFY+#S#j_8|4gTcpG2Dkd**yRXy5N!MXSG(Do~OyHkr?jU7pG$_fWQ7Tkel@?~X z9mP7@-|pf&88>*TMJfATL;<>h0D-nOiQv@mxXBZUbGtfkY`U%$c1@lWG|EO}=DDL( zTT)ism9rgg+2rde7f=toYo301>y2q=H>i3A0~DeA(pxg;{o<_`V|k1J0B!j(56p8x zsKX&QQ3_CTL?>;%ZhW!K`CE+t0Aox*{a1P`J_PrBI*XlI{$RZe!IGlFLROUA4Zaxh z8^&b}_5f4!DAXH;T!kssB`HI15=rUZD1RJX`826)Sllmpup9pXamux{uoqH2t?Km) zkAF$Nd#BfUT`rW-RDkYor^rIWzUXc|p<+4j-hLSLMxP->d%cOd)x%RsHL3mf$JB{^@2TJU%L(dZ-(&?2URE+g50v4HZgULsB zs4fsua+0lE+V&T=DwMlBB}87)vRGyI-eg;{IfO6y|+9=e*XZ30gVlcWxq@1_^k@}9PQ?cz4Yeh^Jf|C!%Az5OpcTD8hH%kv8an9o;T_uZ1^MhvKklgwfUyWY(OKDntWp{WQ3K#d^phzA7*NYxlgN>O4#2bi}rhKmjIO}G@5D37!W zx=#Cphu{Y2x_jIxhO3bB?8yt1^%9Wlq>`n3GBqgPvXqp2URd`cu@Pa@8-?E~bRb2wj>h|roHHs@{P zwnycOD7KoYf6P85Ex3U>Ijy%~`6R=_Y^>@Ef0NXU9(RV(x4@?bf3-D3Ne_wnr5c#H z#?$a37|B^>c(zsZ}%l`laeo5a>;GYegN=umwD5+k5RyxMYjc+j{-lqmIxcNrI&MX9D&oLu}d-X*0`#H_1uHOxB^ta@zl2pWB?Hv4`GR&R|ql?ch#^->@a2lOE zP18HJhHK)N04Q~w^^<5hGPRIbT#W)hwzDBiW;4)2(v>L>0;Q%zc}OlFyqA~&0)=0u zY*x=;xU}MqRymSxoZ9o5X||5u)DUvw4+~nmv2Sf?v!o(cw7S-9jh$m`f%izufdu9> z;(Wf+blQDtWl|RbxJ68vB~Cjf_9WF1rs5`&tyI&{-rzaEEJxGqjE}YI-~AE%mBzDeaVT4nzt>`lq;`S{>fGDok?gt^J+S7E`+j zDFJEKiLINbd#SyJ$!*+o7Ty;QBE;J}9JpVOi=1OyDJCr(wI*;TlJ~n#SIBOgjW5n1 z9y$V_TtDJR0ar5v4xEifk>kHnkL?3Mx{8R-0q3 z@_!Bmg|X9C0W9}6acF4W)L0GSq0F4m9);ZYek^wtp{1{yW|vv6Yfk$^TmT&QG0hGv zb;mfmzycZyv)l{g22!eKO0HU<<+$)3sWjW(*28Rv9TB6OdqSFRpyiUKAT3BL7Dd)? 
zjz#%IQQ&xn8U3wE6QZb%?Pxd2?SH%ou(1c6(gmytI;cKcIW?9F2=PHEJB}gEySINh z>)nCQe(5f;a1IU3^F_0i_^ZR3KN+%ooQs(!)?&~eml|W9!s=RiPB7?k7E;i(gz5t0 zNw~+9wsUA@!s>-(J3)939N}?i3mzc(uR3AX6VcQ*rW~By!i(niM&VXi%e9=-Q_nn5 z%d|>uK21`4&CZS8MvEPVI_mTY;F1v1h)CT^HXEp7^@45AFNsz%ikdjf-c7H(2A4VS zWB>=#io8k7hAAH~K?{i?qw`lJxPS({Nga!x^t|!G&LK$lD6@N3<(R8;LUmSYw^}dZ zD+$sNcilc9Hy1qd+N7q$u=vELXv@IAxcd&n?Li};*mf(F*w!&w8^60#TT6zPpSbM1 z3bA=Vhx{q0H0h%8KeXZzQU6gahsvb+obn0yA*~f}5F#MlnWfG~>=_PB>rw!7D zBqcTm=KGV&d{6A|&{kGI*;0^f2{$ib8Q5CRkel`eGed~UFmtNtX@iZu#+>by&Du1| z+3Aw9J(w#Qa*(JqxK))X(=(%iO+8bUmt#`ii5I)n>YR}k&hf-tG}vjhsZKcaNw@^4 zP`0kC7^ZussH${K834(iNhd%7wXbWJ5(7Xc$3@6_K&NF)-=A+ZpLM3<q3jVEvna>yEQQ|UGmQtSRx89pR>b!bXw-vtP-j?KBZm?<` zGM5L5l6z?#&vxr~OpkSmxOMk^*0$o|pk>ou+s;bydX`O&dq~U?w|F#{2kwl{MYg#& zCo^@)mt0bN#Hr0BbMbP=reqvfo*F?zM0mFH5n`*Iuf6aD*^8rNb&Z!mK8@L#sP?m< zM4yuz zs8x{?JfsDPa+*YSsyod(hg9Eqpd4lPl#2n6E6d6D8$4`%9dkE>K>Xm`4!0IM=I!0E zHqef|x}FDzK4{HjTGmGUfE^@lc`dMUu`Ii9tDA5LAd*1}rDx97-L5m0Mt()2Swy9R1imYvI9Chu(Fb+l!c4F3$a)Y4HvX@?3SoBlV^?4 z>lmblWh z248WNwvfG2nO21WHz7vX1Yh1U<(AXT({R(^eoKw}S26b)YN~%}>J2A3yOvQM+HGl= z@bv{D)H(vY=SJlu*pN?LT{cF(N?r0UE`9-C_A+4{Z@&}p3ZG`K4*g?Ff~Hi{E_W%~ z#1ZM`i~bw6ra4J<$hx&Qz9$05s48ay>-2hVZ`>amNnf-vzly*Bom=0} z-HWsvbp(<^*+rTipJwa6DQ$+G`>kOj{{SQMR92VSleCQ56(*r0vqx#k?Y`QdYDG4J z!||Vv;){(q(HfMXq&O9;RlGvmW71U{Lxvd-v#Wr0I!hajh&hck(RqrBwn&4cosR?; zXaUsqom7#+?i6;D;dXn=*{%;}PSfu<7JO&iM7*e`{0Lbp0Z1p9Q5^@U#b;_G!|@6k zDWs}|ZvrkP2Qo6$g=)d}k_vZ`yD4kz4)Z_&Up)`Wb#{}-4k96zMOT1)cZh_b1!~J~ z>Q3E7{PE>#zD*j!cWPq(zrd^OU5C6~l7M>DK=*4N(e*m*XDU$pGG`dF7W=T^$(q}+ zRVf>!IDq;Pi>mu4S@)b;DY-hqX&B1MaVA^t4fj zfn&Tnk5&5B80|HayF>o~AtvUIFmTr}C0Y6elSa^z}F)jf4Wnu_E#* zcFpXp@Q{+E=el0Ut=6IC>yGz{xc!<=#pY^(X64d7LUMgEDYtI?#-XrYPRi@${{YCS zf91yZcm5l^qN>5PeTVNA%?NKuN?uAPKH5q`mXNy@_=Sb7?mp|{VZ}DaT`Iz-OqQKU zlOTKD{{T!f>6$nqBcOYZR!gnrO2eD`L#X$686t0N1+Fj&-krIX3{{TNuVyz-IPc68ZVMXAo zsYy2IgqH52dGx^#+NxIAI6FSC#WPRH&NVBZ)_Xo?y!(HZHB-X;;q5AhQiQw-7BYa} z-=_wgcAneUJ|BvXXC-*Sli3Ehd-%RKOVD^1#7cygp+#`l9|L*JFsm zx7cHV=R5iwg_DykVY!w!`hxWH+Ol7>_HX0M;OWzp>K6Bu0sMa)cpG6Ey^Yd0{^M6i z#hq$N4^r{w`iPj4ixh`%&!(FVQmk`w-?6_56BL@l(^REHM$An&b4$~X88u!#Qv zO5#)1EVNPX{ar`n!4)>!dyqO|@`E1gAkZNa3V!$iCkr-BUs8c&C1KSl__nuS!wwR( zGrG|Vy3NhK{#e2i8f=FpTZ@pMy)e-MIM8(~?Z)R(AIBTI(@#p=Zjvq!@%6@uO9tU; zy*=87tDacKjMk2bk+sM-2Kae0u#gRsdH#6Ci9wO}3M8IqD_SrZbQly{TE9j=m#s(Ek4+z=!LY$$h#U3H5!==Zk`g`$B zlpR#7##t#(EGa37Q{f2jNd)u7dk`G{ z1ah^8*iek8L}cY;P3JW!K=01vi{OSwWK?XhxkS<%TEXzH{I8D0f5Flo_eAf|IEOen$TQ!wfo^$%oT9D=}P%n}xcB0DkuPVbqW4 zOg@eJgtDC1OJNEwOJye41giraK1XN}?4Xs}H6zX9j#do43Eay@XTCz3H+*huydPD`xDIm1~fv=BFquQ>rlJ0GU)u+byHH zb=K}rbh#`7+sV2Xg^&)QqzP)eD!rV>W2l+7V0QCJ(aa>=1Jk;Bz~&CO7AJ_q6!;b| z9MiOh2OP$QhT!MY8>rNDFE7=WT!}VRX;Boh z_`{lR4XxG=o*{LNMwzk5ZJxh}!1gAdPpmsY;vQCcQf(94+imq9x?6DfWke2^aGXhE zkTz-=1;14JXX+Iif~HDLQXto5yf5BB_^Zb|vlnSf`un;k0r;s`km^#R+f;>>EEL~R z>Oltg>NmwIiQ|p@56N|$Q*3aiw1HC{`rh8lYNJ}mVjkJCy{e$+Wv)826(}oP>dVfA zHaT5O`=EM~J@~f6QqA6QJ`VXVRacHsI#O9v5htPTC)Hh^$hmPX0>n=j_h4e?fx z+qE{N!duj5ieJ{iZPcKoC=~+2E(h;pfpW96bJo>GQAr!0%^=yN=t&w5f_xR{3feq7 z*>1k7F>4&&(ZfDHAyw`o@DsFUM~-kaIyeKyc4$5Ny{1W^zLJ!gh+V)?xjII!N0(k$ z={qqDaNy3Tk=w4zBXRnqXK|-W6Kb}CI*ei|DIV7_Lgtd!u#b3Hd)-Rbc7W{9#Ei*L z&JEFW^**MC9h98D;g_=M)+>mbLYYQFi1-EW5Cj+A7?5{vXx;&9t&GI4-tyO0d zx_8V0KI6dg*hTks@p=ep^DDP#`LF*22?&GafX* zh4IpPCD{7vTIbi+!syu65Ypjmxx@^+8V%JiXQ)irFifN(lv4zae~Bxj2SX z_L-Q{ZMxir{{U$Gl`g*-rK2un^0kh&z;l2nGo{GBeG1y}8ibsg8#O&8GN{twz>3bP z@q4FOQm*k?Yij@+kE9zCTuJGUdCF|B--ZVZ6LLm~8xg@f_vPRr*T%8TU&epj8 zYxM;i*Yo_!wqbswCFIOgNmFdRd?mqSW9T;<4|XEx@RqV2(Z^NWsCIFYQV5zYe8^Pq 
zwT$^~@E?oy9`R2+{VYIlKqW-Tki!PnzQGDt!v47HT#?cT?F}4YUvO&wo{{0V;?l#ht)jLrM7Zx&DNc{0DOI{3&w@<*SdR$BMtOKO| z*A-+U@OvUsp+l!rV`hjn=zI`%QvC001@sTM!cCH4?nZCh2iE zCvrQIbtjvtIo(j(Bb}Ynbx$DVnqvuiO7lBNTdlC0=30m+Q!}Pqiw)|_tUDfKj<+VICQ1~h*Be5t^NO(r#q#sP ze`d)xMQTxSm`GTP0O~>5+IGcD( zN0CpOb#6pA(ikg3^;i;Y6Spyl$I_P^)RQWtXSj1q4Q4u;16nHcf{n;3+;g>w#=glJ zxVDM$iBtkC^L3?8vsp@+a!p!k-u6n#9Pzkm*`q?#^|dY|edovUQ#M%QeOi>buf0{5 zDj?ZFxyd%?%b%tzwfG!zgSsqx7h>4{QtGG!cidjiv%~OC$}bWSsCkj2GN((QB@QWq z+LEL^un7q#Z3!R^?02`n0ZoHEIhhAEYOI(yDd78U@L1>Ei(1#W4vzI2xx_5QC&hKz z?C?khWkn<>&ue06;PCE|x<0$q3pqrp60z=?+6(FOPYSMjs7Q>tQER@|$`S&C{0mVd zm=+fuJ{XUt!x+Q5=GjVZM<^cGp9H(WWtg!ir-8B>R}-hhrqa_X=4%ciI*^wfRhTxg zO@ZH`H`r~lwjGNHYo6lXZ^c4Z?fi_Vj1nK06Q@R+o+kM(MxC-HhC43t_0q2Fek*e4&Ld1W_ZRKRgrAj45B$W^eKMP|~5+p4q`6KZV!xLP= zx-XE|!13?RG$Gh>BE%#E>oc#$;=WH8ud#KIOGDu+yX+)7=t%t9k77kgai%3Qj;ty<1 zkV!Z6!U(ZXv}=BZOuFH@)=#D!0SsKWFFU{5ThiFp=v%p3smBN@Dhl+sJZv;A7Zs+W zO@JO}8(Lo>Ab@(GPPoO1MYHxd-;)0T@Qfze-D922E54i_V70=|8>oc@{ZAQ2#`f_3 zzF6?S)xP+pNAaIk^mk`2FzR-@{+mAIRHG$M`!uMLWw{D-tu3Zs04T?GNg*o%K!arz zn-Fiy71)|LgPbnVa~&gy@6k*$nbKqx0HOAcBP!N!mfRrf-wXbkb zTzO|}BBpL9J1E#06I@!*FZ#UFb0p?LI;k2+T-G!^l5NLa(~f;dL|F?$j;#cX@6*!| z@I!Y1q@#-DCN(m@E$5O8=>b67ZhL>j=ZSiX76Fh(RMRvJlabn#(H%u^yj9DX0|2(2 zm(dE?S-cIC{{V(Ea@KUtIh!ZdGR+>M{Vs_>sW{{G=aLj>2aojK;!v_sz*q?$9ZE{B zQg6`U+1O*Gil)GqmyQSBBZm!m_hh(c(QQR^HFLoX&S+zfLfU6-M+;lINm4S`2$IjW zFDhjpXZCY1P=@M&WGhE`d;Q&B5$1Vc5bQ4ZgGWw-PpuHl+8kh=-?TpWx4bpiqrD_d z%X~L#Pq9bI*<~|TEJ6*Yl(KvN@4W7}2gDKMKe|5kuu~fAXBa=q6R_uq&ZvFwZ-0mvBVGeD(Zvp7Lyfu(WK7WgXeZT4v(sPY7X zd+m*w$oHae^pCFY999fp5jp*(_Q?DFCd0(&vd`4dITAY=eS+(bL}dv6Og0p^B%rO7 zZyw=FLQ3t;De)^v-vhdgkNHjG`lGjp*G5V6idGEWQ2ej|05I@2Ja`jp^yG(Tr$KTn zGFi%W*VtTiNkfUs$SQHSAMP7a9mUC8fTbeWRfz*g)rQ|j?F4Nf)7{;!dSztrtg8fr zQ2X4E(j7qFJa`LVqlz&mW?PEPm*nKg4kec4y0!lRCfgt-hl4IFX1qhaP;3!>YPe19 zfrS(JG!N$h^AGC+;m4p~sA`+l+J;?#iNZ$>=mbUf(DIziO)Ef( z5#N$@?h;)>E-$HS(k?-+~-nXL|WRD%1qA;sF>#Dj5Tx^)?W*121dxY?aJS;TpKLE(O9tK_QtZ@lbD z5nj}`nNy`fmFfjAHc~=VhXRnhc!4|Px?=k?OJ7FXO4!5LK)5<-sJ39=Fhz*VHAaUT zYvr0Lz>$x+AnC40g^qaJ}H`0-C4BgQF4L*{!y>HW_Qj zER;JwO3c8ftfik7@36J4x4>e{1VeJp(N*y}hkJeYYp;T~IfIYfXX6Y!ub6Z164#hB z6lzo&YZK+tp{?{nVGbp>T~KKysfahlGTwDu_SQ`s;r*2Z>w;S@jnk7pv@kSg9lvh&pXIiC(oMptD zh3aY4c!w+bVghOk5aQt*ntG%^5LNaZ7OJWv9Ym4H*XkVu4abO6Jx8_w0Bfe7%{9@V zvJP;q)+hrlO<{v6S&qz1Ee|v|85>A0gtRF)mFE zP(~=Bh5pV#AO)qjIy&UM-L}=59u`cP-J$zYQxvbOc&}K5%0-BUnJY22P0re6D&Fi6 z+2b}I)?W7m>ZR&-9tU0TqNH`d?GSv4Rw7`6%O^a=kmS>^>=BnpU~b0_`maCb;L)GSmGP9;*h!+z0m<8Ug)Q zyF1^3$S#U!4l40>g~Ph0VU3^aW?N`kkkeHO$q98f1O=D9IqAOsha4XZ+k9GD#z1Q) z-6xChk?P&IntNvU7YnGwnN2M;RzJ8jJzt8Z*;}-S6l-16)qX7InvzK+2$fNk_(l1E z7XI5~q+^p;on(0<_lL=PSN{M9@KDdpQ9?T-0IB+e#q7xH$8fu zW*j!yZ&FIvYq?Q;-s1WO+g}}1lX{xB;jWshM?AhqTY1IS3~NSL((WfsY`ONw?Bo#b z9aNCSBlb|Sxd1L@d2!GW)r<~0u3^(`5A6UR*164T_nHlX zJPGku*MnR`&9Zwg@a|@4t>!G(nX4#vuTrX2st?MQJe8@Yi?6z+IIxD=2|`MUNG97` z9CMUvcGT@nL+_I;!PQ_1>eQ~K$#%^~+IVFk1GqF61A6W@8VudrkU`U{5@o*AZUvwa zLP~j+dUe=a5mhveV@YnSxy?iv!bYmo@J5RLR&_#eez@vkm82;RrFYe&SfxVNwypj6 zpltkbqox zpMXy}_-H$V=rrTBg z%7)r(I2}l{GU@vBx~x=?V`<^_@lO2E*iK!VO~LN(?(m!m5Cy;{=}$h&wDH%D+&SQG zZKb`$MzcfFX63uF^=wAu6cp5S60o;hx8AU|@paj~kgdgGf7thVueUgn`lR$8g)4>8 z%U0iE4Yz?VH#QctTjO)ktIlKkMSDMG$TarXM$R=AIT%0PvZ$ff5>}*yKG|Q{x#0@Y zsa6V3`{LO_$gVR`_W)V|Ho4xh$Y#@Q#|zkLqI$0gmI6%AXgwLG_9>^e}S6|~y)sCbif17qqhZ+dAe zD~ire;kQ?)Qh(DVg$~>=5(U+fvgba(XCz<#?i<;se|0wLKLmYPHjMQjyjLz7*=SJ$ z-QM5g#M{nDc50R*c&028Zcie48)2kS%_xLiognS_VNs24km5lIuv<^*E4jIlR+#2QMZ&u4!gs!|`8cyu0;7FdI&VC2i_awXb9P{Tva8R~a|Dih$d_ zk^2v`Yje2GMD=pE-zhtIK?3-&Q=V$%*o0-WS$c%+uTo08B=Ozk5Uo@cBKQ62amBqk zd>3FM>dj1&M2Kg*&x>2?m 
zinNkR+#3SfSEs`oF5;Bdbc>@XhL*w>y*dravI=eKdv0)-I48G38KBo8rZuglI-Trv zxwXDru$J43Q<~7*qN}b+EO!BJmH{V0c||w=&n!B~+k4p!EkU)oC3+5f%$aTA{SL8L zqdJg63|GnuZ_jgncHbE#J0uo(1a+;!h)WT?WVH2Uj@CG-#2VChVbt@6W}g{gB)0Ue zNg#3rTh|RmK?@G^8UgH1)MIs0w{l4d9R#KelB6v}sn-eTd+&oR6k&NL>rms4A0DN8 z9BRu~C45$}7I|GH5Jx+VVIeB#R8;FEf)Yu_?h|%jCw+yjwl=YD0*D66KgSriW%qVX z#*@y&rZHd(aMH%XDgyriEM?XZQPiP9h2O*BUtdgMc41@McMpG}^dZ8y#T+)_>OSE# zc((NLe6irYr+7GxNAbJVucTN*tw|95#(lt~88NlE*M!?r2=qFJ_rx4>SSg%3OAxtz z!1&WDb(TyeHl;K(9D-D>Ithy4NhhW52cJB9ClYuBHQf>U0=X8((oHo3od#qX{{Y1# zU|*|06nc}DY7}}_eVmyIdIWA2S7SKyE^Zw5WHu0_pAZVyZO7$+ABYFOCfhek%)8Eg zsa&G$l|>{F$lh}s?AS;*&w9e+QGOkZu#`21a;8&BJAG9Ia<#vkH;)ryP2Q}U-?sk% zyj9_j-f<-+R;7eMnoYqCD}Da}SHrZB-QXi&Z#)jQ9Od`Y>*!LPUG!|5lYCCs$6;U; zy>xbMfKMu|K`c|P$>%gD?7V`MpQ%7Zpa1(OZ}L zmnWjcp0kRunAyy+=x43exfYlC=KF~KLGoFwx=1Iw&kpnRQWN|~&l{E8*MIju3s5&U zgZ}{ZSx#Y$E}#Ztqo;0~D2rF|#unM%%-^_}z2P?fi9F>A28H6HWz`ZcX_;BS z@zh*D=lI}I-eBx!-YQa|)|KS`)c#R`lWK0ZsvFKMAeB<4t6}@q@PYYZgBLV*AH0o# zesnjX&Hn({DL*lW6y9W7*=f)J06LUG8z1+CcFp}TK8y>J^h=UPXK$=vd#n>nNK%_|*4d=VmIkdVVeJ9H4ew>3 z5pmF*5;h83T)y4kB8qqA1qSh8rzeHOhIZp(+xVqwFKDc-K3rOK>Q!=%m)G5goGq=U zN`dh&O0`=4Nj&*rdaZ=0V@!;5U2YaPJ)9J-AIs(&E5tKG_HSi-w1Ivm#`f$T%b=e| zl*UoTZXh`(%hQz-1^N>{IX{*-Pvs24aLRB$>i+&p6WXNzicFl>?-P@kr z`W$wAezuYtgHIz}yZ|R5$4)#~rnYBlC36d!Lz)TD{KpG~`kSAs`AS3IGt_5N@EB(pyw!+rd`P(s~ik{8&P9an)nYHM$8V^cb7Lu|Sq_Y?Kx_m%x3Y&Ff z$SU4=vpWcifvluy835vR&HUq+VFS5S@Li<-+GFuJqcO5a9oCWw0k&>-%H)S0{-nDO zlTdl40%6E-tm?8*Z}Rx!HhQSTj&@e4pdp~0RIiocR!i-n5a!uBMxfc*T)F&CD%h0J z?%a{$q-2$wwt3*W^V@$hIZ*8hn&MqaO*WHBrrBF*QXwJL01x$~D{owS50P9vN3-!i z_T3}AUPq%s@CPilMoplW7U%;T{t!>zU3$xWAmVd5TOs8Lc~-doSZN4WP~+q%ovn=_5!bQEz%EUn3{{u63);$xmsgkWLe^Vx z+qt;9_cl5PuQ_tNw=6!z^bQ1)bq67C(bA#p{l%Q&$6unai~LvO66lJ}^?dY(I^OscnwC4hIryaBYRH0=2^GeB1z{-+lEy;{6PGH*x*R=DOk>U7S>Ep3{Y!7&EXH|-KPd8X~QLpuip z;0W(ks()Y%?#pq@yiUn=38_Kv-klPn@DFMt?KFk9p6%c4XKx#(4VVN z6|zj&p#K2(leP?_ao+@W`Xeb%z1V#db1XI$MnMNr@8sP70NXDP)!*6y)I9=_sdc1s zW>w=QTtUirNdEwkRLDQ}kb%LlyZfvAS?Kj3_kVME{pS7Gs*Uuc#cdss=Qwof8McFR zM?Vydc3;V@#O+&u^riG_=lK3~6JMdfeZMur-pVDm`&D*lc||27h!g|QpY!IekDwTc zKW2FS(zxAExJW%7s$TkJ?t5=G2ml<3m%k}^XJ$pId(XigSnG)U58f-5&3)w900U=# z?+&=0c9|xl0GkA=$v)OCa-gAqmKxo-Pq67^yp}>#{l_a?3Smkuvia7OQZ2dIdG-GQ zOd-#;!gNGPy0cs?OAgZFq$wq0={G#RF-FJtx6eZLA0+cL>Ce&5ER)0p3yY1;sfW~i%(uoU8lKM+m6WMdrG-sG(It{GV=C9Kl(VWw%F>tNr* zr1JIEhTJ0~GL+fm8MGiBp=QT!9Hp!v?{ec~^IkT`L^LaInr90TivIv4^BS*eOKMT+ z?JQsA-dy2gqC4yB@<%|R^TE@VvswAL5C{lg+#=X2277~R_87Fdw5}sVSeDI%YI3;^Gaaj;n zi<_w_zS^u0=Y<&-2#C#mSxQJNeF+z~>mTQhEOmgnC^YvAPz}gw)pSY@qw*M5V^o&8 zq~ucU9JO7kY5Kb_=1Pi>-7)FUI1jr@P6M942>$>{BU{?wJ}OF=2&3Q`355Q3rGxo_^tzpACz$-qB04E^2lWUZ zCdUjj`eEYOS+X%_}r>i5-Nmbil^P8(2S7 zG5nCen? 
zCt-hn+=e1Ot){0Sf6O1|NAj5yjWmoS-j*ObJ@%XRE~L|tTgur^$~XF^5Wge;048r9 zKCJ%$jF^7={_$F-lIfwCq?V~wHK{}T6>|q~c*KLdDH0Dj z?o}b>e-Fb0M&HeW)gP%+HAMSK4_!yyXieF2Qo00N=<(5-8+k7?X~u&&qqF?VPCwIF z@PDKUBQZ+~ftVHuxs90@xAn6GWMS9G&Hk@cyg9dgW3C_lrxjQ`URN80o*U|MNkaI6 zR4O+hcx5^-@Y9K$4wS#`7X62Mg<#nFhQ(J-Tz&6k-xeFLP}bUWHRs&LUb>Jb)MQHK zZ7r25-x4(ZaoVT=rJ;@i`mF=KMLvMmVv?a)Jf6 z^2Ll6*>kr+DbyjMA;dl2EecrBq>Vi8NBnTrMT=|-CYG7u*CT>*!lG6tITl1}bf*~( zrs~t=MgIT{N6lRe0c+eZWoD$=Y({u!%#*#7{d&RfaiPZSi`DM-s`Nh8S4 zXaEnrCmu@W@-T4fYySY${{SIglfqseWNp5#RSE5DGNTi<6!FR(uwdleot3hX;q^{Hr?RlQ zDyp{MwK}&{;@kkVZE~^zx!4RsV*HT4va(vEPzL?7+$O*P0Qb7O7gO1eUg_}480jBK z=MXb@v}m~|=GSjuP4rES=h~unX5u#vc!NpGaOv-5sWIimVrB$TOAUerDrCULj_&ZIfxo9*tQi74IN_;!{{<)5r z=BBo~M|;k>>nP!g<5E9YCp9Jy3B*;r)Os7YSxSx2H`sye%v=5#5?Ly@tnzQ(qv|BZ z_xD^~e)Z>rw)Y4!mSF8k#+t1bc4=<2B(qqG!!Z!#IUrLQS`rne$O`Usk*ehCwmsL# z?$uO7vvhID*84Gb5>GgOao5WE+V{~|X!#t&@wk4_Wu3LR8ESrp31&2G;FX ztT&Q~W`UFM`|S?Ppnyl$#^~Hm)ft@2Q!K`S0#Xu!rb>cHRr;SIcEeKaLt4<{_#d7*M0D$>fyAQqoXfcd2Ce;Z@d z#9WTBxrDb1XPXTEwF!|>n3z0mgoCJ|TtASo zIU9=9a6be1;#4zx4RQYfyg&Bbv>Ht)XYc<2kOZSut1{d+-EnC~tEl<&IEs!kO_Jb% z?^&u7%(;#*qUXZW+graW-qs*ew*r!gB?UInUN}llD-A*Y$oheAYlr8`Q_rj_r4u11RhF*iGH2qai zw3X5ff#8|7Nab1cD=TfFRPFI{77U5sn0P%c{{S+Qk>A|hPwhAEy;JX{NeV|}$@kJP zo2UcCFTSZC<&M+&Gv~E!{{YgL(W--wyZe-&bT{v{a8I(riaSttZgT9{XZi->+n;Z1 zA3(7&{n|tOrE(eD%st+!YWie>{{Y)g0Y7hn)@?np3|H`spQO+P@qsz0sz=RC%>F(wJN1pKkSg% z4mQ#oC6?Ap?=8fkrp}PHB|xnp6NswJN7(=x-RB(zz`j@1=sNnszHC0k)Z=x6lAP{< zE?sNbblT$f*26mjbPK1n4#SYmlkcF_{-lYNlIl2~bQXnY?)D zT2`=-rM{rxNqE~FV-?z+rIJnBGR6WK%G8BrxLN-IF4yGADQ2Z2x{2^#J`E@_%L!Q0 z66jDvi&!g3At@S5l$&Xc#~^)JW8))(3r}5$Holn==9RSHk@n8O*-Z^pj4mwxpVd)%5}rd0*LG-^~92rXn%Oz^32f zcKU^^PO0I77kk^oF7;7!Hh+0_=}}>&w_zdD6?LDqP&Oqe$a&+TP++6jhrd#}9xu1= z6-Ekh3mR;C$kj*Lj$89wW%*96OR=`2dwE;amM5_2TG3#+uG!VTceY6mXQg6xuCCufK-?sk%@-=VM znw4+W@hb-{7;D{cWd8sVmHz;O6~x$7F#iB6)LmBWwZwW|YKcX5uay-hds1DFu^VM~ z2}+g-+=HcH+}L#HZHd@9OQDh{&A96`cKUv?AbErY+>T-~)W=KhS-l`}2b>2o*Bv@I zAH^j|?AgT_uC265%FeixxDEdR9S3YY&QY-9FK2ystbZdo$R@>x-Dl*Jj{|tc0o1(> zbao^lg*N1Q3TbIJ=9LD)2bjI^Q#S7mcBR9+@j*|?COXlRT7R470&dCS4;GN>-hH?$ zeMakXQxUC4%&6{8$C>GZ*|u0|$T;{E2mTl1WAz*TK@)R@d{kfGOkxyoy!2S~-+VeY zyAQptd&Tk zkhC-;O?l^*qaBjsP&DoUQ3Xci9$c`PF?y$SrD#N1_W;s{+22*qi} z4lCk~t#Dx^b6H|CaJd?XTz0DolZ8euU81#^<;Q-uBOu8wuTbv}|lat*nTgRjpO z+K6O21i6^^jT4mTN+S=XJ3WMy^eX=VTt|Oo%3E;M2iV+ovO}qH2P-mN@2ap=&91TE zBo!y9=g!v{Hk$&eHr-*r42mY1v9~!K2YB!DT(iqH9wTs%I##Jv2OO+Iai`mZP>!Hj zmkniMMYSIaR8!z_AcBz{|s3TCgX3OGu#tVqchn9(% zk!ju=rME8*8RT*Vjt*^)Nh?ZTd*WSgK3|0*os}tbo>WSWQdQFu6n!6S-l$(uDHlt7 zslosqNFNfGj}C#=F9qec-DuUvfE^d&PC#6x;#*CPV$!yb1~sAXJI5jbXdA?fla^Mt zceT1lVjkkx3(~|oO)ojoJEj>#WVH?l!H^T&-w&0Aje%u%^)}uECoYYh|P2j`Cds_Ue*wbp<2aGegC@a#kuP^R5(6m*=IY zKyZ=awYn;T*bQ<2+eh;o^!kj zDQ!~aFHk)!kPZBXE?XH=43dB@NVk2*$x7_~EOm8M^wK!BznU&1D>bp3oA9;$S0)#k z8iP@Js8tbCWe8FcAq_1FeL9DTY692t$7vKUd|*`J_PY|4fvOCAiCvr7ge|{sXa9#0|p2Ifzu{y@~PN>{bLdK77i{2$cYC_aC z1cT5XeSb#)8~}rpO_ho*yoP4(hYO7Q?bJ0XcO=Tnxw*O3%H~~p70HvNy7ZZm(60S! 
zaZ5l!u}~e|tU72`8pO=hghdnifC~UlKnou~P`aNcG?-O=AKEJ^Z8M(oT=tG}bBl$q zpSf*+Wb`N8$`=o*sabl3mRfT{mI|S@;vinf!?%_1Y(__kRmr2=d&=rF?AFgf4*fhs zQ|>qA@l~fh%E(A?xZFB5EEON<(h^iH(IXceELJ-#%JW{{RBYo8@N9AGpdNul3av(B zLrbE{p2|z8wUDHw_<%PDZN>3q6i#zUm<W?vL`OFZx>%~9Le_%So0?@EFWQ|{{YlKaIUqo58r&283FNCFSN;n{{XZ<8eMs% zNka2H(h{R|j}X`lbzV*y{{Z%Wh&L-l)gO(vot17gjq9BbeHv=n@6G^CJqyDmR^&vFRGF*Rhx`SgxS3Y7Nl60Ul zHEb4Ix!Ctg-op6wza%vE_)Z%78u*LO^Q5q`vq)y7<=2<(bd(iWCk~2?Vl~*5aLDJF z!-;;a-T}P8U2m?ne_V6n$%R46S)5l){{X_5I_6sGNE+v1mG7Ub=HG8#+7fuRk|?|xqGjxd z4uLLYY{^ocOkSr{^9pqhG7+cj&`N^Jf{0K+01pv9h~!6-M&XrD#paR4?GB7Iivhd= zalaG5WtHxD1?HYyjTHmo!)PXS+wBi7bDC{u5E>fNM!fC!Se*bCDPlU3kD;QVgxD$E z`sS;ifA99c^k6B^{{X$FlfnW20I+^*ow2vsiJjN}cCJXnOKV!2c|*HUD09pt?SFXK zCSeJH>Dv-jMsTWkQ_1EilIyEh$;AUIHt z^ruNxh0k7m@#m(Dl@Oi_3?p1vYf1kAnrw~vT_XxuO^FHy%>;k9ic=%PPY!0N4^OXQ zkK>CBAdDsF{_}tKo{5G|YgyJ$kO>noCFL^u5TZyWJA3gT4X&nXNWIm1%nitGb;b&; zhiUQ}z4=@MKNO5u=_D2eD`g{r3?qRb5U+V?78pD3+Ijx~WMY=Nn_g~^I3HNJQmm97j}Hh#bG=B^OvPb)& z{v+^G*pqF!-}j2+6iEA$hC=?|t1uZbJNGdxWwimK)It}q5M+*zx9H!PC_a8OoEvBuI zK^0{yTIRXp)*``5`ZPR8hNKs|v6ceZ^KX=R@*Gyfd zsE>CdxV9BRId)*>uh#@}s|m%j!qS~eDzHA7O)TIN5%~5T=P{)#(dpgVv=&FPVn@vM(wIZV(q+9mX1&z9&t!XPG zsDuDT)!NHERmy(O42;J@jz=wX8cl)o1z)Yb*ygo(B|$kb_#cvpuEUAVscNyt4j!<` z4`)oQ-#gxWe7)De(BL(WIY#dSqnU(A*Kar|GdA(Zh7FfL_r`|lzNLN5mT*PjCtW&0Klm;D;%s<_4Wjh&|kQlyCkFTJZgiuHFZ8%zV?&#wj5l z(eh+EkI7Wp_Z%|SONjYh*a(v)5TaC>O~`NBRQhA-?)4CEJEe;ef5ROblVmCxK`*qh z-Yo03?)CjbsY$nVa^n#Q^W9{P&mc>S2)2Cc7P`svJuNi7hplC@E5sQjy+}lW=*F zb8Yb}O|xfs6C17Apli88k$ZO-5u3vYaV1VoZSxd3+z$EjRuI@tgK5taP)OONM99*Y^L>Zp0twuCD~Dz?c2 z*CTvE$AK2OmbSySS}Jy(@-?kac8KoSmlR4xep~5UV#2GmA5V286J*G5$y@Uq zf#rM^78uc++zqtqC>gPro9+hy?46!Y9e)GCKarjDJzdhv&Qu#=L>(qT>9{|15^rv} zfqnr^5V)~DojvN{HpafG#g21>PgvofM^BnteyLGwFV!GSZlO>_;Bx+*~By(^?nM?f?{(TSohvq0I(b9L6GB?NqMT= zbdY9Nqh8k{MMxdJL0>Ea>H>Y%9_IcEb{tj^e>pI}TgS~AR%gmgDJAU7l~_<0-cnXY z{lxm=zwElQIkCIJ5bdSH+U|7mkLCptk)CoZL1h`Xgh)FjmKL&4xFW|7M}XHIVt08T zG|by^gESB-TiglsZfYm5XZ9+!VIj)1Gom#aF=K_&n_qT>q$CmcV(PL&b*k&I-A8<1 zFx)-SLK(Yb$3Mk!>|bagmaX!(Qd&*NHuk?oz_8(_LnL=1@liP{RHUl4no&iN;vAO> zAVX}(C|g={RKH%Uq9du@c{SY(w@7}^_m3tZ=s@%Z&E?Do`B(oYrcDMab?%8b7P#y0qN5VU8Tg zBtOtU%u{+tXo?-wVC1@^P9NG_)|)5%DL;lJW90rzbLIqp2|o~_X63F6erlE7?;iky zkJ^H-PXt1tAOXlfRZ@k#t=-{La&H6u_a5ef@k6thdbWO!5#I8C2=MJ;nJNlQYK~pY zZa^s^;~m#S$w?L}NheOBd+wVbjquaCEv0DQjz8hco<{jV&HAn zovt<0Jr`p;J6BnoQdB#wPd5dm+>zr>wg4bs&sUdEEN~s9r}lA|bl>B3(00XyM$$Ci z_!VxPa4V9EwK*JuPwuM~rX5FH7dRb#4)aafhf-Wvv9mR9f7VasgWBw}{p9lni2nfX zr3zv|ml1t+jH&0}~p?rwxRT5eVBJ@g=WPfgQPw>NXN_P#v zW#4%(czbYveV$Ay(JhdwwCA*>-mOw_k<|TM5&2=~$al#7=Y7OHC-Y4oy3hT~qV#x4 z@2J3}_#VrrmiKNugnYGrIAS@DL;nCz(4s1&$@b`*%!Gjgn%h*`N)vNvbe671l~rr} z$vD(56=h74rWVdcO!?Qq~6+uQJ`($YO`HXR>LB`#)K1RwUr}F1YW~n0 zXF0arI*VtMhFSC_-0j#`iIf@HVmut@k#h{XrA$(xjWQxEI$CkvO4fyd*h4ENT8SHM zZ-y{FTCnPdO6SD(Ha3@^b;8#T%ETLz;cU8evI`jGsz_|3~U~YJ2 zmT8X@g`8>O)Lm^~9Qd&uV4>TS4|+L#a=N#N8z}P03P_LB1#7w3tRA z1as582fUrY=I)nR?zmW-$4-1Nb!g=Mn?Z--55ua9jW+M+2!8O7nqJ4X#2wePj%|V7 zxo+vYk#e7BEYHSjRVKK{7N$tc?>d&#A!z4&Y8}IAK?+D96s=;)+Ssk*JAX}s%IF(cartA$O#9aQdzGnSN@0t52@L4&#Khcp#t5P!E z3ZXT&9b%&4kr}FXST#A$I`YBqg5=DmpatsDrAbbZyJ9;D|*z>X=q*g~qResRw3$Wj`4 z+7wi*gKrU#3ceS@xoxE1ql7nu-EZm1U->tT$t2?1HK)w0NuK)@aD$08EZxLfjYo)7 zY{OcXs*|!{)mu|Zd1?h}Om|vRZMg8?Z#;8650-pe4vs1a+Z*GFp`baW8w=PM2Ls<_ zCyMfYK|Jr6K1Rs%azlX^>S=9;nkuctF3r4K;Djn`^g6n@jZU4MD)~+)P} zhYO9ZX;PeRD^`%$N;H$G+zfUc4{h+=UB}w9*p|Z_#NO?*@^+vQ80?8BK7pzeWp>9I zpp;TI@ygQT0C6liW&m&w7q(zqyr@#4T+fv{r6!G%^5lA@a!X2P<-{Mi1wUq;c5IWc zP+E?X6JRwPV%0~6((bv{53@%NE;G#AJ{k)UbyogRc}>MI_eTX3Bigy7Im8EUxx2Rj 
z8#Lx}al1&!3*AI$jl>Qk)~Yo%(<3svOUa2p*`|gPryDEMm4vyWU>4lnDL(rViww`9 z!y*tl*}nF;-q%txxYTj7;q0u(cCx7AJA%;e?{2%<-oTcE1Asaca4+3ucQ{qq*R;i zwqxAZJ~sm3*EyuM+uN-z-EnhETwKw1k(f|8zbbgd%pvm_qhKVK6TEabxUsd5O|P#D zPW&qDDZ>6N_GrkwP_0+%va>Z`KGCS}ifosV^O2-f<3@G2%c?46;KYdzk!2K}Cttz^3dr!^v9AJR zu~>pK&R~$mMv}mF$rV-0lEcm}N($n<@KWQf1hzpzPmL%h_Qf+6qT z{4%#=V}cUdC8wGQ#Et;WD<=N{^gsO-z}*oWB_qe}kNw83Q4J`Vl9H6A#3f*nk4%tDI>t+q zSO<%7Ql?WVU1`*mcoxDP2|u-A*UxZ%SgvCqpt#^yN#yO{?<)~~=HJ50()ZB&7$b)K zWO*AUS%xqJ-3_C^%wxj8lqM-2x%ojx0We`E z#UwJ_uhYUXwYU4ks(r@n+>D>cT&XchLXzhXXEdOCr|Zav{{Sv7SXB|936Z0O&Hn(} zdX>T`MABP`4*vimJr6R|YP1?$5wE{{Y%Y zv$*3+&V8VL+z(x zGh{#D$^GN#6vik{^E(fzT*8KkdV*Y(+DIA*CrKlpTv-SwRULBO6{_sn+9tQz<2uC5 zRQg>PIpwF`jrGMUK@2@DhKlt2Eat&_8|m*m{PHmlp4KK-oQ!+$FZx>_Mi6JGz+_nmDVmu)88;fGuL$FnGveM4i9;TLyc5f0o^E^7!72lNP zHC8n=PRNpXUNhF$4hK#i2<}tkHw+?IX_ZXPmNSuU47k*V#)lopTnlByEMLA!AnO1d zivmwEhO4$a5iuCxk-45F*2s>fOU3zI!)t0=RAGBuIN7B2<_N!fh^yJRD%Epak}1tq zDLH8GWyDu`%W-yU$!AI1mz|VDw&M$z5Nr9&g4y#)H|zwYI)!g9 zJx+ayYgv6VKF7R5lV<$Q#>dkK!9`HlZ#;1GLgRZ^hRaN2rzYXt+k>Y`%gGmyI+g5= zyY-B3YYW-3NFejQ$hVlb9`-VCW-?WYF-mh7b7Td;>G4;KE#;~eF|;als%r}X1Kv8) zY)8Fck6p3D>#F9H+-W_>;Jqz`VU*PE=Q*JDmW3g=U&wUS=zlZic26zmGA-%%u?p-u zwoKa-kMfmaj~c^b9QoKkoxe1RSK>zw+GTy#aLcYQ{;p73ujk>u3a8oJK-%!T!2w&g zm^(rK^2`4K75x-BUgAblb!#nFnvjrrp|=zd%x{ZL7R}Xxn=RqOWiPsuxBa5IZF0-vk+OT_7y8x&>vU>}=f5z_5G9^ZbUgZbO@Mzd#Y&$|Qv0Hpkr zx({Vu(RBiY4}ttj(yhUA0BC&NQhyvr%eMGCb*X2K{Q@bwCy5i3BzD+(l<|YugS9N( zAv_+eLrFa3&W6t47fJju3;9RE8~*@4{f&A504jKFb|Q1!1GViR5}pvgk3Q6Mf_vPf zmZ|AK^!3(_z73^Zune54V9m@q$FYawWJ7i)w2L|HAI?4ir<5Zn8y}Fx{ym!b>c;f&nL1`;2xb!cP@Ld2RYa&(<)kE|8!5K6q`=$)aDhkLV!& zw2BHy5)Ysj{y4z|hoNlD%(Y*AZ4e#VeO)(LAC?j+Wa$ZiXpf-yr5b%|hL;Do!C7xo zZhrpP-)rD{nmU_)ZNt$UM_Wi7J+m%6Rp83`gR|19Q0K;@S&@<*$!ybE zVYrh0phkq4+m{>Ds3j=}V5=Sa^2N)O+Iq;DFsme+w%2y|1AR$cr{hK7cQ zy8EpS{b6g^3*~<0ST)&p@daPcmO5(UyVTZ7#f+3usj9d9-8m<3DP;2a;i^}X8`#gheo6X=UPpGn?gSiIU_prD zP*POLzs7l9Hzb!9JsSef}q8L)X&CO z#Te{v^p5`kDsEbjXOls*; zEToXAt?m{3BWq&7F;~NY($qw3=1DNUfz_-!9F6XEQaDTZajEV4(L`^b2R*$#2}6f8 zDq9(+iPTzLCSoOr>6Jyc7L^k8xD34WmLGI2;OeyV7qP^bDbczEN?otAYftjEzcnYa z^{jpi1kL?t)Ol1%Zrrr)Qy&fT1va6@t|HEn4Np}eXm*~kr4H!H5Set?8ylg}H}{JX zio~^r+ci;d)e#f?vcKjPTWl-sRfjkqd)qhsBjlY`#5kAL(>=Z_gy%e#oo-`RD)#cztU%Uf(hW^ie@_^t6q^Fkm* z;*B#cN>pOHhH6e@f|Zl1%F1p!@{k8!_MEaw+wTH*+Uv8-e7GF5w*1Oz<8k55osMWd zq4wfUg|u8@vD*+~am(7md`p^qE$8FT>W2>@Wv(^k48H@_uua5UDa=dLc?efG;e^aNAOd0_s;Yw)ZC3x$WS+ zObt0$JMRS7A_PT;ViEy+7U^J3nEbP_&3A*cT{@)4(dC({G^P}qtVeSdW-199yXX^B zF)0?mitv;++Wf9Dap&C%gHUp2q^y-De)LPg@SjimcwEpP@!>;PRhrnvjvS_(@wXdmgAP=tfCqe%JW?5S$aDEK|jJ=nWQO&e^ z)b8zVA?avt9M{t!-~hdl%SZ|%l2gp?TgK_@8I9LAM;8d`HWD>DwIr&*=~&rHLRW2nBACPF_A^Th z3xRM3P~&S{n=le`96NFVss(j>-seaf?a+9(+-=9}qdNu2uGbk}UTWdxQ;7_>3IkbB ziaEb6NrvO!Z5A~`rnh6Zfo~R>5Go{#fCBH=9;@W%5)k_eHA4!Jj?>Yx)iOyPxtSiHea!@M=)Rrl%UhBg`X@5PON6%WbO4jExvt2z{{Y<>rBbv1 z0N$LB=!gFR(Q3L>MQw$2oiYLc0OhyIa?UAI*g`-iR8?++&1ffa?*{nv#H|Oh^zYX9xITD*#bYjs%;T!N>_uB)#G#a4#E^T( z&3bM6BQq9SrSNNv6*|Ua%T!3I%_vIUn^TV+C;}54)Z)?yUy;XyxqqXX*tI(t$2gd{ zG@sXhKr66q>{872VDmdAN%KYn2Gmev)3u*|xsUOX0?N%^XwA>20!O&~oIW+ES+w&{2^rM`#UhAYWHp zS*y7k+A2DKwb#PfBhv4CZ*U3GrOwbE3933~R7mLJnS_CJXI~-5ng}}Sd#DRG@nQ#Q zJ}y>fWt^p%^0hlEQfgU7YU;1WQmP#PUz!yACPa2(^%23;js_I?2qjN#WAw~n@Jv^nX=M8>VKb0#Yuo5Y#eFRTAT~m0ZOsP;&-hp~OSCkKBpqdEixa9}c1tcjs#l zb~{eQ2%u-Qf;SQ~zUc#`XT?gFu&_p48V*V&&Yw%SZLG!{iI(#(3V3-9Ev1w7CQ)J6 zQ>N$^esbgmayLu#tm z5^|k-r3S5Bb&2iBeD{U--AKPT7Eb45bsToAW@uf9O9W00yGR*Wcn^Z7;PcMP=%lla zoCyG&#qZN@9CB9+Gx4vrl@~eYDwhnnjmF%Gm6WCv1_V0$iDohq%V{AIlC>Ze6sVAt zTIxMcIbIpEyEP>wY{VgkNk#XZST0WSIg$b6#OS&fJr*S`WRpV+y_wy%pRxt)I_{X< 
zR_!6$zliw|WDYg)D?H}>oT;(u35!>!u#zRUtTeFrVGa^I$2QeteZjUYyC1QeJ4Iji zyebAVUD7VnbT+uQ@f-(&sBICeWg!!=v=RnwBaQV;p3Omj`$=|qPzxvdd6&r4osav) zjC6ch+LB-4ccSM{8E4h%x^?uGb|URB+8eYjJBgec$P~Qkl-tUb>O$5t6*?r3X=T|; zSPV&dwJA1vyC`f;?aLK?n8s<~rYx9{Zcc?w;%sx=-O=uY>QBXU8ng6%_SnnC$7&sv z_{AX!AJWt5?Nn3nF_}B#%!sp`CY*dFy);D>of9& zNr0Z0DpMMN91_a-Uz1)BVF?at=vOO(5aV!LuOlkNLMhvAGjY(&iaXOn%~{{S#=^(kx=n7y@M{WcIkicV@f1TR|!Jyy8krf8}t+W9kS zb4AGmTMcs{f!kw}jEnQxVWmA#XejB{vcS@#*KV8i!JuD)8p7El9??;b5aUlm8n!x+ zq}+4Zk9IJ&>sRPTQ&g*@C?rK)fgo#D{{S3j=sJX>WMR6*^{A!Vb0uLnDJ!`fosQVi zXbA%)aO!=Ya}_bF;eK5pI|3w0MJS4^JKs48u#io(k?rUwo%9=X80S&#WG*cL9&?=C ze`kE2-*EVa+vxc;o_L^{uW0tqYa7kt;s(6}{zs)IQMe~psBt2b!{;O-I;5Byq@|DO z#FiX*StQulG)2kY_uG~auEtpvE9$ik4>tY`yzlc>8axfoW888Ka~g8boLoG&bzK?k z(ez+K;_nXQXYL|r9PgB4One$tdUSZeO^6#!-%)uhQbBjTN=>Y!qfpp_X;{|XRK+*5 zrH!NwN#54|L=7!|wPVNjZj%m}Q#wNA(b&Q9x^vl4f%-r3AHh1{{TY3dVT5YolIPp3y3F@)>2CZ3fgiP4+m3Yv zb~S_God%Q(2wUcUDcuw3{Op+vNN<6e8S7YHLkoDHezmIs%a3C2JnEWA3lvvn*33xV7&V2(%XIL6-+f^Zy|h9Lg{dZI)8?)j(Nh=K00qxl>1PS|_{ z;T;^L)A(tSV32n1Xz`J6k_IE>j$(Q8IQF^UH2ZORfaaTa06q!E)@sz0uLQLp6vIk2FJ$VyYl4wy-yi}pg%#Oe;OJ}2h1T^a!Y z0O8udD`_9A<37y6!8i7)4y2S>bht@XgH5FJ*#jH)O7Ir~GCbN~P zZWV!Pd5)UelkZQo6%Up*{?OAAc5fcWKmP!Yl7OVb&06j%6QG(TsuQrKg8gqpxLS4kPYp-o%dQ(+7}2j z7YwP-&CM{Umgl^MMN?Tqw;`s8D%6&=$3O}?h0<-Z6Jv;3wvv{K@`8T5MM+=v#TAq!l_GOvyC27K;SzmSMW$k=J)sV_*}> zSh3+V$nfHKZ`2>^@GsrotN5Hv4-FUZ-SlZbd8X}Bm6ahvY5F#4sn({cw5YOZ>R|~{ z=91!^zSkn*Ha6wH4T@Qu;v{9in*M<^G*L0Pjm5|KY&=Ot;oU)HNz$zzT`z>lv7lLY zZ?Wv9xx{%bm4Y-=(n;%iY21H@9$)1WFet(6ccS%eRQu78ezGe3N1mZdh_bCWrcZaB z?nIk=vC7&xLQK*>Au+>p5JJV-xi zhc-PDR57Q%&+i_HD%kb6K~gO>NmOFdu^!GF`M4E``6m3?{vY!MB&@T+{qORp0*G}> zCd4Un6gqwM1mEz((^L)f+W!DV5M_WJP4XnA1`1H5n*pV4oB3&Mrwzw9J*SP?cUwq?0<*<0C5j9xyQ8q+bVn7u_#Sbguy@(nKKpzzV(pt`SZooV=4W-f;W4vaKC4? zxxaPQi)QP-%MEQn_nQl?+CT%$g~d)TAgi>Na%m(bNem>5aw4A1{oL)2^?_A;d=ZnJ z_sgB*BZ`ix!^J+)npXb+%WE6`Fti^qy7$ttf-xmrXnhIfC2vit*N(vRE$GoC)nV$ z6#QT9Wt3)w)<1t|c3YWvzmY$%lHA%uh{{VD+ z{rD~e?RIg@8n&UM`r;$}sQBuAmM+)dBN6`q41oUtnM?U@hZM%D)ER`!Z^&{&opmgw zD|IM!Xh2$ky0oDn3-TPW-zinDemhjn>xM{LGste)xp-&NfY`aqFnU1lEe`&oUFuNz zK4fYS6LW=fKv<0yl-)u~lAscVGR#MmRyvDDSGd^YcwBFr1FNQJ&CF;Y{{XPj{$$SC zN*eq<=Y*2|(z*Wt!8&8LF3ovTT-7B~a}{o(Q-1rYaOinXRJRE|N48FM-}jH1$70%f znGQcNW{^b2kK?GRjf4LH#>)<%xxFNkeVQV5ao+B+b^L6(<=NtBh~-SAloJLRB%QBw z&+*4k+vvBNz<+7Wta3K+CaB%WZnSxo=))o<6ZkE_(2K2qQ9^WqxfF8rr!S4X8$-@NA2SS`iklIX)|EVkE=@a7)oY}R zqp2i+(T^c+rO9-zb_-n1-RR5Q+w^Zj_jrQFc=V9DW;i!%*yg)-$H}tBez^BtRebqX z$`rmZ@c#feW^BioGWJ}jWSaUMsLZG|+-10$q~@a|y(-dFw*zQOZc3EhfZH7wDk{15 zRxh)$&yq;#n_v#`vD|UBuV%Cxo(;_tHYUh%DdBkoa%rD@t!GCW16_B9KxVl?*&3^r z^88qJ?C|rkGL#z4Hbr55!rty;HyJ8)mXZpVKnKDBC!ohpF>1pf@VK?(#J7eh*?0=N$EM)wz7md}Nq@f`$z@^gDPl^lR zm&k4D4c$%^D^b(AHzRy`E0SDb`!NGCJ?}E1YnEW;x`uy>#kk> zN>|KXs`K?HgINbNWz4^k8>hhFBiYs@*>II5RmjKE_1|A0EUgnfg!bp_lX%B?to58xenA#PXl3$4c5uNFMVuJO{_KPE0>we zw~uOU*X<0*;=HO%brL(OH6jc#?$nk_?aTI#Ivy3smdsMw!jw4lz0Y^6w|tYca*d1( z;+VJiKpu7{%~tx~6L_hcpiQOM_=Chc%`w%7R~B7nLYQ&DLV}A$VFf^dLAAjEjljo2 zquJatjluNrM#lkbhBp2-y^m_DV!KI-SKsZE$nr<)IG*cX-I*&fmXGp}v9}ee^O}){wVJDzi^TY1JsG+>(myl@fXBJM3>*tZ%Z%BYQ{q)TQ!AGITC&HWuDl%V; zRvo^@g?2nn8*+bpf6O2IrE>YV5~MRE$?oP0r4E6!gULgc`Vo&wIfN41Wzo^ty!zMV z9eny!r^9*BV&@#MQmUc0X^hk%y(wurkQ+njDq6e{S)hye5wFLoba&}T%M>US9>UUTR7}4bWZoXtWY3Wr6Crg4 zCaRP)l_;Ae*c)Sweoxg>mSR5D#Qt@*xj9{zI-IU`(=|P}^^f~o+Q%Kx z=Zjq1QNU!CJ190`_8RS>A)=VYTraPn4Ukr^t)ZnU(g?XX*mF1=ZpXFT3_p82OBw0? 
zM`((qdqx1j+gv_N*naO72zCprQmEabWX#O7Sf?S_u_MR3D_*xvQYwOcAy(KO&e*VW z)lVe28(9FooJr4zvw9a`W+Jjub@px+?+M|=zBuuBhumP}j7}NJtHz_r&DHD8OY)dv zpo39yX>l#6w3Hhmw4P*yk0FTMBV#bUM+~HXAOS6kEUeN+qXO5l;%{yWdQGgVp>d{y z5O+zrb=(QdJ7h8f*9T29P+en*U7)k4cxTn$o2cnlpXWyQ<;~N5@oJ{o%mR&)C z@j^+qIQ+TB(9nNngYimPS1xfjzfP=CX64!SpKFCa8ssjLlKM=Pw1kki7LYr#!ip@5 zpt|qx;haMh!ztd$81UWKG~{*NzF7iuzIpH(St^Q~7OmfHKy!nd;d77#iwv#?S)D%s za=!qpKvchGjO$#osd*Z#;RA6{khV$*ByNN@k1yUMk>44Ka*>C@lja@PKNIM+LCLLS zz;OT$vPX~?^H%#W_B+qjOovFOOQln3GSe{*mZLPKDJe=;KKrRvzW)GA-=W7Qt>tct zD%Z%z7XmGFZEiXbQs`8hEf}P2B}0RENG93oew*~EK8M(DLgq!CU9VN7l#mq5jyB_Z zx9Fr_^4k@<&RR)fWDEnZG-Eb?fsPI?F1G@1KDc%(k;A?a(DHhlrbdSmO}N<9j_DR% zt!s-|oN`=qY38f^ct|XK9O37 zA-QoS#d%D>5#%T-4mRR!P_(HjP*AZ11AKe6@d686E)HLU;$g*FIxlZt*>nPd547(V zrM5p}qalitcIZT=T)rcpfzrEJ$ETrAws&eum8hReXKcDli1xho!~D*KfpJyIP*hdF z??+22_}>G&W@!k?q;@!`4O@{j~Z7Dcs-gVsYz|7A$@I5c1wyOC_)2r zoKB@FNl5VXBWwpY%TqKLx#To!p|I4Q6HculT-jb;)L%(WCxEyK(_mSlTnT62NqG^SL5QXWXy#Ey0X<*|Nb zA3aA@_SgfVBzB$ap8e-|_Aad!)#7Z9I9V?1Vs`UD1DfHOBQ0{{l2xbnV(jHa%yIK| zJAynpZ#6aChN><=FUUu9g4=dm%U|6>l#p$(C(jN|iOgd$7oF35xMpsANEhr;QPtFD z1Bkf<^^toVxQ*j((x*ibQf;Ut4Io@v*08YXECq*4U?TJ{vn1!5B;W*-rrz*y&i-lf z8YrV>8z=%rt$)i6`$Kf$ZP(5rHqsQ?SC=#L!buBuD@C$xax}Mb_l>^|@Rv5i=ODJ* zXc1Mm+Q}g)Qik{S!VgkV1REkR7Eq!4+=a!jhFtc8np;|UqH9x1B>n4d08ZoQhRfPc zY0cBYC#<$cr7+^GdfV{={{WsFac@F!H1wj6_hQQ`LuB+%@#ThJWVV8m+;kNHT2M}+ z4)?Yjx_C77qRMD#Aa|N?a1;lZzX{1GB2Oi0PLz|n{eyIR4~m_WYFtM$77pF#dY^(oJbmI5F0%DLHxH>RM~Nkx8)`hQxLU|pKA`mS z!C&l^5d=Sc-=*i_WXalUp<=+s_Aq=L<$kb;SMwv@zl=bKtlQAZM2g@Fi7tyJM%prDQclq`kY`(oYgIxE@#Ox{f}uZJ<)j zR@+GOupfZMghF_gPcF7rLHlc~Bm^K6({qIgfLI=QC75ba*K^_^a@(#3n?yrfV!Hv@ z(cOGUn6+viDrp4%(DsF^pO=RoY2_pVL@oQ?gXX1j;~g&{+1xlmen z=XrZy{PD_|EhqCg(GtdIpGfpZ*CJA@Ye|nGO{T8_wG_5U0Za=MCVjfvW?Yh1+moPO{5QEyYN8n*3ea1D@NSSVLwtnpc;QGUsqSQq-E(TBWktsa4>? z-3dr}!i`+O0_oo8%dQ%yo>zb)VITpZCr58^>+(U%1e?}5U#7!cv?Wt6HdgY75Oa?Y z>G@YO(6bM*IO}pz4r6aAlC=iOT2P``P_+h z<=Z)W^Zx+xHFjUH)hYQrB3LXunC8DRwYm^O8wa}tl?#r$`D4m_u1*Fiu6vC(@mKAj z1+Qx@m5-_ifVt}-#Nmqb=4;P6CoRd%*^<4STonq`RvZk;s5prCLfi@fMvWT6+^vFk z-vq6b5foI=J>s4?qUN#0U91J!fp2-v;;Gn1ArOVqmjS-#J5j`tJcoj2=MFP)s;kC4 zhUQe_242jQRY0%Nl$vUsk)`PpX<{fVN;G)gNk~=57rKf@;@z=34~iNYA*rWx0h*cP zdd5bJOvi_R%a|OG?6i&@BVU3oPwng3-z?LZp{aa9uhrR( zmLEhaG8$+;5(9xQNO_<}y?!7pd5(Aw7UXjstgM=?<~B9&bF+CS&T%6qwbsa|aQtoo z3^egx>}v(I&;i7#Gyec=if<8D+R8Ri$l1D+C7u=6DluHBOO6ehVKAKvZLLFNGrR;S z{{TjiKq|U=hRSkHEevrQnVRtDG!W45`mxG0Hb1FtINyS;?Z(^3Ga;sAWv+y{l1R^Z zIkp`Z<8#!S`D=^QayJLER$OX-3+Rz383QpAZLpM~H_(%* zNL9t1uGw}~R#a5Ek~z~i@W(;g@xAQ(UGP@kN+c|F?i~#EDn-uSs<@if zG~{SJUgGpxZ8jjKMTr)u>{#&9(n1o3!!IVtPMZ%6>@gRE@^Oc`;<8F;i{5vMEd-nK zHwQD?s@qw$H1O0<1RLS^a3F*Xv3A5`~o9*HFSuyM~o}UbvkhP!y=U4zZUqNmH%a_&SQcrm$7XWVGr&Hf@ zc-;Y=-Ks(C4cRLvdl`*VU1Ks+ii&KJ5)9`h>DX*bm6p!^5NvdHxxMf2U!vM|uE)vsUhdCVP&I|p&TQKj+csM}kbDPv zY*Tnun`B984yDLsn}9$T>%Jss+PX$JBY%pnQ1W4l%J3dHLFxL*S<#W|Q!=$4iBg*x zP!3d&M_Z{X?*y3BrN|;47k}^ysehv)H@#LT%`1w4z7a

rb;%=Ts7I`0;X z>0o%OEu45=O`){IXk9&SD(WTJgj=BT0C78m?KHEJ?CsQ5-)XrwrKOApmgoCT_=G}> z!Xs9sEo@Su%u<|51~j6D5EL!|C-WF8r)o@6+0P(#kCKbSc`=H5Yx^MMmTh?)bMop_ zM+vf%hb(c6hYVAyOs=^WT|KrWsVY}}CY4ofqJRJ@w3u|xgoOm)YOF`vOBGRY=2+YV zZfoN3-pSVBcpDA}=c--KG1pf5EjUz|-P#PS%+!&q?vQG= zC0hRg5sGFHD2tTJmpg=3{{ZpCt`+nSpI})Vb@qV&0QnrbjA+#gd1N#tK82w7fU*iw zpm}ViYWM7RwXx|VTNC-mQR2M!CJ$2y19XwpE~LDLLz-s9l`4%{_HOhAxgHzNqz~(* z15!rka7g!a#0D{48DVQ&FK#3c3+kqDoH|I}*(w2)&7ieEh5dt~W*0Li zajC@Nqp+D&64GpuGgwI)7oY@`4Jili+?`h#;9Qwv_pQZU5fQM=4tL7nj*-L@-_e?| zW7trAoZwz}ots6kp}nkZl)E+LW`Ec>X{c~=tol@%hDVbDqa}qtChP6wkq8EPH+RuVyDneNEuMZn*C;9bAl{5pC^Q?!>mJuG+;e4W;$ zFfEprkeOv8Hk~!>(R6ErU7B)gGdB<_`S&hSs?}+dU6~o@8*NH$E=XFmrrU&oNbw8r z?#B+=4Z5n9o*1cQEo&M90^;@ncd4vGX2e?fwL0HE0Zesn4&){q3WJyEpK!U;@ZJF+wc+@1Okcr;2( zWF=2gc(qht=}n9y_FJKl${N0OrC90&{{YG6orkz%yZ%g>?KA%X^knbU#a*MJ#kjV$L0xvxIU$kA!5j@upFG%XWUw5>}@-(ABcAt=(4g%#>H`-EfG+@S5; z*fEK;@QF+APG>7v42EvGIp!^HyQ=2K>}kcG@LpOQVUPrpbK*w;p4qdrcK|Z;b0D$W zpDDFlZC&!2o82S3WZJ;@cS@JJB!vswzyr1*Fs@Q@s)q-hHQR?Pf_3k)w{o;%oSV?n z6CCwhuh%d%(&pEluX|Y_xRTO}9ID_Ctyb7^DMRlgZVNv2aGzI!8^h8a%-raByDVU?Fa9Ld+zWX(!q`QZB1b3?CB_XM(j+B18 zgMNNWuyF&3HLfb>-0Q^b$tn3&?88=-T9mb@l3bADoVMejDM<84#g_xYn}^XwLeZ3M z4ecOidrPiQSw-TNziFv_<3|&2%>eEe38ojM0F@Oeo_l<;EpC(&*eyg0l@cyC+@E)@ zF!Zd@D@m3kW^NfmlLhGVW2T`Q9@Qc%>PU48-$LCA*dpo$8hk_x8>k#|j9#J)PEFwJ zh7({%M-V#tSEKe<1gOC&?KE8J04{Lr{Zem`Xt+6MbUB1VcND7BD%;eo#EOMt6U08~ zn!_t{{X%sn#^Y=BzSzjNW*Ufuv00>LmHr78n`bT`RB5W(=QP;n1^Wg-SpNW_Ovm%a zSaPQ-r%G!vWf@BaE&W~^0Tw%0G^WTr_ZR8UmM5t&>bN5e4-OrqeAPP{+8ipjQzN2t zNy|nN+iq@sI`^rsCw7eD1r04nrSbm&v_@8aNg;&Ovu#PXMTNIQ*@`jyC}O0YlQ^*c8Wc?sHspup@_AJa{i*Yhl$9r6eZ%+>y38VZQ|wBm-1gaa;ru zU`ZS7e>@<>?MiX7wQua}2?;9+U!Uvcg_fNzlkElWioY34)2(Vz7vxUZL*oTF$pFwI zs%=KZx}c%V+UE!@&s6AjvO$&-owe!PV1$8&O90Jg`VG_+fReXP`&RRH zCsDE2?!Es2JR#br5l36`ZTGLf8hdP3?uxkA-WEtv>1-gf%V6nrN!HYEbtG8dY&7N+ z;CMtFLV}L_et1C%#-0e1#b8_wE5ZtN1eWH?0bH%r^5uqJo^FVfsy0=UH1*pIKdCsu zb7YwLAgM=5Jx&>Oig*C4Pise%j?6uyk3o^^AdY|hD*ph_6I&^sElvLbsNYbdvAn3| zJzpi{dPK*N`@Q^!W407X3fQb%+u`YtV(wgBg6ov~QXQI4T|u2zFS_T<{&>%`xUK!6 zsH?NuT5O*7;DVAxfFqH@nFE2(MA4iakm;(S4z0wqU_cs5`E%d#!)~R}T_{wG+$C6f zwoEb$F=O80S5?p9wgs05WBuZMk@UFJ;WuasVS(CnKSNqe)cmVDGHrH@R~saHEiPPq zjw-YAYe4X9T-P2q2jIF@CoTBAZs)a*Z${~Ffe!V$Y4LB0QFxO#FI1ya=hEJf3322y z^3vi`vhJ(4vAU8)w@^tIwYuU{Wj0;dY;s2ThX*>~*o=U-eSMcz{l43VV>3fMah4Xc z#1amB#N>F6&<|>dlw47{LiCf&Ct-_th1R$Wo_k??6eD`kVx+7josG6Wm&W9Q?H6#e zTyr#=0-R7?yN)OVq7!^a%*>ROZqGG;`zLYZBxJh9cDSdxV+os(nbif$Xom28aPo<_3v z%QAej^F~%})*A!MB5S1U7PeYyZ{(wTN75F$wD*&k@>GgNJ5H9~Vx;7?cx|N$Xr-V= z=EN$=@;IBSH;3|zOC#+xMUCkMJH%LP(iDjk=>y9Uz-m1b zu;D7*W+DdX%&Er@KDdQc>-dz3% z9R`bJs^B}#bWwr(F$16TSyk(Hn+BDpIO!bm*C8Gop!`&0RnBs9)=I5Kp=Ea|OfytX zZam7%s+}qvfRK^gzaa=Hx=KM(ZnL2qfGvwXH3^!juBelK?c5=wbP?78An~?HSh|mPs~`nP=xNKi6|5MyBRrn>0T9<&=$h)v8;&_;uR?93hyo-yF)5jM#T}Hgl_}SL(@IhqEM)**<4@Wawq%_;)IAoT zooez#mQ02mQBt)dQby#SXwomg=Z(u>NfT`AAFjKpdJGd2sek6d9mINBPCN*pR~fBw z;hKvt$P+kAoN3OfrCD0n2`{BfQ8yqMy2-h|k%6bhVtXSIogCL&sib_sQMkU#)kf{= zfuowzdfj_<<^d^bo)=4-!#`+6QcttOZlhIvrDpD}O_d{Flf96ScZlM(U$rfcrWk{B zkdFfG--7A*2PdA^3d)DSn+UM&5&J{oycAc$^#warXaJpIkCoTc2W>zWVeR-S4ojbR z%HEyNQml;b9OXsEi&T)~tjn05{d$@OI+UG61CDnN(A5VR#*NFtcACA7!=L&0j&t)l zUo)D0QlVt-8dmbQb#_G(9Xf?7i(Yl~oeE(rR9JE60RvZsq$ryLYzP3G3}y|9)YMfr zH(kZ$?Y6hO2m_;4TjeWd=*G|02t!(17;v?LyIcu9IgFC#Wq&H?4$ir%ua{*pTg!QS zirJPeDqLW#DrQw85`vPIr6)rDoGb&l7T6PWW5ioEF{O$5Im0oRNIVJB{he!*>$>NbPKMljJMHbpS|0|^f}TmR++yL2&Q3wM7EwDNM%Eqk zCg~0u+;CR+hgoMcR$6LoM_828iYaWN)Qw3S553QGY;ufxww^Lr;bJ9h#gxpX$&J$I+Fr{P(OjH|zOzGTJaw^SZRy1ud`#i0;z4b%3ke3NP%x zV-X@oxul+=U18bA-HJMs1$X;NE-4`fEx_AbJ>u4Y?u`Koz+~Kup|I;_ne 
zytb0{m#Q<<7}ksG*s11{r2hb5;ufoDocsmV4Ry;U-P41b*9-T9ZqlIfN5c-;L02AI z^zR)+wbt|_!B%cKWcc)o(-9-8TL~7pajE{tTy|>JG_>P#Euq~_sg;6) zp=AyEf6E98R~Tz6nr-4a6n&1fn@ z+S~R@2>$>)Ls#QF%^~c|kI`3LA7LYD0kZFQ@bEsXuRICJbuJ@QXf0+Lv8$1(6!sd7 znu^shw&ISZsiindnn@{5(3J$-17JoxzqYvJXr+I#8>T`@wpSen!~?FR5}MmxpTn^B zGzMAjH@k8gbY?sCSJs7)W76TuW=>D0PK@JWx4n_Tl`S^}U8lBI;D65iIb)I3Vxm`E zy}*u~{KbgyTxPUA&ju+8wwJ%C`6tyXk&&ROhDnc>93#TawoD|G%;~yAYCU`^I2qAY zOLD{QI}Oi=#hfc9}NT3X+fM9YU_Ff!(Con_JeKXMsAd zvI$y8F{alxxhJyv1CZ9}&P;I_DVaCgH?48*i;yj1PGAdMap}0pSFGb$n#9R8rzA~N zX?&>;yo4p!TeZPjkOrbj=1J*`E*nhOG)MsK7q$AYqJY|p$s?8Yw8hw*tauSc?Ayjlw+raG(l&XH)L}hOG|G^gdR~3<%uewC08F$n zi2B{qi>*rmAS7P>$)?&#k70toNcRo*b;;Iicbo-fZ;*y}PBm$LK-*M%(5=Fra7;Tz<9N9Lj z9aP4sDSEXfKFuBY^JFE}yyAjK*_A0PQi_5|u)W2tiuk~ziQd=R5Yd^qZGai;U`4g! zuaz93R80wq(GB7V4r`rATnk9*1e1L9&{!&u zA{Jqxxlp0cq|1jXxgX9x!9~OQ?ed>3{ zZ+nw$^y$*XYuFaCXQd~)eYZlEpvnjq0Yr-@dt2RyC5YPST6y7IDB3tEecGwYwh8;3>tl*QLbz~bUR@{aw{SENL zopPNTBlb%{CNGltDVJ^TCA-N-&1PkQ7udZ?G3TZTxVTIc<$Pt%3;v*n&CV2y?ESEa^rR z2N`>K<; z1v(Odp?d`t5pGuZk?zA1)_^y+a_>^qxJ5(AiGY%O06-k2ncQ$ka+sK|>K8I3vd~W5 zNnQ&3WLCI09YFk(l5F*Hce8spPg|M?nMPyfX>2m$B|=YKH?qp3GbWyCUv2(_-1f~$45u9%IQ zY2G=24-&07b{&OA;A)HC)o{>zKm_@v%HC^`hz+|9rlC@u?<1uwHXB2M8VOCt>K1Vq z2cE9GkQ_9Cy2m$b(xtIYp(U1up{zMr04_Dpi5)jlgo#QMT}(Lcynx{d?*So0r!^bw zMkIt|cJ3>j%xx(Vi-2;!M~^~r#W{)K6A~*huec|>N1u`b{Ba*7l1B!rO?4I5ydKpG^}E!3!fY7Z7^~syRi5{bMfDGFvdD zFotU~DzaoLV4^~tVV0W!0d$lhr!H=9-gLt2@fJi3qgz_t{{UVItPf`KRo}X@+Dfp^FTx>0eRDx1KxYUvjjj+>Sjv0QAXqyboh( zYkjQsEiQ4$5=l7@H#hT7%5F}gsyrD%sJ-&i2}`Yyz1IH#jv(rBiG#~-Y_2prCrTr0 z!$r?XTABkes^!+HkegdH%A1K&Epn$L`zQmbB+RxZa9Z~{lDNF&)Ai)3TYG`EkED&U zw(gmuE-AwvMLI9EBkK2~sFfao08f|46?tQk`R`qII9&{PXl0XK8PVL9=54A{DNR-1 zs<7@_dAF8AP{jC-#c|P~f>eaMRtF+d04Ztyv zFn%RY>>NeAD_1ENFZSg47LO){UHDp3lUsCv3R+|X>pdBaRsvMWv^E%oF z{{Wd=7plQ@>J(t683|z`543LQ^$?V@6)KwxvyW)1MH(`o zQ=(^DbwLr*vZ4c&>1_poNWIewbI#`bUwdXRRQaQ(l2dnY0e;YO^UFU)#Ws5lhYs2) zp@F-td@^J+&5mif?ich_a+g(U*4}PNQ{%kc9|=1Sy>V}69l(-YA(X46#R_{sc=#$TFx#NZH!`=3&Z?E7NqxM$A3X5|~A5yFwY|kZi z4jfBvQqbBRou~k9sr5MdzLj#ji5$9e#UgAyqB@2+aBui*-XTk}hyft|t9c826PGb@ z28%)ChXov~)MB*dYL{Be@qukcF3G%EEg?jtBwV)Un~`I2*ADFd6zO(*6NC(#?4j7@Bf8=|Gj z{yF@YZ>5Ei)5jZz@_6M(a_`isJ^bjdgegC4({CrQBXD}#mLu^f>OHQ2_FN;RfyKhZ zF7Tp7WC$779z)0xU!pDC$D3Vk!b^;(wyx?ZHsnZ4&zp6{nV8I!&z7w3erhpo4wrYG zf(sqO;4n;N0nGrCGZ)FkfzoZ3eJOW6WyUEyCZ^G{#WD9X+F8-_c}R+h5(76- z({tVEx%0jwu*&}cZHhi=-TB8fT$_x@ZPfK97wmAh(ZwEYYewsI8E63`A)wDromFu} z_$&l7HYCQ>4T4>4q^FslJe_;1>n0)?UB^E29Z#Gt*@O~hK_@PG1#l~E8#%028 z3Y*doyr7$YAC4mHB&g75_13IdWE2~~3@|ZfH24 zqpo>qYurfd8^g~(1uIWOOFIX)Y1HRq>N6fHGplAC(#uP3yOV1*7{{BDQl|8eFcGPx za{bie!A}(=nm9C`zk|MNP;50Mru4NTr=i*T+5YiJ!POWTc-=6pE}qcr%rR4x44;r<&)omqzl*L2HKs2tEeI6qQMSLQ1=>NxsLu zDIV*a{upXDn+xUIv+W>c5=!YwX|vvm3L@YHBH;f3eL#K*!5h(!*1ur4nfVolVsy*B zAiWiaoCs<(R+bINc!e!P+(;=O1mWMb3v&g${eDYNCyBq;*A>hUrPWRz%sD`<^}VY; zQ-#XO9H;)v{`_jUw5}gve`|eLp9&MsM4;CuL6XF2h?3KOG6->{ zw6LVvTW(N~Tzh*Hh(1;^;^2eg2(qs1^<1>@%PbB6iwy0UHwRwj9M-1HtyGgTqNwH6 zHW@-%PK4NB@!a5=NLdp%S)t7Vi{jGON+g=s0@({>I=;G;)4(3f{B7+?nllDY;C4@s zoGVnSXV(+Z{Yo?jL%va1mYKIcD9QLrxi8MA*9#_w78R& zjcB>G#+Nyalt>`BGWnbNYP}ZZjFA`U*z7%*axG19DtvOybrn^saT|BOW@UNid!}Hv zE(5GZX)UE>Y_&F{zj&N^3v2eRn_G(go{_{rGkw8s>})2%+(Vp8i-!V0KI*e<#vH~u z)fGm<pXfcdKa4xFXp*<&xskwF-V#xsdCiHUqgk!@>}qCflA$4)E2!*nNd85 z$FX)Z3KUpmH0>L`aN9`^mDyv^8_i_WE28#ebxTx1z=A~Puh{c^i zkfgp4Wy%(F;$Bb6S(*BSG+Km*W4d9ymYpl^9aCW|Qd9-pLuoz}u{O6c4YXC$Qqap$ z00qn!%n~&h$PvS>Pwb`{4Q?q-M^Z>+k84ec2E_0uAbUYpmw{~X!-c$H$=SwpYtBu< zulq7-b$Gqf7?75h;|oK~7xgGQl&C39fJ#ydMajoG$MG7>LWT6!3=VC{X}!h3ActDO 
zgJJ>fE43a>sAy+&k<4z{eU+XEH13iP^G`cME<Nk5(LT{&ia6q|4z*v?3`r%1sX?f^ogm)ICf3~Y!w;LGI03bh zHyuI!gr8G}UnS0oz}%51UAWi+0KdBl-aQFzZ%QKToh+m9z7f2Z0w&|iB%c-iPb?sn zh>MS>UY$v~>3)9i!tV9FmbmxK(kXR_bkr_e#W3#U&a_&5f^P z(E0jf3wjpAvq|naA1X^IOUl0_*pu)WVn+aTOK(CUE*tS%Tk|6-D^~mOHd_|;Qmj85 zFdMviru~`vCxgi0lBE5z_=&^ly_vg0<@NCshsm$Z$<;Wp>EgEyQl-AOtd^T)E)}h0 z>=F}mY+Uvy3ab9qrj$nQ*1e>i(_lKylvY1Ge#IcTxg8^=c~No{r=E2|D^Mys`Tnnu zT$vg;u379)LI_c$@2!=D>~|yh;RdO_DQ%|UjiQX;0BX{A2gkL(7-#K4EJ6lF3Mk0f zsYyQl{{TE|?L{a+&~#Z;YI9c#lEM#Bbc2lc(j35B_CW+>Qd>JLxxI#)pUWF8lzk|7$iECp4Ghw%QB0|QE8T2v9~^7EH?HAk#e1DdX~z0@(4=8 z4f+p}{BYY?+uW^`kj$stDnsu0#@!S7Vn9UEmGV@pf1}W1k*5G2Jgw@XKx|1op2eaMK>dtp&dG+00vCKLaQPcYoXFu9pamq^qZ=RL{ z;1gqTf~C^nJhnrPC8C>MN)`j$ZEu+v5~ij=2?}2g!)fGj4i>jh`X^+JwCm^vg*3_v zYzp^J__w&Dy^td#pF@V|PuVb}xTPS(~# za$RojI(>B~tv4XjqQY(F6C$3pOm)V>xA2evB${C8Mfa)#Mb6r_(H*1xaA z0n|d^?d2y)Gh7P@wUa`#S({3((cr?TyHRSQqXBDe&6!qb>T)KQjik+ zM5&||1OT9+%iKKim>~_{(?iv&Z))kED2N-|cwctsLw+5I&P3uYoWCDB@Xe@;sSMVL z@=U<-LSlOoWT`>M%13xzOywjh<3@sa)wI2Q40QvZa@=!B8Fc(S*FT0g{{XbK#jf7F zCmM9Mro@|OPin71n=y}RYE>w@oq9ss8y=jk{Bb^KW2{*I3b-)YV-9SP-#td27Wk{3 z!iwE4g_9!=@=Pb$X=(vUDH^PK5pm4pm2Dj?lGUCdkh}KG7+Ht0mLkK7pYoi#cvL-bv7F#A|0B`#DRXCc`c6!M*uAUBH9c@=)}fLogs+u5WLKs!*cW>5`m5n?R*FtJz8s z5J^6FKuP@hVh*N?MuWKX*?)mn+DhnT=Q)G7Rn~sWCV~8pdP6kE<2JX+vkof2+sQzY zw@i7nlvd03&DXm299IwJUCwn_5xXk|aM@KoUIywDqLE>xdyW49Lmc;Y-3yPFylMeT z4>vKUfsIp*F8qRkN&ReakkC!YNQ|_eaI%H?hgh${3hyY*&2LBSr&2)lwee!X=^}ae ztX1=7DWQ1!u+rEy;OhZ<5y#S$jV3$F{@-om*&m{TPMk8$1THgNDDqd)CA_^0{o zDbrcKlpk@qR~|va5bs{D92PgFH<_(@JV-5fdzo)ARD7JC)60Kk{nOQP{v6xj0#}8^ zyp1P7mYTH{N;f*9_w57kPl+S^UmPL$L{B`@S=(n15odW7UaaK%5sx^W*Z1z9JC8Bg zl-jBa!_I#vYP8(M*dNfqbq_p&>m7Pt^8Ji9{hp`|-e3-IOHKT;Bf(U03JIx=s-rRg z01&Y8%z3Uq{V@6e051*=@XvNK%O; z0urJd6kFYOS)D^g$3l?+k1}lZ!4a*$Fp%*nr4WRe4IRO@>SdpS2hZO&3T7jhaZ?x z{DynSU3u3|lc{bkO6AOwqJAdl1_5DV+@R*WoxF1=*R^{9`Xf6dx!KRNd_FH`+Bgn+mQ071(m z`($6Ib*v**tS+2Qzfj}NJl6JJ5ZXy%q} z=-s*iB$8|z0L+uw(cyb989A}srsiqQ3dm8a@bWUMQcW3}f`X;B1H4yom&mv-2J{75 z``>(hRl-G3ri@m&I&IOuTke-wZxPf^imO&461YKq&KKzV?@il*a&8Y@@gRp%_W&VJ zsBzDOkg1Ne+~+ea4(&<=0cQe;+xn?k_>5P!x@O0O)V=x!JJQ2kwINRIxp2hki#7I) z@^C)|Y(+CE*7$`!gI}Uo4PH%I5~+{0B)H5~C6^YZXh1#Y;k6WKbl50Z0lzJ$nxT|o z@ez=*()#tBucmej*c(vF7UM1z8`j_60`362D-&UB+UCOc9qLJXi&~w-ciIlpAkrc} z>oCk{s%>l~M^njhNK#t~xK@;*2qNUGer-uRStkDgyyMby%w=JpRmJo3QTTi&wjo`J&2@l1blBF>7jTN-n& z7Md(9lbn4dBmi%Iy>Kyi0jgLWQ8OiahyW-1yZrEt@|Ic=M<*btq`uHsL3^9~u!lI1 zZk7Pm1*V|tN}7dIb+8AC46K3i0@z^6FKeemIHp!%&ly82pszIN44svNcP6J}fv*h0Pk|piIHrC$g?yB{g2rTp=!`Zrn=0x3A!ssdSu6MY4FL zg6*ewc(>lNWz}&FTt2%LCtvzXJ0a1-qCXHbR%BTFzgA^w%LMrazLryOjwE8_>lJy; zfxWID3$00tLI}CjsQwBP%s)$a0^s+efy0h4<|PBMd+PWJQU3GqD&O72yd0KRyWf?A z`j5aN-;Tr%K%WH(SH74@>JBF=_I{@}Hwj%^Gq~lw>0&Ae$yk_!l4b+>%7S|uJ}xLn zZ4l9|@jrru>;C{tS7=I!EzvTs1~S&G>C;Y?HDG(*m875UVqzXj@aE?up6>8H8-Fw- z_L5B+8a!|6p^DeiH`@NXr;yhbTcHoyMIwPtj9b0!xQ?ZcN1D9xD-S2QI|t~uyIkY( zvSrxqNzm!p{;Cnoy{&si<`)cX&)Nrz&-c9Ab1_Mb-MrM6mV|-t7R22)%Hi?0@YAy| z-3QeO{9<<<*F2;2J`Hhmij=rHoy@^UtWss&Ov|ZPT$Ww2vFPEw`GRk7MgXhYIw~m) zJES;p4Y$ZE1un|h)UZpu(ANusU3-Jse+2%db}-D+6vAd^$|OGVWgtPNxaPvwxmuh2 zx?&oqlqzz?s&G>()ALI67_LW=FWAtPL=0(P zcBS-LBw0S}OKjHF$B0f~+}$8wTk*F*=~eBf*`tR<**EvMjkLE5^p#FrtkfLay-ET7 z*1-Jli|D6dH&oF($<+bvvt1`!@*F$!Ss#`a?AkO78uCtS;omH7g(&*m{&?2&Mv3s( zTSPKZAQBU$BbLBpMeQkJk3<|%TJEa_6TiO>fszgS-D=VbmXrVi=sdBGa2c$Lmo^D* z5L0g!)Cnh%J#aMb-fhA?g3hr=ZKRZX{IEDVWSk?VBE9x<$s~jwF(aF z^Wu-P@(=*;5(T!~3r8{F8?A(dgJ7*j+_$z8;0~!xlmTLgPk7)+OHVBGxJfu+&JEO} znY`g(4}v<}c|ySv)`fDp7Pr3)K6Y^^w$jOLf_2d{wR!0==R8(eX&^QMxBqk&(2%I}^lyDcG!$0icrzEx=CYq=0@tW84k^ZYcBbDaYV&uzqNGd{KrdjYV(; 
zB`Z#$eISExUpyuCCE%u`gV~(=ROLcV79<7`^j2gnD)!Z-S3Y;eQnTAR=X9$~Qa5(V zxth~Pl-!0HWwiZ*hyfPWV{yv{Q5hRz?vu%|gW`VGgv}7Y?&ai_(+?#~TGE=d>LbM1B4cC=^s>i< z;8#AWqWhuUh|^7nyzcLoAKuH)MBJ=!&Zy$z)v1#y^)Bi{RNC5fa0B$UmB_jh|;Xe4v};er_GCC#SEiQ%rM zC)zULbcEYts5|Dw#LkG}aY`#l?-RH?Tc7LG5^kliCAK+s62+4vDrw{Pk;=j^ZGRA?Uu%le(A1a(IySpz z#9MBgei*HooNiiIM0D}AhRDq_%d4|5BvQ3dfa}qHo^$bRN{U@%5C~5zrpK?x1<_MI ztvk)V`qX|cT??Gu$t`Y?ai>+1?ovm612R+}ZkJYD2#(7p=vhX$3YJr-Y8MUI*z!0V z5j&xgw(Z`zI`A2FYt){?1D^9Eto_k!v)5fk$BK!TOme5HdDf2H>sb-eMR_ot*r6IYD1>6!g)|;P8+Z4*$ zX+5EFXMy+?)iCOqOhJs6_x+#`n7Wndh1qi8qC9#`#0d#lEK3S0vAx1n2dKqrtOm4~ z5;EcymL@&JKu1x(z$toFcQDcEd8VR7sOy&{xT(INki>Scq@;qC4Q{CFzdpFH$3+Z= zD1Zbs6W-(Fpiu%4CoVbejJk>A;1wmS=PHbf`-@YJTT#?XcfI-$NXJ;D!Wh^xRZ5Mi zlvMyNVb$@?P4rB{iL~@I>n$xON>2M&+vSRtT?uRM7evGH5i3C<6wQ+9_hPk*#=;3|>ANdYIIxoA z%zg{Ey_ZXn@&sirS9})XJDZCiL%8MTk0kPf-Rk3EqOEd`D3Y{x1#2l&6zbj!Y^@jA zr*DPtVlhLK3jkL)o;v}Zk=mTC2y7wul5cIzw)`-bL42&6q-RR399qp1sSl}a&h7~U z%DEfu*5Ah$t)0T#CPXfqvb5ptxffMpZxn0pJhaJnJxQC8q^%9e1bdD4-`|dvO|vkO zc8$_|Lg+rioueQ@>Le#_NY7N-oklw^P+=xQ+yr+`t+jIo`gwVDz__jk_@gclHc*(X z`^+|Z?^dmKs(59x)6ccdSOe*!C#+;VbW~lX z+w3Ne@k=N#{g!i&VFMxFR~tT%y!ys=;#OQJ&+{|%I%O!trY$td(c>_}-R~A#eruiV zjZOpNODwhmQW6znwR)eDEE)KHE8=n9B=d-PcRJRv0Av8S>k>`)WW3SJ7AXZ#WU)pg z2KE8AB!1I~$dkYUkkpc@Rz#&aBO~z+wwTIr$9@c3n*sxge2@vX@1;9mdkcDA@v5*Y z;EMW(9}VB2p!Q;)dXBDJd!eJmoBTYLoa`0ZnZDG0tT;x;nai1=!dTnjg`qRj)K zDW8f>qquR zJ%jlx>~lm@{rv0myf_6a1lY%?a z?JYF$UCgxr0VB`9UOOS!56nh zjznMaRZ3aqWl3&fq>ytvqV2uK+y#Zk=;3~#q4{+Psp0ablOOEa4WywZ!s62*7Eo4{ z-(!EhzWZNnBAyG?7IpS;?^jGm5hD*|!o&vl<{Sm@aC5&LjkCRoPi(PVs(XH3mdu9| zw&lT7t*SGHB&`cY&6+8#Bd|)hiVsS=M^t&#!dhNbIRc7 zd$(M*BwG0{^X4C<1F_8nJKu5pEY{YI)S4_Bg-GAgQRArx{8)gCWHA{t5fR!z^()GT zX7u3R9*TpOS-f zC(|L4o76I|XZ+<+BXqe{%@JaPIueM{DqgV^(74^AmOX6-*1wxd7F*7t2L?B=FTYRoYGG* zI=?z#KJe?S7@UI$YJ0`v~GwF01NA+{{u`Zid-G3L~k)4LAnHY0{EX zWV+=@A}^HMY$y; z9=$LYCrJI<$Dda5Z-uR&dX&N{qu#Wqbad_q-1HE5b_Hb^&-@^z!7FyLSFcGa-MVgA zjSo@W?xL2*<4bFOKDt5?RPvQ

AKeMv&U0Ku(c?uM$2tSa!JK|5y9x{r1q1<*E^)%{zXnvV|A3Vh+&Ym zqlQ-V1xj#xP4UVhvpi6&l1ToRp8;dzc=&^tV=%XC0PZdy=2Fq^b$H9#p9cQ`@k}l* zai@vAOrg{0+1od{m}#uOA8D^(ioim`R4z*`qWdXZA3sf`@J*efqL9f=9_b?5lj=%~ z#j%>2u+~oP;4{z+`+~2&#D$GWMYifOVKWd^duhSYEksQ#9ucRY{{UQU&2*7Zx^HA< z3@dxy{{XG<)17jN5t4*4Tq#i^_Z^dyJm8r>ew21Bm z5&#R>^4npAw-Atv-8nwZKpnaP?^-vP8DN5j8VfvviL$-d#yY&a-dbkQ2=m>~ui#V0 zjUmW7Sv2$piL!za0X;Vd_#c)ct0Ldb9Q#$b1*~)yjnRv7%jfVRNs%K{B#3TAj{G;& zE>f1wm%mhtkJa&Cagn6&8;`Clr$G%gn~#a5u039%Tb!{8Nu4JVS9tM z{#fS~l74WN)@eX71{}x((J4JbL$2y#DiY$&$`j!v+j0T0zqs#)#Z`wm=$ew3JG3PE z)EbKcppno5)2O)}HsnX+f+4Dm-{7MvD5bF6@3_DEAk!YA(@1SCDQXrPbrq-o08sba z8!llwtu_f-EJl5|QTA(_A*lf_qe6pr~@??$n7~{sS6rFHslqy-uq&+ zR@)m8x;7ckU~q5;h~e<~CH~?OK<^he3e0@9m`RJF zNYKmFYOGYq)?B*|mRDiO*dKp97aeH1613Z_T}8=+ynyOi1@C;0>GDbmt!C(2+nR9T(nGWXK1Qnd~}kBUxtUX=p>wzmGdTFa!}U$4Uj)) zzu|tpG+7jr46to zcU*Xd7VJ)y(vm=TP;qO#2p92EEftKWGQDHGW_1!whl13lByJLtKl2^+VivLWKHa&8 z0Ic|RJrA*anhbl|+-KsVS($Cwa*MR=!qoI%!r59BIp1)1{BdND5Djglj@~MfSF?|n zP3n4$@z8949|D|FsnY4`jKrv^ErV@o3QfJk#lOb@$unfRX;TnT%?J#UqTM3n!(UYE zpv_!q(cETSmTa(83P~R{n~#1YDHpAagv$TZ4(brvNQuw7G2 zvz971`=kOk1LfABhNL{;Z`!rU=TqEZXd*q?xU!qm&9pz&yQlvOna_dcZYUk9Z z#2?2NhS1FuEfj=x6&Tv;N{9Y_08dS`QiT>iOq3#)9Ft8cDN#b4)CTwKVh7I^>Zer; zgPOPDv{bIKk1rI{&pB=N<=SK_U3!@}y^|taiBp})QkJ_7m9el$*dDl(!YkS*bEIJw z0Cec9`0fic;VgyPl63<2^|FsM#BA9_%QU9d0%FMKq_n4-O~*1(aB(q%x|&*t8-d;j zl7QORVicHsZH=YGYhXyXUhUK=3o)tJ8WQ3rLX><(Bop!$BHp;U$%lb*!*8yY$0@d; zxQ5QlpRO4n5T@*lSc>Gx&055)Cqlqg$M#e%IrF|Mc+C^HOSN5ZW$}32cXT}mBVRgM>w^R}m>J_*ON{2t5H5N;B(*UTiXem#}c|j0faYFY{Nl5qe-{p>}u~7#b z7g=@Pk&w14jMWvWRpSTNWNJdeJGzl@Z_to`Ja)=zXIn2MULlGANN}>6I8`+Zp9r?J zEhBKfO5f$Ea`eOo(ZOiV!l_hNZxR~pm8*qH&)L=WM9LXUF=hoJR#M|cON=sZ2B%*- z5a0Kd4*2I(m@A$`fXL``1GwmuCeY#Z4c@Gotvwg#9>$BGD=6BtSm91spDr$Y;?*v{ znIovB?7xsuinPXD9NLpT!kAH%IS>}=hwhWMIt4Do7{GO7#ABf5?*Z|$t5kBKQBVtT zYEt(8+?>|<49|${O&Oh`D)5{!_+OK9w+c`$5muIdzpo2zsWRpx*`q73*hbr;u{8{M zbs*lRgMHt${Kxc*4-%=h(c*RaVs%b)PwF$6-z?3#?xH#NuGieo`Kq&;b9Qea16Z!q zS5ue<`z6Qjrqj;IXrUvOhb(Tp9IUEuvzgrA*#mXH11}^6HVI2i0D*v8j;F-_3V?R9 zn40cnr^;N0B`t{k%2bAQAH8xEE=~P$*10opvW3k8*EiRxRJQU4`zX&5bz8FLVye)o zHR?@0##IKOrWYlb5Dulat3e95P`0&PZV0u(AQ91Ps3V?MO6%`3_BZLZ{W_09RD2^C znvyvg5$3!LgVmVm`rucnZ=kb*NTKk9h#jG8bcSP9Gi5~5X^wMO)YSD7;?hpwJ20$z zh{(s3d0)lj#CDD>9mjeKRvBd+EF0|{4|7=G=Oc+iwo@#IP}>pA9c9+uJ;KAebz_Y( zUsh6;4Yl`APIVE17Us2Euj<(G^QvDg&Zv{#aV|ZlGNqlHx`)F>;j_1fbCbkQ*nOn4 zI+my7jM*}k5EJ(z!YPE(N08GPZR&CL7m_?Jv|BSnPBr#n$F=u)^Ky;a7k_$lKVSy< z0r9%32z|s|AtK`2^~XnT)IFBzlX3kTf%6BRGkb#7l8rj8DxESU`gF+NW6T2wg@cL$yuo;c);#~`~-M0AdXMHO6-Ov&LpuW1Axf;g+q#LU$x@xq+pDODM!3Yl9ek_Qi3$xdgG2@_vE7$ahc2KLTu21p zP$*G58-fjpQbb5;am*43u*&L6nehVW#=&l5np)=&NH)tNEIRFbTKXvcLbWp_R_Tny zisOtqF^~$M)|02a0Sj#fYt$PggUIc3dj*FvwLp?{-O}BL;G2DDr9qxb3$P2fd|2-S zO@S66yWBJ5oq;JTo+3S#McfeH16c74}?c- ze_7@D2hRI-$47gkh=#qu6QH(dxMsYV$4MYAV-1HBknbkL%@vVehfhkit}R+&NKiyI zskRzDbv)vAF5mTNxAn26Z%LRNqpx28Y&*^Rk{u%&GdstiY2roCaJO1!$;?%Um90{t zN4X>=4V74O+g9fi4T+ryCsasHu901+PFb?56I5@14U_yZ=nWbK)8jl)A6QEoQYO`u zEo*yH)2rWNFsrwXycTYer>abOg8q^j2v9d9lBFx+Z-*N_vL!TN4z+>|LRqlnEz27< zKqU%Wo1+L}Rv}HI{;BKB4H}Mwu}<4)f^rp-r6{;a^|1I1Y*g?IA4mW|sK0QnYgYHL zu*1NPDIu=1(6XfxRDwmVxxNx4wu!+3JTgaFPK`Q3fgVQ`H}q;ck(5%EqK^!U7H(5n;}Y%+VpcdzR9N7>8L zmY->8p~q560+8>B4={YaFw?7=Ij2N?TRAZv)tR_ukys!s`~nU39%T=W|PO}G=0%Z2Wwno*5Yh$HuGt+axbprkKNzj*{) z4gJ`Gs1A(Xbo?S%YT@>I5Hcep&dV?eaKK#pE9Nx*C0n zrFL+;aP)BSIe1wF-c6}QOUZtona~w-y#na}013UnR>O|PBYlfqx=>JTjYJo0k%z{0 z;yf*J=hT-VNM3*<9PDlWurCHN?pAA6SeAp4XDpXGrX1CKP=t;pvzTGt5kZo!tqV}{n?O~^q> zDrj4tlj72EexrVPVbRFPT=DwuTM{sYQe<>Fp*CUU18Wg-Y&m({d+t$C2Uzt*G_>2tJHR9lo^&=%y;c#og?$f-Fh_Vz_qPmuo 
zCL<3weTv4FU(smW<~PE=#P;8G=k>s)e`-SO2LbWFXv@iLy6fyMUdcI@m;%}#QBn_( z*J}@ru#rI54tq)L*TFb(XEgq8;@x*6#@QtKhNDn-y)fl9Ym)3_A#JF0Ch6z+Vd$91 z#jX!+?t`k5Y2TYjJuR^s->mv184=t>X=q{QlKPTB4qXb{ru!a$mJf`AMaEr98lnfa zj`nWn9-D*bm6MYsW;*3GI1I;GT6P`f?Qez0pHYasYYm0=-5Dvpn8#|F8EmIcpY*A?Er_Lp02Ut)`oAoy4em|BOj|wdV*}RU_e7i*12@}E{)((B?HQH8DtT>Go zr|8e94+Q4^&i?@0@ekKb64~ma7jrjFc?v%2qZ|X?|^CNNja>Q)7)jX2gZfm;2nS*Ak>EZtFLz{2aKk7aT8F`7&XRQ?q zYNM}Fy42A@9JwA}mt0zbW7*6%T;f=2Aef2bwz=4kJjbGTQ|fZtQhlCW)|((GC1})= zeGSdNc#EEugk8WDXQRZWY2AtL1E7nAAy6vM%ap*G1{A6;2|Jn0D4aH!6(;Zkr~OEl93fkZagZh z01%?l8q!Bk3FmJyh$rH3NbZ8I7<_U>-O*#@quko$#*+!BV^p9m9uk!xl$(o?d2PR3 zOyR+%NEPfCtIt}6qss-CB}kN_uEpEE&f~e!suw@_QX6U@mx|*vbE8> z;qp=Ql}2W`g;uGQx|kz%Jiyh@_r;VsbG|Gsq%0qqac^Px8-+orua05`WKDl-N2t%u zOZfvXWqOpzN<}`k1=TGk?7kM0 zyEnnuD5T|Dd^TKp)Pz_mX|lc}{I8EJ?IVn}PI2P7w$?w|RTnE2Nny`tTct+V+>vjN z`{KJO>Qox3Io1g-w1dRrVb;;R7G(BN_4kSjZBb|q2h6rEAap*AD%kYSP)}w!a@SG z8*7eWTkE}9UKk@RyySTf!Ho%e3WDRlP(9jK6X)cAEL3)dl?`bD*SAf+3bQ=H&*o~S z8nDaCnAJiZdR0E6fCF-wacsJi(1)8TP__K*KRRV4Wo!p_JC4iS!$oAL!Q-8qwGg%A ztOM!_D>FUuDx}z^)bo4}AkosDD2bV1LasCtc_k)mOK=Wfbz`5q6LD<=+BxGs%OCLB z15wG&3nw^4V<(Fo=J^6gl16?OMv#KtcY+xzDJI?SU7tRPwVtX=DRKD3?tkV$^yP_ zIeJs3-*pJ8G|Ng?qFy-bTNSmC!V6s+$mwVg@@d}x01Be*rUPF3c9|e^EtSoSPhkYA z;R~^6YRMI<6$U>8v+|Wys79tlt3!yIg)z6DaV??6&4{O%OGzsVD=Py^PWZo7@|A(X z3+Ixdqi`1%o3-`=_6>MxBr`w3M^4_`>3#Y>qd0kZEy;cfA0bB#~Fxk6V?ycnmw-p!+VPGQO0QVKibz6 zmpK4VR%EI=@fNnV%5h|ibh@$in`32TuzFI@ciG3}&^`(y9e(|6uNoH<rrw0Lpg8f)~> zBceop{51QGs{YR*j_{oYMNXAnM3m_`;~0x~F=^T#&bT?)+Tb27b*vXHT?l9dHE@{Nf}xKSjY zXO<&obdtzpb+RCj4vsBUH8u-N6UmLb=0zFFo;xi?NoRII%X$LTTZ zI)y<^NiH~+T1Zhz2Ksj--rsOo^*fvd>bYFuBZs;O%H*CSER@4&so{XY*KZkI)0k)j zffoeao{~C~ZiSTOihEgkEkdkLWtk~G%<6lNI+M8W*^-^fvOe!y-uqy-;w>^6?ph7F zIB?=dia!&>93qbouyfnJz&iQdSP(VT3)me_i=WkL^}6DbCaqMPQ&{_|dDSnw{>bE? 
zZ~$YcK*-`7v496x$EA4|aV(Z|+)r=TUC9Z`dzRfwRqb#uz6otg9n@sz5k)}&-$^Ht zC-%KBhd$6#PNo2?rWicXq7PnJ)2BAoEJMH{Qvle2(ni|@<%a#KIi-q2#4Y`aV4wtC z5G{-{Sgrk(<&qFn-0=XDtXpez#*CdQ(MaoLw?!dJP*QG`^)}>ve6W*IVPyPxYo0~4 zOYP++<*>5TN-G1xPgB%jQ;3Ef)btxCEYX)n6H)pLv0qazH>GJdAzKmWiAGk3HseKH zp9+#G3~m-9r5|R4tlLV7*p0svhM!nKx;~!?bJ2rC0BR_5+w6H9HfrywL>v@>auBU$ z_Z?P-8+mKF0Vn|ey5SD31K!a)4$sK*%>h!VOYFm#OP$aKybt;$m!M06qL$XH7Hj;n^ApyHK>xbY{}%c<@6%OABQZ)PN8{33(PNJMU$D z_Bd)fn;pPWRTx-we%jd`2+Vk-*5WwY%3HID9PT;o*W_?ho-J~s*-r3m+`@3ul;l@K zjlQUA^HK1td+cqu#{g8cI9TSa_&kn<;mtdT%g5%Ws8DFN_R`&Qh?kwUi|IZjl6^NH z9kE!bo?3Ig3;UO2qr>QNCI(kP3%_fP`?L5a#dC!-q9y56Lgh*z1wjf;oKK+We*^Et zl^)Wu{Mosur-G*Aypc}`bhHQBH(MN?+0c8eds8u*9Ohb5-LM9U^eZAAiqraoMJ>s+_ zw;j36EgHZ*<9vqP_t^Y?c!qPg2`u2Rus~*OMD028A6g%*)82`e(#Lr7MH2x|>PR6; z9PiHH`tOLk+(er+NPl~9Vba3Tc-a2AAPYN_@o+Uo@~wIl2LV;O`tgy#-=i~ zf>NsuN%i@i{{YhiHS9S*A`_v%$qmO?as#RRh}B|+Uz%<~>B&6s<4c=zGY9K5 z+|6m%R6lnqNGXt8ePCRbB!Y=fYQ0s4 z$`0`6_FSSfGXDT_-*zXMPmmR7Q*@Khnv-G5_v^m;?veRru9OM zxNA#qBA}NNuzjVN$4F7=6xm(B*Ah@v?`Fa+&`-|F4ziQk^JE71_k(i3^6yzSpMxpK zROAYqvYk7Upbnr1B1YHg$YM%48U>!)1zw&h3B5eK$Ge8SJGde-S1HLNJof#K!YqXe z0lSNGCj0g2f*!_Od71g4DW{RrZd47+>CU&0`dwrSvk$nqNs@%Oa9y&{rpV@XfC2Tm z^}VpM98PH8ovMLwTu-NEbE z<%ZirBR7nZ*nR~P{CLzea-ocHIdg_z)ZCAkl6+8T)I~yh)?bxK)D4?iFEXxQbgYqY zPI!Q>sh8SVfa`J4_u9o?u$(%W#dyh#9t4*+`AH;e!8K-ODvDGbkL=Rf)Id~YV0^AO z{4I$X@i@*;$o2fxC05K%4cJ46?&ab$@*y^yk2WHb+<@y%6J#&EDoH*^Yv14Hi6%Xd z7izA_1kjz?G8f3+_wBZ0yxBQp)TYx(egl(fk0AgDW4xp&0sUm#({5Yf2`L!-pJF-) zQ?b{+7Ttih+(9G2g#M&)3b5;XYi-jjk;t$zTSlb~xu^lmb;J#30~<}@C8w+CtBx18 z61}^%7I5pe{vA4}3Yf`L*RDl6PF;|zS;h{e8}$Gawg3;SW{%$-YGv_Z^BcXphomLS zM-O99SAUmt9U7{kxM@ykSUy)EAD#rM#$3Qe(UI1lrokgF-hunUBG+=BVW!Da<GEGJlG!~OBmcg(us5lS>w$^WP-lkHGaokd;JH4pCqZ+)@O_O`A82{B}E z(yeA%x@uPmopZx0A0^b>idtqPFqagh0#Z~C)*g1-%N~Bk8F!Zhs^oa-E|AG}VB*13 zbfDl$`?3j2ib93G{{Rd{zMa5c%7|(jW!<`q^B4;b1U}M|Ii~t?xJ8wciMHUmt;giQ z_0#V?NqNRWP2571n;&{e-=92o{fD=*uy%b5sME2(WE@pnL|9KrON~^juwK5wXiCNT zi~Z6^G1udctrV{fv1mQ#{S_w?g}1mQc732f-FL9*I0E=sNO>hy0$`r<}P+g7U$>l#8oV<;bIq3qz>KBB%4f~ zQKmU`%6UW`^%9*U;s~&}y9=p`IR%JPt&o9Znv!8=s+&!Y@7ha5TIjv`9fgg$dj9|n zR;I%vzgk1u!=01bkDHr$r-XuqfStpQZk@XAVgCReMoEXbJ0%ZW5G7PD(zTjQsL%tD zT9pv3Nmq2?0Y=vwU(2nr)pf<8I9 z*O9|6V>;KEw(Az7ab*=KcJ=P!cmbdXT=hN6(af+Kr@>$zdfiv4JAVv$yBVnVcpVJ` zPrPK6-DSiyU$`GN&n$P`?!n-zw;hzzlv)!@UYkx2Tz<}yH^o90fCaVZd_HM29Iw{A zmv+&VvF z;x!lyLMBHeV!JF{=07aPc5_ZgEP=4G=~I8~vua}#hY(H0z_{j{XV7h9(a@=+p3hM# zYd^?7IPg0;Enjjpx|8{SV*2R~RGMkBt8@;L>yEz7v2ML%O| zZ)lty;y({G)UNh*tZ|ZLy`n(X*N?%cpOKeLaagej98;^dv}?b=^2j@qns>k z?7LN;;dXM%^z4xfEY8r)VHHUaCtGZ{-?ZWpr6I+UsaNkjDmDP^%-^4F4jnh!O-lvZ zS_c!DzW^6`#IFh(R*453>^CbecjV*b!TCf{YHt+6-=u$)oD(*E+l8xsZJ(3w53V`YDgnMRqUl3 zDMIGgz69QIq`wiCV?BFw9jaz{8gXcxGO_198bdeEs7BGQFhQ>T+F^sSVx(fbUN;Kz#|w7Ck1 zXELV|wuO|rVY*edw)p-B0nJz#Y*gMKv!*zOJ#1fwv^EuC@2*Gt6cQt zMiKVy;2m^$*;hp3#{vlH;g$Txo*WQspgP}p+sRTNPT-_o>E<@;jBRD}2~up+nY_n~ zICtg22@VuhcZIN*oJjGq2i{LbC-;ZD{c)1L+;r(rQBcHpw{tDjTT#+FLSD<$%d1mhqe59^>wJ|)6+?)s-Ec)9V70&_dVK%C&b2B_r_sz zvxbT)vrPVLzk5#{(}58dkYcVfPQ^%Ykf80hxK|^~r(Spny8PgKm6|$Y2eET{NI4LD z^f~@3MAQ5CP^ix_8bIFVHs$CC<}Hje*jd8iDVT7P;Kbn9AY7t5AxST~3V=6KH&4HB zTrIUga7;^pzDHhHXT*&=e0q?7$=93il-sSfDJj*gq>!(}kq6<0eXt!x!Av)1p34@H z9yr+8`CNm3(a@v}u~TY5FeWG1<)u1eJE>s?$@hy~k6vRC%&(FcX0CNO1wCYZ;jbHdjZ|e>$n3*X z;=BVbkU(9E&_^&%`=`u}$F3|ixX$?AZhlLf;=2zNWX45->bUaMb&STTll3I(Jf)Zk zTc{vwj#L)gp!2^Vq@S4e<%w}t$y?9y;;Cl>sG*daesJR5LxCCf7M=%D){jSnPnyw0 zMnq{qo5OVYlja*(u|DIt^1$z!Mmp{{9V6m6DH>`TYK9g>+dO`u;D2?(#OJ_^q|-`N zH@oE}wZzq89R}X#EN-7GkHZ8Cfacqe)`~RX?uJ1j0J8cU{{V{Gc8?_Fg0)iKX>nBS 
zXOn-iQ-p#q`wHqX49xa|&dhyk6`qDuOyKB=YlrIJ?w`X&CeSl-q=1I>Jm4beQao0@ zi6B@L>IcIW%tr_VsdVfEY8yDP%=B@$W>@VQ^(TDz6l}{C$nsJlN>Btig7qUJ@kl)y@@1%o&32QpN=P?$33QP$f`|7#WpK8 zOSz-#hJ+@JjJw}1@Z=_0M!~lf66zDnNhg0hUlOdY0CUB!$ARlrDy^E|b?M!`ZU-j; z(bL&B{QiZYC8!r z2ntudv%gN4g9OVZ%^bcU&xdlRsu`jrNhc_X?P{o+ihb>w@dQ}r(6#tj5&|5iZ&rA z$eZ~ml_n~ZvXl_Rg*FKxB#&Dv-F&at5!1UY^HMO&V{7K0aay6uV5X-UZ+bwdv?3?WLu#v?+?`;UQm>f#ELZ3)R_d8AxiF?Q1s@5@ksaKkL^aAws!8H z5#fka6ge(f8mmwXq^O?vy}Ejg;6vN(aP|@FQ=1q$uXg zd8ah%OJ3WA*Vx`QcH_-Rq2xfkYX|7II<9rlpm;-qX+>J|G)Xn{h;K z%v;Nqx?7gmmX1+sKQ8%@Q*@bpbh1nzF* ztl`j2{eRVBPD5pS9Ee}1x@^C&SWe&By}m?pvGc>xVy+oGoI3t$3abNQ=S6lqsQf-j zyZnmG@E?-vQdLuFTb7nPvaR{=bSu{PwXr4rr%xaO!pEoUFrgWQ*TDBVj^77{htA;l z-92d(f|!bN+HA@^(t)nZE#G0e8=&9R_2r04$(rZn;Nj!=t8N(~_GGz+HxZu?z@&wcxZS1aZWqg6#0IngVW8nIANjaP=a&MXitQilRcf%Lu`d*gd|q9eITVOX4V zd#(>{`avE@XR^!*WP;kEs1j^|ZGN{w-{J_i1#N7M7CEWeaO!z<=JsiSuCzfpTAd?(i2lm@nbg~baHV2UF%L_P*Nw7PncOn@}BzXdR4bqeqzZGh6=$+%5N`r_u2i!rpJ9QWmb~V}+FF{cw z4cy%NN7ejOrWSavLUFW2ohY`FoA-$hAvQjI$1F$5g2M540q9h#wt?+Cx67`_{S~Cj zd4;;HP~Ad>L#h>kORYs6;TIc_6?=|%wkXx~CMRAexOEbpnaz3L-m+Mq27uyTjK$+z z1EmW99-tP^=kT}VfMJ?&Lymu4Dn^o1mWmb3xlSvRbwZxgY69lupeEezMaQ7u%NA-Z zNx{7Kqh`ZIoZ*s;vqcKE8K#|WE;xX07TF6P^KHK@PT{qXmlnA|V>C4(fzj~jQ5joJ z%u1$1LLN!CcoJGs-}jN%7N7}dG1+$?RnHt6*{>r7#~ToQLcHCXE0Y+W@?+FwmdX+e zTvvH>euPu>8(``zOqLutX2jzTTU5DlQnJwG?zco>-uXC^MW6$e? zv9!8|LzzWVlO^h5AP==`yV7^&;x0C~QabzbQLlorj?;VVxTs1QA~K5$CQRJA?B|?* zlTu=#;#0z>5P5K-%$_XVun{E74GdY7N z8+a{|#T<SmWLOnjWByp{dl!S$Q|B$M+ry5R;HZ0NwGm@Sm&I2e zrjnYi^ly+!>`7oFF_AiTT$JtF=7(C>Rf~-6Jt8PkZDu$sAweeB9LMFh0Hw&?2|Kjt zTweN2xJ_Hkye7+BW6JdSvTM|7Y{r&{Y!f03g$CT_dA9OfU%6}Z$9&`uXdwgL2dLZU>ZIiH4~WmUn04ID?s$d~VIMX{ zjamm`wyBLRew!$J@f>iD?}Fy@uXNwGW1L zN?YPZ9n8;1a@PhOO~&kf!=?0K;#{h0{(f2w(i66GcK zYV#P(Oszr1{cBB&A;$;^7xs?nE`w&$E*1^1zUL8!1~>FTv-n&<@V&U-O;p+0=Ez8? zceMxh*vB904Qqk0weNnk^n)$ITi|g4OJDSjgw#7X9aKBu zS&u6iE@9n>gt>C%L^V=WDLiDgAL{%rZRi9Jx@+W!E(Z{Ed%nyr#IbQ_&} zN6dOIX^G&IQOw!g+cx39!t zX2j+3`r@qASZgXbO?O^vEmtpxI#XM^{4V>9c@0wvrsUQ#l1A7Z?an9D;t|Dmhqe}hKd8&UO*vPR; zZQ$N`)6=8lnu?vtv!fxjKMm%>7rKqYcI2z_zd||r;?~?%j^{Mq>y*i|6GQ`KX%;

1j40z&W2XKSKe79OUkWC}ftEwvgr1^F^PR9;5?rgt&vaK^ETMc_eklM-!tw z-BFTC$=ka8#=nizQlXNB){`QP^4(Y(gJfA^-E28~$Db@n)8iYCw4qip4UZEuewEpd zAL*yoNG=Ey%==hJSZal*+Z z`1KlcP>K2-ZO{;xS9PJm%G&LzKiN~+9G#e%slnC-U5vW{`zMxfotG!Sk%+!Wk%+X<3T)4X-*A0CuV9SfyrvuGQivf+Dl zTE^p~5tmm4X_|X%yo4=owIiBtcRocc7Qee=>4u*%Bcvjr!J*^rgJ|#ayoP)v}!xkvL<47n3mN# zdoIFJ2nu;Z0EIw$zr>!K?YO?!iKxR{AtjBox|N~~d{eXTu)Sd&i15<^S zE0LI}(&}zB@)T51ile{hK z-U;Ya9S)q!sP7_$wp5X)T7gQ_C{yrrnM}Q#KYR z!C!XWJnh$P0N@?PK4vV`WR~N#O_UNAhhxigz=zsZ)~oce-Cp7kE=PPa;Ujd+4hfkh zg~sD2bd0{c(uydVk~DNlg7X#lgk3$_09u9lfwEQ4en$<8m>liLjl<-a->8qA2zz^h z{7-^CO_NKW;ez21r(aKS@F=nV_USkGVG~6o8@@AdO4K-%PbH^bg!E>X`)(ltg`wgA z@j0ZC{Z)(k5;^;=hLWZ(tG$JB4kfUmG2^7Q}SH zkI3c|2Cm}CQrXH8D-qJpT5W^4Ess?R`^vt&}7*w%0AUB&8%IrASJHLd2y>a0>u^ zYz?`a+XqwPfcv>3V8qBA-tUnK2-4~CR*_DcTbQ60chXWt$Oga#4pvDW4)!=Z?Gm_N zL6VVWW1dg683dH|If=+=E-~CnO01V!X(L${8kcRaa0c4Cdf=GnEH`Pm`sw@;LO=l{ zieT4itsiNKnC?k%izvF#ZT*JZsGVPTn73bcC+H9A4zl$PV!472TR zTg20-?$*+eJt`vGTwBpCufIN* z!r3dEnam*Ssu=D&r?2YDN6Neu<1S?ks*94U&qdaBy(xr6jHG_|eECWU_iyQqNm)t{ zII3PFwp5hD(#$W=ImgQC!(~py_2ss;=FTG1V7OEiS)<2P#cBQ0x4X0?@5}{!a2(jo zZv7iC_0*`J^B}{g*gRmfK3Iw7Q;gm^UugBn33H$l5yrv~%ch z=ZL9lp5uKLdb^aYUUn9;K*9T>$Hd(DSx)qNH8zD$Zk0fT7cbJ1N|P1?DT>AVoehM7 zd2VibUie}6c5Wn{6)O(tA=RoH^hoiaonq#@_uN9mde0! zYo^Re{nC|dW05vJ>OsA%FmbOI-g9&D_03;!j#z13W>v=Kb+?>6GAg;|-W6udgqDtW z$+?n=6-pW$p3S1Tt8{rKO`b<>N1rSle3M-{ zT8$|>y@*MoQQ!nL;#7b;eo{IB9C`k)(kg!K9 z1*Gmz__0c?#3FOZY5MFaNoq`I+21gZPh;B<66siZ2q6TGU=he0@6+FZb{L9H-o-0R zHM0sD%?;EbxLsjxApmOy4uXO@jSA)PII__OJZI>SeRJDw)A3O)cQw@zw5YKe$pDZC ze5O`MrpK42EEHH&Zp!&~T|we|O*3?8C$zNpWPK7`9Kgjz#3rEIY)Dr&p8)zBC;@Z5 zy>LYI4~$)zfzXDc$0DC8Y}XzRx2m@s6N^X>IU$!AjH6*=a8tzh|(<`w$CNwzb8PfWz}dvh?W^<07Fl zjH;y}DkxN>GF(7CY@n09x&wSmO#?~xalX#zy01x1Jm+d{e1HzipORvmWNXJfz`cI?9v%^^5u9%^nQajnj7ywIdb*iMUyzt?~IUr?s3WG={3#qQ5zfCh1&?q+q|apRC0$7gWfD^2FSI^>F0wVLhdA41l^dE zf8X*yG&<-t5`~b&gzB}`-Q6SN6^6wb975ciDO1Bv{{Wgtqwtq8X8s~%8f1Ai!wybh zrLf~}sbH)sYbheyZMf7e=YwkTNa--?E<#&62Y9I$Cu^B44+JX`%c0}WiIrw7cie)mZ ze?#5KWm>JyT&4TX#9^0RGNq{{Yl7T|1J9i?PaJ(_l-7i%4E{9LwR* zQ>#!4BXUKLmiE3q(%Oq^7qXdyZ?__YcP@k>BoId1B&AnAoqAzHsFI$S0}XA*z@QwZ zw%(Mike}?4afLEcbV12N(QEusHB%ZVOJ`9hVXI-!Uhy(v&qd1{Ykm?cK2)=IPctJ4Z@p)w27&>khUgt;dvX<97aUAr?|rE`O?pTdw1g#Jx3e zAhg>rFnE1-@Kn5S2XT0FE=!I?TblORgP9!w_FO3vh?P_zvr>@CBe<2R4wrxveL{4$Nn*JSnej})?RI-{Y5g&~EYx`EhU9x=l1Y6d| z*wrp1VQ_PJXtNdB_^TmwGHBsr>zCJJK^aD&4ofR`=vqkobN5^TSX}L=9aHk1hPM4>lp} z^Wd#eVX?^F(QxjIsWU23Ux@T*^0g#s*sAv$M)J3D=S zbg4#H4&zi4?a|i4i-@y`U9>&sPe7rH{8-DFcTaaxcA?C9AdP;r>rt%!95mZ&6uUIE zoA(PgAdB1rIdbc^+{P?4_~|5jPZa@CgfKLS(`)%UhZM9h>j&Lp$Q}dHd0@B20SW*HK^CIO*{kul3ff_;ccsyQREz(DxsK z&e~l`w4A!*B@RY;z|zQ45=l+d&z-D#Z_m#VbJmA@!!

0FM656xp(qetLi)`_G?0Fdw)^^9tYEWNWu2&!DA5(`jmP0jbGD>%NpJ8>sWO`E(}t z_hBB=OI(bC9!VqK$mUdsmn0e!jJ&96mc#1M@=jWX}D2ar1{zdyV{^67?W#$7Kt?F96WY55-nBz3~Yt;}}( z(}h_*MF8oTq=buxe%UD(-&WVsHr~VC%K(b|o*s#Z;uh48L_hLD=*^PCN}3~5TUz_Z zm1AqCkx2w}vGW^aQdS3Ken>y<5qye27bsKXskOv?=H%)LMU+YkfEP$m9JjvLAn&#r znziotg~DKB(rE00b19mXGSkj9sVw(uN|w@;;6lAr2o~#oxdJ-cG&P4zlA)|)^1s8u zGIJJOrPUlt>eP`1>k;0vHEAG{NhfQa$ho-Q=bfbJ>m-O?H(V*&I-YMa*X_&0;+sG0 z<2BAR5kjXuo#UlNWrhO1p|PzONnXf1b0%zhsZ)vBy=nv@4m zAdGdNuSDaC*?T=z%WJB=LCWz|32;tusq_LaKFN;pB;0Z;N%iN81XH=WI9tCX@l|^6 zRq*J=p$?a@Sa`a}$f;{C_DaY!*AXJUR?Qj3>k_7d(F#A_8{ImcBc{q7eD*9=yp7O2 zGWA^h66IRHT&ddFhkK8mhn1A+m-us$@^afwUCSA2iy^W~987Sz7qy8~>nm{IeZd#s z1IfN_TvZzo$7?G80ODkGhe0`b3lAjiQx2_R#UaT{KeY3Q?_R(s%yJz5ctc{(Afm27 z%gJoZYX~I|%8-pW8byHTz0UUepF!2;FpGqe;I?8TM^kEPlv|n5O4NFicL#m?{{Rds zD8_{-xZ3FXjE?mnl{kTZej$H&+n7A<*ADIiY*QKAC5ths2}+Eexq?(Z2_wQbK6~LL zG%N$9Y#en>r7372g#Z$u6}9<`Twj;R8NN;Jl5=nan%GBZlBK1h@Fajwc&|IySP%2U zgKw^@Mdv}H$+Jd7DUoC?Bq_CkpsW7!2R^$Uw>;*9=OlJ*?1HP z;jVni5L`u8jW|I`xRoDNi0&W*xl(@Uu)l~W^TjI?qmVm!%r@z=x>D`l6^OhTB>~6v zUCnj-@;%o~S*mgrKF zQiZ7+_x}K+aA{S%fyCVKZ)VhHwvG5ml-TP(p|Vv z(3P^Y@CM)Oj(4;Xg51Z&bj&IDio)H(-XV0igz~4QxM~kN(!ovKC;)@7*-5|ap~n!$ zSk3&ECuzeinzFU3B93R&GMWLuBaMI@Pp3|{wx>mXHSo|z>1TMe z1x_l@5)?Oz1#!RYVn2;4IkMJX2udA9uWaUH7t6*VsEX zu}!3e3YU9F^nR%9>Fqflg~#@jl`6XD?y!3phJagmkKT6N67m1iE|Z6U_mjUtl( z$88}EFk+*+X&n-TfC5LL7`KWT##^JoRjKxtMi0$6j<85RsU=;UvK>cK<>aiQ;a59F zZ^iElPo5HR$Xsu_JdwZnnTW-cDj)qv=7Bw!cwB*{d^yTa0xfT4Z6JI6HaOSzJRDdS zJC#gW=~VIj#Qahu-pKqImbHiYWtGQIvy9jlzx6=<9BAP1e!@DNmE38AnN;r+@w#nD z>|4NjAO|>WPq-Eu5_~@EoG@@&WBRsIOy#2;zI8qze3Nb7z`O&L;O??ffUADWm$$CV z!!N)f;c$$HmrQM%$kY6-`2?du`ad{9MXS>D6d1K8sxTz8HG|$AJ>yFbCh0vjB{;5a zPRI%H+J;8*9AkJLyQLLb%kCpJG?cS7g|alXn`dVY(`y}URqt!<5La)!O1@_J`L@UJ zG!^IPg2QWQiBzBNBS~-_CdYG!h{z!v{&7`bHMr!dYin6c0a})MgXxE&ZL>P2T_cIz zIwF~aB2&1Xm8<+Uo!+%bs?$ENf4W)M-Kd2s$IrlVd*qJ|`)1NpL`|$*@9AkiwpV?X z#ovWiLx{!!F<%JTviCPH)kL!q={*W@c90Jq){V=g4E-{{Za!+oY$-cJ_WyH3#0O zjP`F2^Copy=(EZb(6qqLIl`3aB&AA2FQ)#XX}`%>TzD*__3z#IqTkE@^}VG#PxBwe zLzVvkqN6=RQdO7uheWKfE-fYk<2sSkbgfA~TU!a24XJ3mY46+*p-4qz~^PkH-T#il~05@m3ac#k1}jymqu7icEXx_TwS| zEX2!YR_N)l-^>r!EN|?UI+ppPfAMQ)j%|7Vn<9^P0pn^=f8EqITXNbaC|rA#lZ}dM zui5lT9I0&kjf{j3;VHK>uy-68D_`9WE4wK+xG5*EyVnu1Vx+WZVi7E3;ilpjzJFbPNRuEr zgd5pV$5PsPgy}g_1MVz!QwAmuG2;EyM zDc0308i;iw&60P&J0Fe*ro=34<)+jEZ0%H6R;0zLB~heD5oVx}l?2^QA1m*1_#9Yi zsvRFWhT%CRVZ$`qO$MUuXAf|iicxYD2}*|6(r>Zl<_094TEO6MYpnKGZt2YcGKF#* zPRol4lrj)d@*Q*)B|o+I3Hg!b%yLt)NL+VZ?*a2d#TMzX8uX}(IpyHk0m4VGX3a(WW?HWWw8Ur$^NpPpc6sFP% zutSO`Qb;3XxIC?Wv3P~TW}W9**6-~dE11?gN+-wc%<9oNIro8&~Zfx=!G9IE$;pK^xGU zf#uut(OEF8e1{0@8MyPqP*3i=iQ~F?*6C096}Fp@(j4xdg5fC&R>WVSzb|$nC8H!8 zY8A4Z5gWi9+IaCxb=6oIrHan%pPOid<#oj_M04$)%avnH#URy z>NphwR;fWQI)(vab7Jkga7&wzLc!bP`WIR%yW4o(r-wr2n9duS_I%j}ho`9Dhu0Jx z3piCN=rsAQh9@HBQJ*Rac|CN4efjdS{?Nmf*gR4nXv22&jvc9t6KrZ?A7-c_1ot=dJ&QlSv?Yiih#2EkKScs&ZK*Q@om(6u(slxq;KWd!3j~b&3jL?&#Q<pp8LGDOLH8 z5#Of3^tcAwVjgz_cet!KYfDd`%}Ufpo(*V{lrWF!E?YVkb7S{zP_2gG6qB*F?jUN~ z4sH7R_G&zkGD=E#9JvOg8OhC3o?D1YZex&_lyA!7=vX%A=eEZIim4cP3$i^rDC(Km zO^!tyrn4eLtBlB}du{ZTNpIQZ9gUK~0CT++lOV-QDARb!Oh_c7Bo>WIGj1;AE<5pTJ zA!%AkTW84X-%YGf-W&6`D2l3Boxp;Dnx%wa-T0|?5*d-)2NgDnAZ~@TrD)rq?2vZo zdV24O2_bV%mkI`2j)EQ9q)KL1W@ANYT0@E&irP>Fn+|DJ(odluEDj@a*F^`FU37+gBIm}1l$#Cr1y<>6?<*DoSmuYdxpn30Mf;RPwxSE> z{Ko;E`z@D^lSXDtqQ$yoW^1&fN|6d0V~YxE9$^(l5Vse8_e?;cbM!>npYL1_l(o4IZn9_n2kN#j7K0gkOF*B z0UCE8_%HpO9NMs48t<;vSeFS7CPqJ;dUf$X6)sa}(yH;6sVqc8cpo^8mv{MonI1pTd$Th8=h%9 z+3u_vh-?M79jDp0qT^ep{d{M_E%QHy6xl79SlG!HD@00qdnr>WipvCmRI*)OuC~3C 
z*RA<-!-3`g?XBXJSQ>0^bTVTFcYfA}-5?NsrFw`w{^%NPKs)1RE|Z^HY`=qY`td`! z<53J^wRc*`?#jSQiMHBN*KyEWea*3w002LN(^yI{)Q42jB05sZAt7oSFSkov+is+M zFu3G4y|P*{fNjNU5Tzugvf`4xid$E9lW#Q+XO}DW<;w}N)6*cf9)UU2Q{B$u4;9c(mH{R%9lixF~GdF{QgwX8Yerm@78yNg_S zp+iv!Z7(p%aVF{9KcJqX<48UbcE3zGU2MM;i@S%*@mr=s;;KWiNQRHu zbe7#f+wXX}ayH#mkhU|`+n-|U7*8#A(6lnDCdxWTFpj%s)8G{&;vZz*&{=U3Cq&8Z zXE;q8nOS0Jn6)5Ze`Qc^MSxAsn^muiO(gIfujBIS;-~!f_EzETHk&Jmb0DIy@{hC9*QAswI!cS_=X8an zl#oJh0lJ7t+?WkSO&iTS=ZEb702e{yYh*Rd*-*A#3anOlM~8wVZm#LRKsSN878W-; z z#gX%WnbvofUG_`mPXwu_rKf?64UD|!eMo5{r zbUnQ*kV%5oUm@l`X>vAs%633aXds?i(m=zFwxFo_unMzsS>Tsh&RI*F)!JG?*br^* z-)ubBv}&VYfz|%23N)P0mWf$ry-H9geJ=xVei!Xq!64xEL*9(|EE=sH%dA#ujuXH1 zi9ZZ9=Kw*zOMhdU*7j$?U{0njDpDs?)IA&+XqMk3ud>biC&37cwKXd4y|O!M=nDD# z@q~*loIYJ#)W05U3v8*mksU}I>0eKF1jJl^&mkJ_pG4f0t^&oREJyB(4iyEJkPCJ@ z4fnspA1?!`9NEkFxAOLO()-}MdrY@4yZgzhoI}OC(_+6Nq()mRSlkj#`C=|d@M=tS z>>}<}$1#-m#b|9HU!DDNro5Ayl%=JL_W>^O$;AxMOxe8}UQ&-BEZVF|_~W#GPx0?< zOg&7K?%U$Eh_rh*GIC>u?PnvG?azvP48kSO-FHOnpEA0_+h~t zO2bt4=W@KJLdc+Lb;#*ur3AJ$1I(Z(U#+(!f2KUcE-W-3S+Al3uXL3n(6af9do|uZ zkG!L?Byt;dw^P3?KvmRlj-*4Y$;$ev9(^S$fQOrihg}18X%^?b$6kMzt|1|tb@nEf>jm(O<1fin8w6d<)TGMS-I|Qo97xhWzgN7gO5Z)gTl2d>@;FLurFGGET z!WOux2}+QQYe_25yZL~Fx9<`;U~@j}gRg7-6t8SsYn@Yn>eal=s%d!*vakXOQL+j1 z1m4??yt-|I%=RJZf^V2e3`B9DCq+`xDIm1sP5f8;;^6X78od^ka@B)hASZF!eR_Iz zrAG)EqVgJSw9Lq=Y=*WVCtydH$lJ?tjj_ZM*KtO*yh?OaB(3E?DYf^z0U-d3C|G!a z9|PBG5!V1zMgR>x$>!TdBGPIusc2}rlc^vf%~$sVdY|*eB=8MWGe9?QWY*95Jl5D$ zxNiF-;dQjMTm>jx**5Tp6NX*((@J!bd$7JB2aB2oa4W%n=l!IpOG#$ATdRwmkT!tX#i*?sTx(jAh zTb}61?pHn=Zby!!1v=0D3cq<9?4Qfd*0lZ+S#!AUQ;&x2xhk^PV>dA`yrd~JR6uzl z7EFYlimm2t;vBl-_eVKxfYXOp*Gi}2l+BPA5zf)w4jnyr;ye_5x+I9sCaX%7BuYpk z$v`VgZ*?HwWuvZ>zsBbig1N6A%2Ah>Z%8VZ>;dsf_NrGM@s>j53*U79(BosDMtP;Q z)T#M`Dxpe?e@hW5DM0GfFz-@en3*iOJ) zZI=Rd1*GN()C=zm5-tVMP-+UIOKy`{Qe{$CZteHg<7H&rHt6ZKw6EY#QsqTIw`aA4 zS;K(hI!DAI>FW&y!kVz&Curv!!M;s*NAA0hhlS^QEcCXX>qmIitfZ)dQ|dg%DU~pj zo`rRKIYyijI_6eN)E|tV@v;iiY%Vt3c@gjb04zC4h!8hQ8oLP&=eM;*)qJ{T*{Nwl zR)qz#AvOh}=6v~D{_eQ4QP=xC+1?co8NkG#2{i`b@gH0)iAQC~G8txkNBj2oM2PNa zD;)28Se>nVd+=0`YumiTk>EwtY&+EM4VkBWXy)aAo%(=MZdZgxC@zW5WJ5#9#D zLsby&Zxr>Tz4Gtkq+b z)c0)f3_YXJT=^+l2LWqjnUWu-Q&U>BL4t%o?4BurcTJs z;b?Dme?K(L*j$ne%;HZ+50{cme>t~WanGz|7TR$5B&Jzec&6s*O0RGT$xgi z;!BUJ#?gB*&G5FKh`Q=Xi5-&5>Zr1{gcV5&)}T)OH>Xo=J@Y14r?su*yHrEV=J9Y`bd znI5l~$qQ40!dtxFVeGi`%qh-;pQxFY=HW#4id|1gQlL$1xp^mJU{`c3b021ZNsKcKS-Z_qunDY3iF@pQ%{m z#b_ZR1KK^8T7q?JdC

U!fp?ISUUX%Nr1e^OCp8xLOAtamI*mQ+EIc^nk1X09=3u z#`j6TEG!%5u!{>67;Ct;hlxqLmECL;u|6dbeMuvp*FI?kgre)^W%Fn;l(*0uY^6#a z;z3i6SZ+CLH{1?)Hs!b}X|cG-{dC=9x;0pI>UjxtvXxs&Q-f23+?4oId5_`O80R@H zqbr58kRNp>#J;x(0ZU$xo_hp~+mYJWJB%{sIQ8}Z3qnh37OpoPE+s2<$!fYmOKa|u ztBZhCKJX`?Hn+zbcmrJ4!6!Wtp*hzJO-5;k$g$F!ZDKc3PeITUcd@nkY{*@cMV5OZ zsal||a#Y%sR-lqfl9CAZ)yvH84#WfMf_d4koe*v~k~N`z0jWqsU|oJgyf(2ro}}`^ z9O1;Q+$d!!uHZaU+81MF=+so3pB2czFb>{cm_xwbxDVtTxWy|( zbC%B0`&l~Q#4X}M?J9KPy)P~BUn^Ae&SI`HRfiqfXr-yRszYu2BPK-2>ScEd_H<|o zNV*px_EBMurHJp(c(@QQ>s%%D3OVXjb08$%K0C$Pd($RgjwlA?bvTUXv>?W{P$02aB1*eNn zz2Ml6uz=cqnx@6*jTFVrAcry9hP}FF#2b*{GqEWyRP4<_pwly!NvNSu z(`sz7_Hqynl!bw1Hu4E6TTAgFL#QC0;LKE!zQMfHp;UTKOfYE)f?14HGTY53F?|Rm zShVWb42bAA2zEPPXCyR}Glx*zAv-`B*Rp8Mda@jjNP-^gVFJj$XwZSzLe+WJaD z6H?&&N>I2}lmWk>)4m;bb8T|e(Bv(L^flgJirF2nG84*eb#4+&jOBqP0RWFseR<$R zh{b<(FnJ8b7!R?n2gl}>qV}!e#KoyHvK3}Et`w!Rg#@0W=~ec=HZd3pu>}V&BDnhn z+fdWhbMXlJyG!sDDR;fgS&6fAyKJdf_-e-8TL|e)w~_s%Bz|>=zTX6tdqnndc}w<& zGc`R#6sW1EQEy$9hDIYyc>1G9CA(C?b~%K0x8#q}_{-U{eL*eKGnHzQR66+dUw&P27meanY^IU!d$Jrae&jwavU^oaOy|@@$46@$1{0oHTE|j5 z6{X2McFI|%BkpC)&z3WNEND{?(HhIl^U73pE zu4&>L9czpJsj(UYMXkcIJ)4wS%muDGgQdqo7aheZ)w5+bYE(qF(SO-lC|Ai=BOEgk z!zyUxG1bQC+&DNE`ur7IdRgM;$kIN#MZ$)2MShzdwc2b=SKjWVV0fdxcUcWd2F*?C zhG=Gmm)p!zbNw@g)X^!X1#WViM0VU=!dNSCgz9i=+WdjXcH|ELs*@T1(2nhjHoO;G znq1%jZTOxh+5wuoY;_x_t02>wj%ho9*qd9O)6L7Y7AxHLs`DoZhRoS z+i&^MPCcL>Dbe9{Id*-l7k(Ejf2jI2r&!GShAJ3!-ZkZ@tvi0{&un8umLqf$;R?4d zeQ@WrkR09=Z}x02Bn&m~Bllc?$`XWJ;g|sf{j%iAp;ppXSG@ z`gD!%eZ1P7Y*jOaq!qEB=u%uzuq`-+3y!wbeKCjH>Kvk^p{HTz(mW2Z;qXV>R}q{_ z3t841ootrE!oAiw&~(4FhJV;a?@Hn0@<-eM0BQ+08J?P_I)z2I|Omx(~07%&8fF*P?pk@W1*!HW6*QPT5Rl7B77yJA7patTsx%F3zbJ{@1b^e2@O27 z+RJZ27XIh}`+>)krkczS953d*Xz6Y*(3Q~{q)m#MEwY3@r64G3R+l=3blhD00QA75 z^G&*uESA8II;F-NRCW6+t}R6&DzO6hEjD``Ft1 z9oz$byROKM6U|^56p1yYwq2D@33daXM*Z0*k>zjo=YryKJlEtxA`o|XN&+IL(;q{T zmQXLaO^8YHTbblRHa`q5P}hq&KNC7rf!R`$({kgf(%;@F+rho|>PO^Fy5J^83Eg@) zH$4-=gP0z54Mmb!TJ8OiM=RWX2dBpob(BH8=B8(%1aAb%;x18BgjZm?3T_kNY$UR; zQhAP+{{SpXV0e%N*t{kpkJ@uiP8@nvQy(!(g6oi_sI>LK1t5gE&dU9D#g0k_ya&gk zavGz3$pwm7nOS~pbm^4fSU@^e`&Jyb#Cs`2O^C@#F@sQ}S@}{eRWcO_p%33tXH|+1 znmmpA9)E@>BE%gL0ooucaE3?;;-dMqS#;@-B}qzB6q_jBDOI^@19Yf$y}`wTDz?3# zs+CR}_+>d|6@<J3w45|NR5bV& z^Y^0Tt8%gI>pzIG%_k}memZIDjASJQpha~3qD8qjRdRo@cjsf%1c?Jq_%`brca1&@ zK1yKrdji{m`lnyzCingm{G_KISgOC5TWmVyiPT$yjU=scQ_pd0lnuw2^&El3DO)+a zMW5t<6)l0*NkYpwm&4vm2YzL~W1`ByQf5Pz!+Io_m{*AbvwnZNe7RzmR|A@Kv0XL^ zn)cnM;P?5hDmg+l?;8pQM0EzRE|G(%A&1_{&iMi|GerZ$r;x?*`a2?KdEC zRqB|%m>?U1ro4!m@f~FdE1MMaO|>NLxIenBj{SYx;u{mDWP?i;8ti4v1&%jY4RV-T^jwTn(;0IX)>%mQQ z{sh*iA-OYIrDc}XqI67sEV$A04Gl{d9IlW}kC4R8CJ#An*!XI>z9Y&VD-EJ? 