From 38c5b51d598a538e3be4aaf2c88b1c669401e26b Mon Sep 17 00:00:00 2001 From: "Pavel A. Tomskikh" Date: Wed, 1 Feb 2023 15:49:41 +0700 Subject: [PATCH] #48 add FrameParameters class for processing frame config --- samples/peoplenet_detector/module.yml | 5 ++- savant/config/default.yml | 7 +-- savant/config/initializer_resolver.py | 3 +- savant/config/module_config.py | 12 ++++-- savant/config/schema.py | 14 +++++- savant/deepstream/buffer_processor.py | 61 ++++++++++++--------------- savant/deepstream/metadata.py | 19 ++++----- savant/deepstream/pipeline.py | 20 ++++----- 8 files changed, 74 insertions(+), 67 deletions(-) diff --git a/samples/peoplenet_detector/module.yml b/samples/peoplenet_detector/module.yml index 1c3d4aa6c..694ebde4a 100644 --- a/samples/peoplenet_detector/module.yml +++ b/samples/peoplenet_detector/module.yml @@ -1,8 +1,9 @@ name: ${oc.env:MODULE_NAME, 'regular-cam-human-detector-peoplenet'} parameters: - frame_width: 960 - frame_height: 544 + frame: + width: 960 + height: 544 output_frame: ${json:${oc.env:OUTPUT_FRAME, '{"codec":"jpeg"}'}} pipeline: diff --git a/savant/config/default.yml b/savant/config/default.yml index e39b04679..9d2aad2cf 100644 --- a/savant/config/default.yml +++ b/savant/config/default.yml @@ -29,9 +29,10 @@ parameters: port: 2379 timeout: 15 - # pipeline processing frame width/height, default 720p - frame_width: ${oc.decode:${oc.env:FRAME_WIDTH, 1280}} - frame_height: ${oc.decode:${oc.env:FRAME_HEIGHT, 720}} + # pipeline processing frame parameters, default 720p + frame: + width: ${oc.decode:${oc.env:FRAME_WIDTH, 1280}} + height: ${oc.decode:${oc.env:FRAME_HEIGHT, 720}} # FPS measurement period fps_period: ${oc.decode:${oc.env:FPS_PERIOD, 10000}} diff --git a/savant/config/initializer_resolver.py b/savant/config/initializer_resolver.py index 49dc8f005..1e9229eaa 100644 --- a/savant/config/initializer_resolver.py +++ b/savant/config/initializer_resolver.py @@ -23,7 +23,8 @@ def initializer_resolver(param_name: str, default_val: Any, _parent_, _root_) -> etcd: 10 parameters: - frame_width: ${initializer:frame_width,1280} + frame: + width: ${initializer:frame_width,1280} where diff --git a/savant/config/module_config.py b/savant/config/module_config.py index 066a6d850..7ce76bde7 100644 --- a/savant/config/module_config.py +++ b/savant/config/module_config.py @@ -11,6 +11,7 @@ ModelElement, get_element_name, DrawFunc, + FrameParameters, ) from savant.deepstream.nvinfer.element_config import nvinfer_configure_element from savant.parameter_storage import init_param_storage @@ -153,12 +154,17 @@ def setup_batch_size(config: Module) -> None: config.parameters['batch_size'] = batch_size -def resolve_draw_func_parameter(config: DictConfig): - """Resolve draw_func parameter on module config. +def resolve_parameters(config: DictConfig): + """Resolve parameters on module config ("frame", "draw_func", etc.). :param config: Module config. 
""" + config.parameters['frame'] = OmegaConf.unsafe_merge( + OmegaConf.structured(FrameParameters), + config.parameters['frame'], + ) + draw_func_cfg = config.parameters.get('draw_func') if draw_func_cfg is not None: draw_func_schema = OmegaConf.structured(DrawFunc) @@ -195,11 +201,11 @@ def load(self, config_file_path: Union[str, Path]) -> Module: module_cfg = OmegaConf.unsafe_merge( self._config_schema, self._default_cfg, module_cfg ) - resolve_draw_func_parameter(module_cfg) logger.debug('Merged conf\n%s', module_cfg) init_param_storage(module_cfg) OmegaConf.resolve(module_cfg) # to resolve parameters for pipeline elements + resolve_parameters(module_cfg) logger.debug('Resolved conf\n%s', module_cfg) logger.info('Configure pipeline elements...') diff --git a/savant/config/schema.py b/savant/config/schema.py index c1a5c7f06..b91a3b943 100644 --- a/savant/config/schema.py +++ b/savant/config/schema.py @@ -7,6 +7,17 @@ from savant.base.pyfunc import PyFunc +@dataclass +class FrameParameters: + """Pipeline processing frame parameters""" + + width: int + """Pipeline processing frame width""" + + height: int + """Pipeline processing frame height""" + + @dataclass class DynamicGstProperty: """Allows configuring a gstreamer element property to be automatically @@ -350,7 +361,8 @@ class Module: .. code-block:: yaml parameters: - frame_width: ${initializer:frame_width,1280} + frame: + width: ${initializer:frame_width,1280} Etcd storage will be polled for the current value first, in the event etcd is unavailable resolver will diff --git a/savant/deepstream/buffer_processor.py b/savant/deepstream/buffer_processor.py index b40702f3e..51329216f 100644 --- a/savant/deepstream/buffer_processor.py +++ b/savant/deepstream/buffer_processor.py @@ -11,7 +11,7 @@ from pysavantboost import ObjectsPreprocessing from savant.base.model import ObjectModel, ComplexModel -from savant.config.schema import PipelineElement, ModelElement +from savant.config.schema import PipelineElement, ModelElement, FrameParameters from savant.converter.scale import scale_rbbox from savant.deepstream.base_drawfunc import BaseNvDsDrawFunc from savant.deepstream.nvinfer.model import ( @@ -61,8 +61,7 @@ def __init__( sources: SourceInfoRegistry, model_object_registry: ModelObjectRegistry, objects_preprocessing: ObjectsPreprocessing, - frame_width: int, - frame_height: int, + frame_params: FrameParameters, ): """Buffer processor for DeepStream pipeline. @@ -71,16 +70,14 @@ def __init__( :param sources: Source info registry. :param model_object_registry: Model.Object registry. :param objects_preprocessing: Objects processing registry. - :param frame_width: Processing frame width (after nvstreammux). - :param frame_height: Processing frame height (after nvstreammux). + :param frame_params: Processing frame parameters (after nvstreammux). 
""" super().__init__(queue, fps_meter) self._sources = sources self._model_object_registry = model_object_registry self._objects_preprocessing = objects_preprocessing - self._frame_width = frame_width - self._frame_height = frame_height + self._frame_params = frame_params self._queue = queue def prepare_input(self, buffer: Gst.Buffer): @@ -136,16 +133,16 @@ def prepare_input(self, buffer: Gst.Buffer): ] ] ), - scale_factor_x=self._frame_width, - scale_factor_y=self._frame_height, + scale_factor_x=self._frame_params.width, + scale_factor_y=self._frame_params.height, )[0] selection_type = ObjectSelectionType.ROTATED_BBOX else: scaled_bbox = ( - obj_meta['bbox']['xc'] * self._frame_width, - obj_meta['bbox']['yc'] * self._frame_height, - obj_meta['bbox']['width'] * self._frame_width, - obj_meta['bbox']['height'] * self._frame_height, + obj_meta['bbox']['xc'] * self._frame_params.width, + obj_meta['bbox']['yc'] * self._frame_params.height, + obj_meta['bbox']['width'] * self._frame_params.width, + obj_meta['bbox']['height'] * self._frame_params.height, obj_meta['bbox']['angle'], ) selection_type = ObjectSelectionType.REGULAR_BBOX @@ -175,10 +172,10 @@ def prepare_input(self, buffer: Gst.Buffer): gie_uid=model_uid, # tuple(xc, yc, width, height, angle) bbox=( - self._frame_width / 2, - self._frame_height / 2, - self._frame_width, - self._frame_height, + self._frame_params.width / 2, + self._frame_params.height / 2, + self._frame_params.width, + self._frame_params.height, 0, ), obj_label=obj_label, @@ -267,8 +264,8 @@ def prepare_element_input(self, element: PipelineElement, buffer: Gst.Buffer): else: parent_bbox.left = 0 parent_bbox.top = 0 - parent_bbox.width = self._frame_width - parent_bbox.height = self._frame_height + parent_bbox.width = self._frame_params.width + parent_bbox.height = self._frame_params.height bbox = model.input.preprocess_object_meta( bbox, parent_bbox=parent_bbox @@ -376,11 +373,11 @@ def prepare_element_output(self, element: PipelineElement, buffer: Gst.Buffer): bbox_tensor[:, 2][bbox_tensor[:, 2] < 0.0] = 0.0 bbox_tensor[:, 3][bbox_tensor[:, 3] < 0.0] = 0.0 bbox_tensor[:, 4][ - bbox_tensor[:, 4] > self._frame_width - 1.0 - ] = (self._frame_width - 1.0) + bbox_tensor[:, 4] > self._frame_params.width - 1.0 + ] = (self._frame_params.width - 1.0) bbox_tensor[:, 5][ - bbox_tensor[:, 5] > self._frame_height - 1.0 - ] = (self._frame_height - 1.0) + bbox_tensor[:, 5] > self._frame_params.height - 1.0 + ] = (self._frame_params.height - 1.0) # right to width, bottom to height bbox_tensor[:, 4] -= bbox_tensor[:, 2] @@ -540,8 +537,7 @@ def __init__( sources: SourceInfoRegistry, model_object_registry: ModelObjectRegistry, objects_preprocessing: ObjectsPreprocessing, - frame_width: int, - frame_height: int, + frame_params: FrameParameters, codec: CodecInfo, ): """Buffer processor for DeepStream pipeline. @@ -551,8 +547,7 @@ def __init__( :param sources: Source info registry. :param model_object_registry: Model.Object registry. :param objects_preprocessing: Objects processing registry. - :param frame_width: Processing frame width (after nvstreammux). - :param frame_height: Processing frame height (after nvstreammux). + :param frame_params: Processing frame parameters (after nvstreammux). :param codec: Codec of the output frames. 
""" @@ -563,8 +558,7 @@ def __init__( sources=sources, model_object_registry=model_object_registry, objects_preprocessing=objects_preprocessing, - frame_width=frame_width, - frame_height=frame_height, + frame_params=frame_params, ) def _iterate_output_frames(self, buffer: Gst.Buffer) -> Iterator[_OutputFrame]: @@ -593,8 +587,7 @@ def __init__( sources: SourceInfoRegistry, model_object_registry: ModelObjectRegistry, objects_preprocessing: ObjectsPreprocessing, - frame_width: int, - frame_height: int, + frame_params: FrameParameters, output_frame: bool, draw_func: Optional[BaseNvDsDrawFunc], ): @@ -605,8 +598,7 @@ def __init__( :param sources: Source info registry. :param model_object_registry: Model.Object registry. :param objects_preprocessing: Objects processing registry. - :param frame_width: Processing frame width (after nvstreammux). - :param frame_height: Processing frame height (after nvstreammux). + :param frame_params: Processing frame parameters (after nvstreammux). :param output_frame: Whether to output frame or not. :param draw_func: PyFunc for drawing on frames. """ @@ -620,8 +612,7 @@ def __init__( sources=sources, model_object_registry=model_object_registry, objects_preprocessing=objects_preprocessing, - frame_width=frame_width, - frame_height=frame_height, + frame_params=frame_params, ) def _iterate_output_frames(self, buffer: Gst.Buffer) -> Iterator[_OutputFrame]: diff --git a/savant/deepstream/metadata.py b/savant/deepstream/metadata.py index 5fd601f2b..c371969d1 100644 --- a/savant/deepstream/metadata.py +++ b/savant/deepstream/metadata.py @@ -3,6 +3,7 @@ import numpy as np import pyds +from savant.config.schema import FrameParameters from savant.converter.scale import scale_rbbox from savant.deepstream.utils import nvds_get_rbbox from savant.meta.attribute import AttributeMeta @@ -11,14 +12,12 @@ def nvds_obj_meta_output_converter( nvds_obj_meta: pyds.NvDsObjectMeta, - frame_width: int, - frame_height: int, + frame_params: FrameParameters, ) -> Dict[str, Any]: """Convert object meta to output format. 
     :param nvds_obj_meta: NvDsObjectMeta
-    :param frame_width: Frame width, to scale to [0..1]
-    :param frame_height: Frame height
+    :param frame_params: Frame parameters (width/height), to scale to [0..1]
     :return: dict
     """
     model_name, label = ModelObjectRegistry.parse_model_object_key(
@@ -46,8 +45,8 @@ def nvds_obj_meta_output_converter(
                     ]
                 ]
             ),
-            scale_factor_x=1 / frame_width,
-            scale_factor_y=1 / frame_height,
+            scale_factor_x=1 / frame_params.width,
+            scale_factor_y=1 / frame_params.height,
         )[0]
         bbox = dict(
             xc=scaled_bbox[0],
@@ -57,11 +56,11 @@ def nvds_obj_meta_output_converter(
             angle=scaled_bbox[4],
         )
     else:
-        obj_width = rect_params.width / frame_width
-        obj_height = rect_params.height / frame_height
+        obj_width = rect_params.width / frame_params.width
+        obj_height = rect_params.height / frame_params.height
         bbox = dict(
-            xc=rect_params.left / frame_width + obj_width / 2,
-            yc=rect_params.top / frame_height + obj_height / 2,
+            xc=rect_params.left / frame_params.width + obj_width / 2,
+            yc=rect_params.top / frame_params.height + obj_height / 2,
             width=obj_width,
             height=obj_height,
         )
diff --git a/savant/deepstream/pipeline.py b/savant/deepstream/pipeline.py
index 69a54b5bd..af44dd2f1 100644
--- a/savant/deepstream/pipeline.py
+++ b/savant/deepstream/pipeline.py
@@ -51,7 +51,7 @@
 from savant.utils.model_registry import ModelObjectRegistry
 from savant.utils.source_info import SourceInfoRegistry, SourceInfo, Resolution
 from savant.utils.platform import is_aarch64
-from savant.config.schema import PipelineElement, ModelElement
+from savant.config.schema import PipelineElement, ModelElement, FrameParameters
 from savant.base.model import AttributeModel, ComplexModel
 from savant.utils.sink_factories import SinkEndOfStream
 
@@ -62,8 +62,7 @@ class NvDsPipeline(GstPipeline):
     :param name: Pipeline name
     :param source: Pipeline source element
     :param elements: Pipeline elements
-    :key frame_width: Processing frame width (after nvstreammux)
-    :key frame_height: Processing frame height (after nvstreammux)
+    :key frame: Processing frame parameters (after nvstreammux)
     :key batch_size: Primary batch size (nvstreammux batch-size)
     :key output_frame: Whether to include frame in module output, not just metadata
     """
@@ -76,8 +75,7 @@ def __init__(
         **kwargs,
     ):
         # pipeline internal processing frame size
-        self._frame_width = kwargs['frame_width']
-        self._frame_height = kwargs['frame_height']
+        self._frame_params: FrameParameters = kwargs['frame']
         self._batch_size = kwargs['batch_size']
 
         # Timeout in microseconds
@@ -162,8 +160,7 @@ def _build_buffer_processor(
             sources=self._sources,
             model_object_registry=model_object_registry,
             objects_preprocessing=self._objects_preprocessing,
-            frame_width=self._frame_width,
-            frame_height=self._frame_height,
+            frame_params=self._frame_params,
             output_frame=self._output_frame_codec is not None,
             draw_func=self._draw_func,
         )
@@ -173,8 +170,7 @@ def _build_buffer_processor(
             sources=self._sources,
             model_object_registry=model_object_registry,
             objects_preprocessing=self._objects_preprocessing,
-            frame_width=self._frame_width,
-            frame_height=self._frame_height,
+            frame_params=self._frame_params,
             codec=self._output_frame_codec.value,
         )
 
@@ -503,7 +499,7 @@ def update_frame_meta(self, pad: Gst.Pad, info: Gst.PadProbeInfo):
                 continue
 
             obj_meta = nvds_obj_meta_output_converter(
-                nvds_obj_meta, self._frame_width, self._frame_height
+                nvds_obj_meta, self._frame_params
             )
             for attr_meta_list in nvds_attr_meta_iterator(
                 frame_meta=nvds_frame_meta, obj_meta=nvds_obj_meta
@@ -531,8 +527,8 @@ def 
_create_muxer(self, live_source: bool) -> Gst.Element: """ frame_processing_parameters = { - 'width': self._frame_width, - 'height': self._frame_height, + 'width': self._frame_params.width, + 'height': self._frame_params.height, 'batch-size': self._batch_size, # Allowed range for batch-size: 1 - 1024 # Allowed range for buffer-pool-size: 4 - 1024
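
For reference, a minimal standalone sketch (not part of the patch) of how the new `frame` parameter group resolves into the typed FrameParameters object. The dataclass and the unsafe_merge call mirror savant/config/schema.py and resolve_parameters() in savant/config/module_config.py as changed above; the stand-in config object and the literal width/height values (taken from samples/peoplenet_detector/module.yml) are only for illustration.

# Standalone sketch: resolving the `frame` config section into FrameParameters.
# Mirrors the dataclass added to savant/config/schema.py and the merge done by
# resolve_parameters(); the user_frame_cfg below is a stand-in for
# config.parameters['frame'] as parsed from module.yml.
from dataclasses import dataclass

from omegaconf import OmegaConf


@dataclass
class FrameParameters:
    """Pipeline processing frame parameters."""

    width: int
    height: int


# Stand-in for the `parameters.frame` node of a module config.
user_frame_cfg = OmegaConf.create({'width': 960, 'height': 544})

# Merging the user values onto the structured schema validates keys and types,
# so downstream code (buffer processor, muxer setup) can rely on
# frame.width / frame.height being integers.
frame = OmegaConf.unsafe_merge(OmegaConf.structured(FrameParameters), user_frame_cfg)

print(frame.width, frame.height)  # 960 544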