Add serverless function with attributes handling
mikhail-treskin committed Jan 10, 2022
1 parent 9fa32b4 commit 35dd7e6
Showing 7 changed files with 191 additions and 11 deletions.
8 changes: 8 additions & 0 deletions cvat-ui/package-lock.json

Some generated files are not rendered by default.

4 changes: 4 additions & 0 deletions cvat-ui/package.json
@@ -1,6 +1,10 @@
{
"name": "cvat-ui",
"version": "1.32.2",
"description": "CVAT single-page application",
"main": "src/index.tsx",
"scripts": {
2 changes: 1 addition & 1 deletion cvat/apps/lambda_manager/views.py
@@ -220,7 +220,7 @@ def invoke(self, db_task, data):
item["label"] = mapping.get(item["label"])
response = [item for item in response if item["label"]]
# TODO: Need to add attributes mapping similar to labels.
# Currently attribute is impicitely discarded if it is not decalred as supported in function config.
# Currently attribute is expicitely discarded if it is not decalred as supported in function config.
if self.attributes:
for item in response:
item['attributes'] = [attr for attr in item.get("attributes", []) if attr['name'] in self.attributes[item['label']]]
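A minimal sketch of the filter above with sample data (the declared-attribute mapping and the response item here are illustrative, not part of the commit):

attributes = {"face": ["age", "gender", "emotion"]}  # declared in the function config
item = {
    "label": "face",
    "attributes": [
        {"name": "age", "value": "31"},
        {"name": "hair_color", "value": "brown"},  # not declared, so it is dropped
    ],
}
item["attributes"] = [attr for attr in item.get("attributes", [])
                      if attr["name"] in attributes[item["label"]]]
# item["attributes"] is now [{"name": "age", "value": "31"}]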
20 changes: 10 additions & 10 deletions serverless/common/openvino/model_loader.py
@@ -13,14 +13,6 @@ def __init__(self, model, weights):
        network = ie_core.read_network(model, weights)
        self._network = network

        # Check compatibility
        supported_layers = ie_core.query_network(network, "CPU")
        not_supported_layers = [l for l in network.layers.keys() if l not in supported_layers]
        if len(not_supported_layers) != 0:
            raise Exception(
                "Following layers are not supported by the plugin for specified device {}:\n {}"
                .format(ie_core.device, ", ".join(not_supported_layers)))

        # Initialize input blobs
        self._input_info_name = None
        for blob_name in network.inputs:
@@ -41,7 +33,8 @@ def __init__(self, model, weights):
        input_type = network.inputs[self._input_blob_name]
        self._input_layout = input_type if isinstance(input_type, list) else input_type.shape

    def infer(self, image, preprocessing=True):

    def _prepare_inputs(self, image, preprocessing):
        image = np.array(image)
        _, _, h, w = self._input_layout
        if preprocessing:
@@ -57,16 +50,23 @@ def infer(self, image, preprocessing=True):
        inputs = {self._input_blob_name: image}
        if self._input_info_name:
            inputs[self._input_info_name] = [h, w, 1]
        return inputs

    def infer(self, image, preprocessing=True):
        inputs = self._prepare_inputs(image, preprocessing)
        results = self._net.infer(inputs)
        if len(results) == 1:
            return results[self._output_blob_name].copy()
        else:
            return results.copy()

    def async_infer(self, image, preprocessing=True, request_id=0):
        inputs = self._prepare_inputs(image, preprocessing)
        return self._net.start_async(request_id=request_id, inputs=inputs)

    def input_size(self):
        return self._input_layout[2:]

    @property
    def layers(self):
        return self._network.layers
        return self._network.layers
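A rough sketch of driving the refactored loader: the blocking infer() path for detection and the new async_infer() path for an attribute model. The model paths and dummy images below are placeholders, not part of the commit:

import numpy as np
from model_loader import ModelLoader

# Placeholder paths; the real files are fetched by the downloader steps in function.yaml.
detector = ModelLoader("face-detection-0205.xml", "face-detection-0205.bin")
emotions = ModelLoader("emotions-recognition-retail-0003.xml",
                       "emotions-recognition-retail-0003.bin")

frame = np.zeros((720, 1280, 3), dtype=np.uint8)  # stand-in for a real image
boxes = detector.infer(frame)                     # blocking call, returns the output blob(s)

face_crop = frame[100:164, 200:264]               # stand-in for a detected face crop
request = emotions.async_infer(face_crop)         # non-blocking, returns an IE infer request
while request.wait(0) != 0:                       # poll until the request completes
    continue
probs = request.output_blobs["prob_emotion"].buffer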
61 changes: 61 additions & 0 deletions serverless/openvino/omz/intel/face-detection-0205/function.yaml
@@ -0,0 +1,61 @@
metadata:
  name: openvino-omz-face-detection-0205
  namespace: cvat
  annotations:
    name: Attributed face detection
    type: detector
    framework: openvino
    # attribute names have to be the same as in the annotated task, otherwise their values will be ignored
    spec: |
      [
        { "id": 0, "name": "face", "attributes": ["age", "gender", "emotion"]}
      ]
spec:
  description: Face detection network that also estimates age, gender and emotion attributes
  runtime: 'python:3.6'
  handler: main:handler
  eventTimeout: 30000s
  env:
    - name: NUCLIO_PYTHON_EXE_PATH
      value: /opt/nuclio/common/openvino/python3

  build:
    image: cvat/openvino.omz.intel.face-detection-0205
    baseImage: openvino/ubuntu18_dev:2021.1

    directives:
      preCopy:
        - kind: USER
          value: root
        - kind: WORKDIR
          value: /opt/nuclio
        - kind: RUN
          value: ln -s /usr/bin/pip3 /usr/bin/pip
        - kind: RUN
          value: /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/downloader.py --name face-detection-0205 -o /opt/nuclio/open_model_zoo
        - kind: RUN
          value: /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/downloader.py --name emotions-recognition-retail-0003 -o /opt/nuclio/open_model_zoo
        - kind: RUN
          value: /opt/intel/openvino/deployment_tools/open_model_zoo/tools/downloader/downloader.py --name age-gender-recognition-retail-0013 -o /opt/nuclio/open_model_zoo

      postCopy:
        - kind: RUN
          value: apt update && DEBIAN_FRONTEND=noninteractive apt install --no-install-recommends -y python3-skimage
        - kind: RUN
          value: pip3 install "numpy<1.16.0" # workaround for skimage

  triggers:
    myHttpTrigger:
      maxWorkers: 2
      kind: 'http'
      workerAvailabilityTimeoutMilliseconds: 10000
      attributes:
        maxRequestBodySize: 33554432 # 32MB

  platform:
    attributes:
      restartPolicy:
        name: always
        maximumRetryCount: 3
      mountMode: volume
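The attribute declaration in metadata.annotations.spec is what the attribute filter in cvat/apps/lambda_manager/views.py consults; a minimal sketch of reading it back out of this config (illustrative only, not the exact CVAT code path):

import json
import yaml

with open("function.yaml", "rb") as f:
    config = yaml.safe_load(f)

labels_spec = json.loads(config["metadata"]["annotations"]["spec"])
declared_attributes = {label["name"]: label.get("attributes", []) for label in labels_spec}
print(declared_attributes)  # {'face': ['age', 'gender', 'emotion']}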
35 changes: 35 additions & 0 deletions serverless/openvino/omz/intel/face-detection-0205/main.py
@@ -0,0 +1,35 @@
import json
import base64
from PIL import Image
import io
from model_handler import FaceDetectorHandler, AttributesExtractorHandler
import yaml

def init_context(context):
    context.logger.info("Init context... 0%")

    # Read labels
    with open("/opt/nuclio/function.yaml", 'rb') as function_file:
        functionconfig = yaml.safe_load(function_file)

    # Read the DL model
    context.user_data.detector_model = FaceDetectorHandler()
    context.user_data.attributes_model = AttributesExtractorHandler()

    context.logger.info("Init context...100%")

def handler(context, event):
    context.logger.info("Run face-detection-0205 model")
    data = event.body
    buf = io.BytesIO(base64.b64decode(data["image"]))
    threshold = float(data.get("threshold", 0.5))
    image = Image.open(buf)

    results, faces = context.user_data.detector_model.infer(image, threshold)
    for idx, face in enumerate(faces):
        attributes = context.user_data.attributes_model.infer(face)
        results[idx].update(attributes)

    return context.Response(body=json.dumps(results), headers={},
                            content_type='application/json', status_code=200)
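A hypothetical direct call to the deployed function for local testing; the port is an assumption (nuclio assigns one at deploy time), and in normal operation CVAT invokes the function through its lambda manager:

import base64
import json
import requests

with open("face.jpg", "rb") as f:  # any local test image
    payload = {"image": base64.b64encode(f.read()).decode("ascii"), "threshold": 0.5}

resp = requests.post("http://localhost:32768", json=payload)  # assumed port
print(json.dumps(resp.json(), indent=2))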
72 changes: 72 additions & 0 deletions serverless/openvino/omz/intel/face-detection-0205/model_handler.py
@@ -0,0 +1,72 @@
# Copyright (C) 2020 Intel Corporation
#
# SPDX-License-Identifier: MIT

import os
import cv2
import numpy as np
from model_loader import ModelLoader

class FaceDetectorHandler:
    def __init__(self):
        base_dir = os.path.abspath(os.environ.get("DETECTOR_MODEL_PATH",
            "/opt/nuclio/open_model_zoo/intel/face-detection-0205/FP32"))
        model_xml = os.path.join(base_dir, "face-detection-0205.xml")
        model_bin = os.path.join(base_dir, "face-detection-0205.bin")
        self.model = ModelLoader(model_xml, model_bin)

    def infer(self, image, threshold):
        infer_res = self.model.infer(image)["boxes"]
        infer_res = infer_res[infer_res[:,4] > threshold]

        results = []
        faces = []
        h_scale = image.height / 416
        w_scale = image.width / 416
        for face in infer_res:
            xmin = int(face[0] * w_scale)
            ymin = int(face[1] * h_scale)
            xmax = int(face[2] * w_scale)
            ymax = int(face[3] * h_scale)
            confidence = face[4]

            faces.append(np.array(image)[ymin:ymax, xmin:xmax])
            results.append({
                "confidence": str(confidence),
                "label": "face",
                "points": [xmin, ymin, xmax, ymax],
                "type": "rectangle",
                "attributes": []
            })

        return results, faces

class AttributesExtractorHandler:
    def __init__(self):
        age_gender_base_dir = os.path.abspath(os.environ.get("AGE_GENDER_MODEL_PATH",
            "/opt/nuclio/open_model_zoo/intel/age-gender-recognition-retail-0013/FP32"))
        age_gender_model_xml = os.path.join(age_gender_base_dir, "age-gender-recognition-retail-0013.xml")
        age_gender_model_bin = os.path.join(age_gender_base_dir, "age-gender-recognition-retail-0013.bin")
        self.age_gender_model = ModelLoader(age_gender_model_xml, age_gender_model_bin)
        emotions_base_dir = os.path.abspath(os.environ.get("EMOTIONS_MODEL_PATH",
            "/opt/nuclio/open_model_zoo/intel/emotions-recognition-retail-0003/FP32"))
        emotions_model_xml = os.path.join(emotions_base_dir, "emotions-recognition-retail-0003.xml")
        emotions_model_bin = os.path.join(emotions_base_dir, "emotions-recognition-retail-0003.bin")
        self.emotions_model = ModelLoader(emotions_model_xml, emotions_model_bin)
        self.genders_map = ["female", "male"]
        self.emotions_map = ["neutral", "happy", "sad", "surprise", "anger"]

    def infer(self, image):
        age_gender_request = self.age_gender_model.async_infer(image)
        emotions_request = self.emotions_model.async_infer(image)
        # Wait until both age_gender and emotion recognition async inferences finish
        while not (age_gender_request.wait(0) == 0 and emotions_request.wait(0) == 0):
            continue
        age = int(np.squeeze(age_gender_request.output_blobs["age_conv3"].buffer) * 100)
        gender = self.genders_map[np.argmax(np.squeeze(age_gender_request.output_blobs["prob"].buffer))]
        emotion = self.emotions_map[np.argmax(np.squeeze(emotions_request.output_blobs['prob_emotion'].buffer))]
        return {"attributes": [
            {"name": "age", "value": str(age)},
            {"name": "gender", "value": gender},
            {"name": "emotion", "value": emotion}
        ]}
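For a single detected face, the merged response produced by the handler looks roughly like this (all values are illustrative):

[
  {
    "confidence": "0.98",
    "label": "face",
    "points": [212, 154, 298, 261],
    "type": "rectangle",
    "attributes": [
      {"name": "age", "value": "31"},
      {"name": "gender", "value": "female"},
      {"name": "emotion", "value": "happy"}
    ]
  }
]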
