video_processing_mediapipe.py
import traceback
import cv2
import numpy as np
import sys
import argparse
from datetime import datetime
import os
# MediaPipe algorithms in Python
# https://google.github.io/mediapipe/getting_started/python.html
# Install steps:
# pip install mediapipe
# Status: working
import mediapipe as mp
mp_drawing = mp.solutions.drawing_utils
mp_face_mesh = mp.solutions.face_mesh
mp_hands = mp.solutions.hands
mp_pose = mp.solutions.pose
mp_holistic = mp.solutions.holistic


# Create the MediaPipe solution object for the requested transform.
# Returns (model, extra) where extra is unused except for carrying the
# drawing spec alongside the face mesh model.
def init_model(transform):
    if transform == "facemesh":
        face_mesh = mp_face_mesh.FaceMesh(min_detection_confidence=0.5, min_tracking_confidence=0.5)
        drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1)
        return (face_mesh, drawing_spec), None
    elif transform == "hands":
        hands = mp_hands.Hands(min_detection_confidence=0.5, min_tracking_confidence=0.5)
        return hands, None
    elif transform == "pose":
        pose = mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5)
        return pose, None
    elif transform == "holistic":
        holistic = mp_holistic.Holistic(min_detection_confidence=0.5, min_tracking_confidence=0.5)
        return holistic, None
    return None, None


# Run the selected MediaPipe solution on a single BGR frame and draw its
# landmarks onto the frame. Returns (tracks, img) where tracks holds the
# detected landmarks, or [] if nothing was detected or an error occurred.
def process_image(transform, processing_model, img):
    tracks = []
    try:
        if transform == "facemesh":
            (face_mesh, drawing_spec) = processing_model
            image = cv2.cvtColor(cv2.flip(img, 1), cv2.COLOR_BGR2RGB)
            # To improve performance, optionally mark the image as not writeable to
            # pass by reference.
            image.flags.writeable = False
            results = face_mesh.process(image)
            # Draw the face mesh annotations on the image.
            image.flags.writeable = True
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            if results.multi_face_landmarks:
                for face_landmarks in results.multi_face_landmarks:
                    mp_drawing.draw_landmarks(
                        image=image,
                        landmark_list=face_landmarks,
                        # Note: newer MediaPipe releases may expose this set as
                        # FACEMESH_CONTOURS instead of FACE_CONNECTIONS.
                        connections=mp_face_mesh.FACE_CONNECTIONS,
                        landmark_drawing_spec=drawing_spec,
                        connection_drawing_spec=drawing_spec)
            img = image
            tracks = results.multi_face_landmarks
        elif transform == "hands":
            hands = processing_model
            image = cv2.cvtColor(cv2.flip(img, 1), cv2.COLOR_BGR2RGB)
            # To improve performance, optionally mark the image as not writeable to
            # pass by reference.
            image.flags.writeable = False
            results = hands.process(image)
            # Draw the hand annotations on the image.
            image.flags.writeable = True
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            if results.multi_hand_landmarks:
                for hand_landmarks in results.multi_hand_landmarks:
                    mp_drawing.draw_landmarks(
                        image, hand_landmarks, mp_hands.HAND_CONNECTIONS)
            img = image
            tracks = results.multi_hand_landmarks
elif transform == "pose":
(pose) = processing_model
image = cv2.cvtColor(cv2.flip(img, 1), cv2.COLOR_BGR2RGB)
# To improve performance, optionally mark the image as not writeable to
# pass by reference.
image.flags.writeable = False
results = pose.process(image)
# Draw the hand annotations on the image.
image.flags.writeable = True
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)
img = image
traks = results.pose_landmarks
elif transform == "holistic":
(holistic) = processing_model
image = cv2.cvtColor(cv2.flip(img, 1), cv2.COLOR_BGR2RGB)
# To improve performance, optionally mark the image as not writeable to
# pass by reference.
image.flags.writeable = False
results = holistic.process(image)
# Draw the hand annotations on the image.
image.flags.writeable = True
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
mp_drawing.draw_landmarks(
image, results.face_landmarks, mp_holistic.FACE_CONNECTIONS)
mp_drawing.draw_landmarks(
image, results.left_hand_landmarks, mp_holistic.HAND_CONNECTIONS)
mp_drawing.draw_landmarks(
image, results.right_hand_landmarks, mp_holistic.HAND_CONNECTIONS)
mp_drawing.draw_landmarks(
image, results.pose_landmarks, mp_holistic.POSE_CONNECTIONS)
img = image
# return just face landmarks for now
traks = results.face_landmarks
    except Exception as e:
        print(traceback.format_exc())
        print("MediaPipe Exception", e)
    return tracks, img
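

# --- Usage sketch (not part of the original module) ---------------------------
# A minimal example of how init_model() and process_image() above could be
# driven. The webcam capture source, the window name, and the choice of the
# "holistic" transform are assumptions for illustration only; in practice these
# functions are expected to be called by an external video-processing pipeline.
if __name__ == "__main__":
    model, _ = init_model("holistic")
    cap = cv2.VideoCapture(0)  # assumed default webcam
    while cap.isOpened():
        ok, frame = cap.read()
        if not ok:
            break
        landmarks, annotated = process_image("holistic", model, frame)
        cv2.imshow("mediapipe", annotated)
        if cv2.waitKey(1) & 0xFF == 27:  # Esc to quit
            break
    cap.release()
    cv2.destroyAllWindows()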