From 0d6aa10b825312e4ab6b04d2b242aa8a2cfe058e Mon Sep 17 00:00:00 2001
From: Sefik Ilkin Serengil
Date: Sun, 21 Jan 2024 13:52:31 +0000
Subject: [PATCH] cosmetic changes about interfaces

---
 deepface/basemodels/ArcFace.py                | 15 ++++-
 deepface/basemodels/DeepID.py                 | 15 ++++-
 .../basemodels/{DlibResNet.py => Dlib.py}     | 58 ++++++++-----------
 deepface/basemodels/Facenet.py                | 29 +++++++++-
 deepface/basemodels/FbDeepFace.py             | 15 ++++-
 deepface/basemodels/OpenFace.py               | 15 ++++-
 deepface/basemodels/SFace.py                  | 30 +++++-----
 deepface/basemodels/VGGFace.py                | 15 ++++-
 deepface/detectors/DetectorWrapper.py         | 36 ++++++------
 .../detectors/{DlibWrapper.py => Dlib.py}     |  2 +-
 .../{FastMtcnnWrapper.py => FastMtCnn.py}     |  2 +-
 .../{MediapipeWrapper.py => MediaPipe.py}     |  2 +-
 .../detectors/{MtcnnWrapper.py => MtCnn.py}   |  2 +-
 .../detectors/{OpenCvWrapper.py => OpenCv.py} |  2 +-
 .../{RetinaFaceWrapper.py => RetinaFace.py}   |  2 +-
 deepface/detectors/{SsdWrapper.py => Ssd.py}  |  8 +--
 .../detectors/{YoloWrapper.py => Yolo.py}     |  2 +-
 .../detectors/{YunetWrapper.py => YuNet.py}   |  2 +-
 deepface/extendedmodels/Age.py                |  2 +-
 deepface/extendedmodels/Emotion.py            |  2 +-
 deepface/extendedmodels/Gender.py             |  2 +-
 deepface/extendedmodels/Race.py               |  2 +-
 deepface/models/Detector.py                   |  7 ++-
 deepface/models/FacialRecognition.py          | 13 +----
 deepface/modules/modeling.py                  | 30 +++++-----
 tests/visual-test.py                          |  6 +-
 26 files changed, 196 insertions(+), 120 deletions(-)
 rename deepface/basemodels/{DlibResNet.py => Dlib.py} (67%)
 rename deepface/detectors/{DlibWrapper.py => Dlib.py} (99%)
 rename deepface/detectors/{FastMtcnnWrapper.py => FastMtCnn.py} (98%)
 rename deepface/detectors/{MediapipeWrapper.py => MediaPipe.py} (96%)
 rename deepface/detectors/{MtcnnWrapper.py => MtCnn.py} (98%)
 rename deepface/detectors/{OpenCvWrapper.py => OpenCv.py} (99%)
 rename deepface/detectors/{RetinaFaceWrapper.py => RetinaFace.py} (98%)
 rename deepface/detectors/{SsdWrapper.py => Ssd.py} (95%)
 rename deepface/detectors/{YoloWrapper.py => Yolo.py} (99%)
 rename deepface/detectors/{YunetWrapper.py => YuNet.py} (99%)

diff --git a/deepface/basemodels/ArcFace.py b/deepface/basemodels/ArcFace.py
index 13cf10a2..2a596369 100644
--- a/deepface/basemodels/ArcFace.py
+++ b/deepface/basemodels/ArcFace.py
@@ -1,5 +1,6 @@
 import os
 import gdown
+import numpy as np
 from deepface.commons import functions
 from deepface.commons.logger import Logger
 from deepface.models.FacialRecognition import FacialRecognition
@@ -43,7 +44,7 @@
 )

 # pylint: disable=too-few-public-methods
-class ArcFace(FacialRecognition):
+class ArcFaceClient(FacialRecognition):
     """
     ArcFace model class
     """
@@ -52,6 +53,18 @@ def __init__(self):
         self.model = load_model()
         self.model_name = "ArcFace"

+    def find_embeddings(self, img: np.ndarray) -> list:
+        """
+        find embeddings with ArcFace model
+        Args:
+            img (np.ndarray): pre-loaded image in BGR
+        Returns
+            embeddings (list): multi-dimensional vector
+        """
+        # model.predict causes memory issue when it is called in a for loop
+        # embedding = model.predict(img, verbose=0)[0].tolist()
+        return self.model(img, training=False).numpy()[0].tolist()
+

 def load_model(
     url="https://github.com/serengil/deepface_models/releases/download/v1.0/arcface_weights.h5",
diff --git a/deepface/basemodels/DeepID.py b/deepface/basemodels/DeepID.py
index 7a4da5bf..71b10b13 100644
--- a/deepface/basemodels/DeepID.py
+++ b/deepface/basemodels/DeepID.py
@@ -1,5 +1,6 @@
 import os
 import gdown
+import numpy as np
 from deepface.commons import functions
 from deepface.commons.logger import Logger
 from deepface.models.FacialRecognition import FacialRecognition
@@ -39,7 +40,7 @@
 # -------------------------------------

 # pylint: disable=too-few-public-methods
-class DeepId(FacialRecognition):
+class DeepIdClient(FacialRecognition):
     """
     DeepId model class
     """
@@ -48,6 +49,18 @@ def __init__(self):
         self.model = load_model()
         self.model_name = "DeepId"

+    def find_embeddings(self, img: np.ndarray) -> list:
+        """
+        find embeddings with DeepId model
+        Args:
+            img (np.ndarray): pre-loaded image in BGR
+        Returns
+            embeddings (list): multi-dimensional vector
+        """
+        # model.predict causes memory issue when it is called in a for loop
+        # embedding = model.predict(img, verbose=0)[0].tolist()
+        return self.model(img, training=False).numpy()[0].tolist()
+

 def load_model(
     url="https://github.com/serengil/deepface_models/releases/download/v1.0/deepid_keras_weights.h5",
diff --git a/deepface/basemodels/DlibResNet.py b/deepface/basemodels/Dlib.py
similarity index 67%
rename from deepface/basemodels/DlibResNet.py
rename to deepface/basemodels/Dlib.py
index c440f19a..6ccfec89 100644
--- a/deepface/basemodels/DlibResNet.py
+++ b/deepface/basemodels/Dlib.py
@@ -11,7 +11,7 @@

 # pylint: disable=too-few-public-methods
-class Dlib(FacialRecognition):
+class DlibClient(FacialRecognition):
     """
     Dlib model class
     """
@@ -22,13 +22,31 @@ def __init__(self):
     def find_embeddings(self, img: np.ndarray) -> list:
         """
-        Custom find embeddings function of Dlib different than FacialRecognition's one
+        find embeddings with Dlib model - different than regular models
         Args:
-            img (np.ndarray)
-        Retunrs:
-            embeddings (list)
+            img (np.ndarray): pre-loaded image in BGR
+        Returns
+            embeddings (list): multi-dimensional vector
         """
-        return self.model.predict(img)[0].tolist()
+        # return self.model.predict(img)[0].tolist()
+
+        # extract_faces returns 4 dimensional images
+        if len(img.shape) == 4:
+            img = img[0]
+
+        # bgr to rgb
+        img = img[:, :, ::-1]  # bgr to rgb
+
+        # img is in scale of [0, 1] but expected [0, 255]
+        if img.max() <= 1:
+            img = img * 255
+
+        img = img.astype(np.uint8)
+
+        img_representation = self.model.model.compute_face_descriptor(img)
+        img_representation = np.array(img_representation)
+        img_representation = np.expand_dims(img_representation, axis=0)
+        return img_representation[0].tolist()


 class DlibResNet:
     def __init__(self):
@@ -69,38 +87,12 @@ def __init__(self):

         # ---------------------

-        model = dlib.face_recognition_model_v1(weight_file)
-        self.__model = model
+        self.model = dlib.face_recognition_model_v1(weight_file)

         # ---------------------

         # return None  # classes must return None

-    def predict(self, img_aligned: np.ndarray) -> np.ndarray:
-
-        # functions.detectFace returns 4 dimensional images
-        if len(img_aligned.shape) == 4:
-            img_aligned = img_aligned[0]
-
-        # functions.detectFace returns bgr images
-        img_aligned = img_aligned[:, :, ::-1]  # bgr to rgb
-
-        # deepface.detectFace returns an array in scale of [0, 1]
-        # but dlib expects in scale of [0, 255]
-        if img_aligned.max() <= 1:
-            img_aligned = img_aligned * 255
-
-        img_aligned = img_aligned.astype(np.uint8)
-
-        model = self.__model
-
-        img_representation = model.compute_face_descriptor(img_aligned)
-
-        img_representation = np.array(img_representation)
-        img_representation = np.expand_dims(img_representation, axis=0)
-
-        return img_representation
-

 class DlibMetaData:
     def __init__(self):
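The new DlibClient.find_embeddings above folds the pre-processing that used to live in DlibResNet.predict into the client itself. A minimal standalone sketch of that path follows; it is illustrative rather than part of the patch, and the weight file path and the 150x150 crop size are assumptions based on the surrounding code, not on this hunk.

# Minimal sketch of the Dlib embedding path (assumes dlib is installed and the
# ResNet weight file is already available locally).
import numpy as np
import dlib

face = np.random.rand(1, 150, 150, 3).astype(np.float32)  # dummy detector output, BGR in [0, 1]

if len(face.shape) == 4:   # extract_faces returns a 4-dimensional batch
    face = face[0]
face = face[:, :, ::-1]    # BGR -> RGB, the channel order dlib expects
if face.max() <= 1:        # rescale [0, 1] floats to [0, 255]
    face = face * 255
face = face.astype(np.uint8)

# hypothetical local path to the weights that DlibResNet.__init__ downloads
model = dlib.face_recognition_model_v1("dlib_face_recognition_resnet_model_v1.dat")
embedding = np.array(model.compute_face_descriptor(face))  # 128-d descriptor
print(embedding.shape)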
diff --git a/deepface/basemodels/Facenet.py b/deepface/basemodels/Facenet.py
index 44797903..1c3dac81 100644
--- a/deepface/basemodels/Facenet.py
+++ b/deepface/basemodels/Facenet.py
@@ -1,5 +1,6 @@
 import os
 import gdown
+import numpy as np
 from deepface.commons import functions
 from deepface.commons.logger import Logger
 from deepface.models.FacialRecognition import FacialRecognition
@@ -43,7 +44,7 @@
 # --------------------------------

 # pylint: disable=too-few-public-methods
-class FaceNet128d(FacialRecognition):
+class FaceNet128dClient(FacialRecognition):
     """
     FaceNet-128d model class
     """
@@ -52,8 +53,20 @@ def __init__(self):
         self.model = load_facenet128d_model()
         self.model_name = "FaceNet-128d"

+    def find_embeddings(self, img: np.ndarray) -> list:
+        """
+        find embeddings with FaceNet-128d model
+        Args:
+            img (np.ndarray): pre-loaded image in BGR
+        Returns
+            embeddings (list): multi-dimensional vector
+        """
+        # model.predict causes memory issue when it is called in a for loop
+        # embedding = model.predict(img, verbose=0)[0].tolist()
+        return self.model(img, training=False).numpy()[0].tolist()

-class FaceNet512d(FacialRecognition):
+
+class FaceNet512dClient(FacialRecognition):
     """
     FaceNet-1512d model class
     """
@@ -62,6 +75,18 @@ def __init__(self):
         self.model = load_facenet512d_model()
         self.model_name = "FaceNet-512d"

+    def find_embeddings(self, img: np.ndarray) -> list:
+        """
+        find embeddings with FaceNet-512d model
+        Args:
+            img (np.ndarray): pre-loaded image in BGR
+        Returns
+            embeddings (list): multi-dimensional vector
+        """
+        # model.predict causes memory issue when it is called in a for loop
+        # embedding = model.predict(img, verbose=0)[0].tolist()
+        return self.model(img, training=False).numpy()[0].tolist()
+

 def scaling(x, scale):
     return x * scale
diff --git a/deepface/basemodels/FbDeepFace.py b/deepface/basemodels/FbDeepFace.py
index 0a436edc..30a8aec7 100644
--- a/deepface/basemodels/FbDeepFace.py
+++ b/deepface/basemodels/FbDeepFace.py
@@ -1,6 +1,7 @@
 import os
 import zipfile
 import gdown
+import numpy as np
 from deepface.commons import functions
 from deepface.commons.logger import Logger
 from deepface.models.FacialRecognition import FacialRecognition
@@ -36,7 +37,7 @@
 # -------------------------------------

 # pylint: disable=line-too-long, too-few-public-methods
-class DeepFace(FacialRecognition):
+class DeepFaceClient(FacialRecognition):
     """
     Fb's DeepFace model class
     """
@@ -45,6 +46,18 @@ def __init__(self):
         self.model = load_model()
         self.model_name = "DeepFace"

+    def find_embeddings(self, img: np.ndarray) -> list:
+        """
+        find embeddings with DeepFace model
+        Args:
+            img (np.ndarray): pre-loaded image in BGR
+        Returns
+            embeddings (list): multi-dimensional vector
+        """
+        # model.predict causes memory issue when it is called in a for loop
+        # embedding = model.predict(img, verbose=0)[0].tolist()
+        return self.model(img, training=False).numpy()[0].tolist()
+

 def load_model(
     url="https://github.com/swghosh/DeepFace/releases/download/weights-vggface2-2d-aligned/VGGFace2_DeepFace_weights_val-0.9034.h5.zip",
diff --git a/deepface/basemodels/OpenFace.py b/deepface/basemodels/OpenFace.py
index 05ef430e..1ba4d1c0 100644
--- a/deepface/basemodels/OpenFace.py
+++ b/deepface/basemodels/OpenFace.py
@@ -1,6 +1,7 @@
 import os
 import gdown
 import tensorflow as tf
+import numpy as np
 from deepface.commons import functions
 from deepface.commons.logger import Logger
 from deepface.models.FacialRecognition import FacialRecognition
@@ -26,7 +27,7 @@
 # ---------------------------------------

 # pylint: disable=too-few-public-methods
-class OpenFace(FacialRecognition):
+class OpenFaceClient(FacialRecognition):
     """
     OpenFace model class
     """
@@ -35,6 +36,18 @@ def __init__(self):
         self.model = load_model()
         self.model_name = "OpenFace"

+    def find_embeddings(self, img: np.ndarray) -> list:
+        """
+        find embeddings with OpenFace model
+        Args:
+            img (np.ndarray): pre-loaded image in BGR
+        Returns
+            embeddings (list): multi-dimensional vector
+        """
+        # model.predict causes memory issue when it is called in a for loop
+        # embedding = model.predict(img, verbose=0)[0].tolist()
+        return self.model(img, training=False).numpy()[0].tolist()
+

 def load_model(
     url="https://github.com/serengil/deepface_models/releases/download/v1.0/openface_weights.h5",
diff --git a/deepface/basemodels/SFace.py b/deepface/basemodels/SFace.py
index a9f36d14..2a59ccb7 100644
--- a/deepface/basemodels/SFace.py
+++ b/deepface/basemodels/SFace.py
@@ -14,7 +14,7 @@

 # pylint: disable=line-too-long, too-few-public-methods
-class SFace(FacialRecognition):
+class SFaceClient(FacialRecognition):
     """
     SFace model class
     """
@@ -25,13 +25,20 @@ def __init__(self):

     def find_embeddings(self, img: np.ndarray) -> list:
         """
-        Custom find embeddings function of SFace different than FacialRecognition's one
+        find embeddings with SFace model - different than regular models
         Args:
-            img (np.ndarray)
-        Retunrs:
-            embeddings (list)
+            img (np.ndarray): pre-loaded image in BGR
+        Returns
+            embeddings (list): multi-dimensional vector
         """
-        return self.model.predict(img)[0].tolist()
+        # return self.model.predict(img)[0].tolist()
+
+        # revert the image to original format and preprocess using the model
+        input_blob = (img[0] * 255).astype(np.uint8)
+
+        embeddings = self.model.model.feature(input_blob)
+
+        return embeddings[0].tolist()


 def load_model(
@@ -74,17 +81,6 @@ def __init__(self, model_path):

         self.layers = [_Layer()]

-    def predict(self, image: np.ndarray) -> np.ndarray:
-        # Preprocess
-        input_blob = (image[0] * 255).astype(
-            np.uint8
-        )  # revert the image to original format and preprocess using the model
-
-        # Forward
-        embeddings = self.model.feature(input_blob)
-
-        return embeddings
-

 class _Layer:
     input_shape = (None, 112, 112, 3)
diff --git a/deepface/basemodels/VGGFace.py b/deepface/basemodels/VGGFace.py
index 921898e0..fa548a1c 100644
--- a/deepface/basemodels/VGGFace.py
+++ b/deepface/basemodels/VGGFace.py
@@ -1,5 +1,6 @@
 import os
 import gdown
+import numpy as np
 from deepface.commons import functions
 from deepface.commons.logger import Logger
 from deepface.models.FacialRecognition import FacialRecognition
@@ -37,7 +38,7 @@
 # ---------------------------------------

 # pylint: disable=too-few-public-methods
-class VggFace(FacialRecognition):
+class VggFaceClient(FacialRecognition):
     """
     VGG-Face model class
     """
@@ -46,6 +47,18 @@ def __init__(self):
         self.model = load_model()
         self.model_name = "VGG-Face"

+    def find_embeddings(self, img: np.ndarray) -> list:
+        """
+        find embeddings with VGG-Face model
+        Args:
+            img (np.ndarray): pre-loaded image in BGR
+        Returns
+            embeddings (list): multi-dimensional vector
+        """
+        # model.predict causes memory issue when it is called in a for loop
+        # embedding = model.predict(img, verbose=0)[0].tolist()
+        return self.model(img, training=False).numpy()[0].tolist()
+

 def base_model() -> Sequential:
     """
diff --git a/deepface/detectors/DetectorWrapper.py b/deepface/detectors/DetectorWrapper.py
index 06cfca6a..8c3ba919 100644
--- a/deepface/detectors/DetectorWrapper.py
+++ b/deepface/detectors/DetectorWrapper.py
@@ -2,15 +2,15 @@
 import numpy as np
 from deepface.models.Detector import Detector
 from deepface.detectors import (
-    OpenCvWrapper,
-    SsdWrapper,
-    DlibWrapper,
-    MtcnnWrapper,
-    RetinaFaceWrapper,
-    MediapipeWrapper,
-    YoloWrapper,
-    YunetWrapper,
-    FastMtcnnWrapper,
+    FastMtCnn,
+    MediaPipe,
+    MtCnn,
+    OpenCv,
+    Dlib,
+    RetinaFace,
+    Ssd,
+    Yolo,
+    YuNet,
 )

@@ -25,15 +25,15 @@ def build_model(detector_backend: str) -> Any:
     global face_detector_obj  # singleton design pattern

     backends = {
-        "opencv": OpenCvWrapper.OpenCv,
-        "mtcnn": MtcnnWrapper.MtCnn,
-        "ssd": SsdWrapper.Ssd,
-        "dlib": DlibWrapper.Dlib,
-        "retinaface": RetinaFaceWrapper.RetinaFace,
-        "mediapipe": MediapipeWrapper.MediaPipe,
-        "yolov8": YoloWrapper.Yolo,
-        "yunet": YunetWrapper.YuNet,
-        "fastmtcnn": FastMtcnnWrapper.FastMtCnn,
+        "opencv": OpenCv.OpenCvClient,
+        "mtcnn": MtCnn.MtCnnClient,
+        "ssd": Ssd.SsdClient,
+        "dlib": Dlib.DlibClient,
+        "retinaface": RetinaFace.RetinaFaceClient,
+        "mediapipe": MediaPipe.MediaPipeClient,
+        "yolov8": Yolo.YoloClient,
+        "yunet": YuNet.YuNetClient,
+        "fastmtcnn": FastMtCnn.FastMtCnnClient,
     }

     if not "face_detector_obj" in globals():
diff --git a/deepface/detectors/DlibWrapper.py b/deepface/detectors/Dlib.py
similarity index 99%
rename from deepface/detectors/DlibWrapper.py
rename to deepface/detectors/Dlib.py
index 40f8eebd..9d2bcfdc 100644
--- a/deepface/detectors/DlibWrapper.py
+++ b/deepface/detectors/Dlib.py
@@ -9,7 +9,7 @@
 logger = Logger(module="detectors.DlibWrapper")


-class Dlib(Detector):
+class DlibClient(Detector):
     def __init__(self):
         self.model = self.build_model()

diff --git a/deepface/detectors/FastMtcnnWrapper.py b/deepface/detectors/FastMtCnn.py
similarity index 98%
rename from deepface/detectors/FastMtcnnWrapper.py
rename to deepface/detectors/FastMtCnn.py
index 3f38fa07..2f037d55 100644
--- a/deepface/detectors/FastMtcnnWrapper.py
+++ b/deepface/detectors/FastMtCnn.py
@@ -7,7 +7,7 @@
 # Examples https://www.kaggle.com/timesler/guide-to-mtcnn-in-facenet-pytorch


-class FastMtCnn(Detector):
+class FastMtCnnClient(Detector):
     def __init__(self):
         self.model = self.build_model()

diff --git a/deepface/detectors/MediapipeWrapper.py b/deepface/detectors/MediaPipe.py
similarity index 96%
rename from deepface/detectors/MediapipeWrapper.py
rename to deepface/detectors/MediaPipe.py
index 56e439ec..73a2fe21 100644
--- a/deepface/detectors/MediapipeWrapper.py
+++ b/deepface/detectors/MediaPipe.py
@@ -5,7 +5,7 @@
 # Link - https://google.github.io/mediapipe/solutions/face_detection


-class MediaPipe(Detector):
+class MediaPipeClient(Detector):
     def __init__(self):
         self.model = self.build_model()

diff --git a/deepface/detectors/MtcnnWrapper.py b/deepface/detectors/MtCnn.py
similarity index 98%
rename from deepface/detectors/MtcnnWrapper.py
rename to deepface/detectors/MtCnn.py
index 14c59f52..8f581ad0 100644
--- a/deepface/detectors/MtcnnWrapper.py
+++ b/deepface/detectors/MtCnn.py
@@ -4,7 +4,7 @@
 from deepface.models.Detector import Detector


-class MtCnn(Detector):
+class MtCnnClient(Detector):
     """
     Class to cover common face detection functionalitiy for MtCnn backend
     """
diff --git a/deepface/detectors/OpenCvWrapper.py b/deepface/detectors/OpenCv.py
similarity index 99%
rename from deepface/detectors/OpenCvWrapper.py
rename to deepface/detectors/OpenCv.py
index 515fdb09..e70d3244 100644
--- a/deepface/detectors/OpenCvWrapper.py
+++ b/deepface/detectors/OpenCv.py
@@ -5,7 +5,7 @@
 from deepface.models.Detector import Detector


-class OpenCv(Detector):
+class OpenCvClient(Detector):
     """
     Class to cover common face detection functionalitiy for OpenCv backend
     """
diff --git a/deepface/detectors/RetinaFaceWrapper.py b/deepface/detectors/RetinaFace.py
similarity index 98%
rename from deepface/detectors/RetinaFaceWrapper.py
rename to deepface/detectors/RetinaFace.py
index 6986ce5b..3d2d9ae6 100644
--- a/deepface/detectors/RetinaFaceWrapper.py
+++ b/deepface/detectors/RetinaFace.py
@@ -4,7 +4,7 @@
 from deepface.models.Detector import Detector


-class RetinaFace(Detector):
+class RetinaFaceClient(Detector):
     def __init__(self):
         self.model = rf.build_model()

diff --git a/deepface/detectors/SsdWrapper.py b/deepface/detectors/Ssd.py
similarity index 95%
rename from deepface/detectors/SsdWrapper.py
rename to deepface/detectors/Ssd.py
index f88eea3d..7847e122 100644
--- a/deepface/detectors/SsdWrapper.py
+++ b/deepface/detectors/Ssd.py
@@ -3,7 +3,7 @@
 import cv2
 import pandas as pd
 import numpy as np
-from deepface.detectors import OpenCvWrapper
+from deepface.detectors import OpenCv
 from deepface.commons import functions
 from deepface.models.Detector import Detector
 from deepface.commons.logger import Logger
@@ -13,7 +13,7 @@

 # pylint: disable=line-too-long

-class Ssd(Detector):
+class SsdClient(Detector):
     def __init__(self):
         self.model = self.build_model()

@@ -65,7 +65,7 @@ def build_model(self) -> dict:

         detector = {}
         detector["face_detector"] = face_detector
-        detector["opencv_module"] = OpenCvWrapper.OpenCv()
+        detector["opencv_module"] = OpenCv.OpenCvClient()

         return detector

@@ -134,7 +134,7 @@ def detect_faces(self, img: np.ndarray, align: bool = True) -> list:
             confidence = instance["confidence"]

             if align:
-                opencv_module: OpenCvWrapper.OpenCv = self.model["opencv_module"]
+                opencv_module: OpenCv.OpenCvClient = self.model["opencv_module"]
                 left_eye, right_eye = opencv_module.find_eyes(detected_face)
                 detected_face = self.align_face(
                     img=detected_face, left_eye=left_eye, right_eye=right_eye
diff --git a/deepface/detectors/YoloWrapper.py b/deepface/detectors/Yolo.py
similarity index 99%
rename from deepface/detectors/YoloWrapper.py
rename to deepface/detectors/Yolo.py
index a6666c99..5a97db6d 100644
--- a/deepface/detectors/YoloWrapper.py
+++ b/deepface/detectors/Yolo.py
@@ -16,7 +16,7 @@
 LANDMARKS_CONFIDENCE_THRESHOLD = 0.5


-class Yolo(Detector):
+class YoloClient(Detector):
     def __init__(self):
         self.model = self.build_model()

diff --git a/deepface/detectors/YunetWrapper.py b/deepface/detectors/YuNet.py
similarity index 99%
rename from deepface/detectors/YunetWrapper.py
rename to deepface/detectors/YuNet.py
index 544bd5b9..41c1764b 100644
--- a/deepface/detectors/YunetWrapper.py
+++ b/deepface/detectors/YuNet.py
@@ -10,7 +10,7 @@
 logger = Logger(module="detectors.YunetWrapper")


-class YuNet(Detector):
+class YuNetClient(Detector):
     def __init__(self):
         self.model = self.build_model()

diff --git a/deepface/extendedmodels/Age.py b/deepface/extendedmodels/Age.py
index 6315a8c8..1d5e6cd5 100644
--- a/deepface/extendedmodels/Age.py
+++ b/deepface/extendedmodels/Age.py
@@ -23,7 +23,7 @@
 # ----------------------------------------

 # pylint: disable=too-few-public-methods
-class ApparentAge(Demography):
+class ApparentAgeClient(Demography):
     """
     Age model class
     """
diff --git a/deepface/extendedmodels/Emotion.py b/deepface/extendedmodels/Emotion.py
index 7db38c17..8a04c33f 100644
--- a/deepface/extendedmodels/Emotion.py
+++ b/deepface/extendedmodels/Emotion.py
@@ -33,7 +33,7 @@
 labels = ["angry", "disgust", "fear", "happy", "sad", "surprise", "neutral"]

 # pylint: disable=too-few-public-methods
-class FacialExpression(Demography):
+class EmotionClient(Demography):
     """
     Emotion model class
     """
diff --git a/deepface/extendedmodels/Gender.py b/deepface/extendedmodels/Gender.py
index 5c6bc95d..191e315d 100644
--- a/deepface/extendedmodels/Gender.py
+++ b/deepface/extendedmodels/Gender.py
@@ -26,7 +26,7 @@
 labels = ["Woman", "Man"]

 # pylint: disable=too-few-public-methods
-class Gender(Demography):
+class GenderClient(Demography):
     """
     Gender model class
     """
diff --git a/deepface/extendedmodels/Race.py b/deepface/extendedmodels/Race.py
index 33a0d194..9c907f1c 100644
--- a/deepface/extendedmodels/Race.py
+++ b/deepface/extendedmodels/Race.py
@@ -25,7 +25,7 @@
 labels = ["asian", "indian", "black", "white", "middle eastern", "latino hispanic"]

 # pylint: disable=too-few-public-methods
-class Race(Demography):
+class RaceClient(Demography):
     """
     Race model class
     """
diff --git a/deepface/models/Detector.py b/deepface/models/Detector.py
index b5c80af1..60bf1149 100644
--- a/deepface/models/Detector.py
+++ b/deepface/models/Detector.py
@@ -1,5 +1,5 @@
 from abc import ABC, abstractmethod
-from typing import Union
+from typing import Union, Optional
 import numpy as np
 from PIL import Image

@@ -12,7 +12,10 @@ def detect_faces(self, img: np.ndarray, align: bool = True) -> list:
         pass

     def align_face(
-        self, img: np.ndarray, left_eye: Union[list, tuple], right_eye: Union[list, tuple]
+        self,
+        img: np.ndarray,
+        left_eye: Optional[Union[list, tuple]] = None,
+        right_eye: Optional[Union[list, tuple]] = None,
     ) -> np.ndarray:
         """
         Align a given image horizantally with respect to their left and right eye locations
diff --git a/deepface/models/FacialRecognition.py b/deepface/models/FacialRecognition.py
index 1709e406..7f323f4a 100644
--- a/deepface/models/FacialRecognition.py
+++ b/deepface/models/FacialRecognition.py
@@ -1,4 +1,4 @@
-from abc import ABC
+from abc import ABC, abstractmethod
 from typing import Any, Union
 import numpy as np
 from deepface.commons import functions
@@ -16,13 +16,6 @@ class FacialRecognition(ABC):
     model: Union[Model, Any]
     model_name: str

+    @abstractmethod
     def find_embeddings(self, img: np.ndarray) -> list:
-        if not isinstance(self.model, Model):
-            raise ValueError(
-                "If a facial recognition model is not type of (tf.)keras.models.Model,"
-                "Then its find_embeddings method must be implemented its own module."
-                f"However {self.model_name}'s model type is {type(self.model)}"
-            )
-        # model.predict causes memory issue when it is called in a for loop
-        # embedding = model.predict(img, verbose=0)[0].tolist()
-        return self.model(img, training=False).numpy()[0].tolist()
+        pass
diff --git a/deepface/modules/modeling.py b/deepface/modules/modeling.py
index f1b4bfb1..e738264f 100644
--- a/deepface/modules/modeling.py
+++ b/deepface/modules/modeling.py
@@ -5,12 +5,12 @@
 from deepface.basemodels import (
     VGGFace,
     OpenFace,
-    Facenet,
     FbDeepFace,
     DeepID,
-    DlibResNet,
     ArcFace,
     SFace,
+    Dlib,
+    Facenet,
 )
 from deepface.extendedmodels import Age, Gender, Race, Emotion
@@ -31,19 +31,19 @@ def build_model(model_name: str) -> Any:
     global model_obj

     models = {
-        "VGG-Face": VGGFace.VggFace,
-        "OpenFace": OpenFace.OpenFace,
-        "Facenet": Facenet.FaceNet128d,
-        "Facenet512": Facenet.FaceNet512d,
-        "DeepFace": FbDeepFace.DeepFace,
-        "DeepID": DeepID.DeepId,
-        "Dlib": DlibResNet.Dlib,
-        "ArcFace": ArcFace.ArcFace,
-        "SFace": SFace.SFace,
-        "Emotion": Emotion.FacialExpression,
-        "Age": Age.ApparentAge,
-        "Gender": Gender.Gender,
-        "Race": Race.Race,
+        "VGG-Face": VGGFace.VggFaceClient,
+        "OpenFace": OpenFace.OpenFaceClient,
+        "Facenet": Facenet.FaceNet128dClient,
+        "Facenet512": Facenet.FaceNet512dClient,
+        "DeepFace": FbDeepFace.DeepFaceClient,
+        "DeepID": DeepID.DeepIdClient,
+        "Dlib": Dlib.DlibClient,
+        "ArcFace": ArcFace.ArcFaceClient,
+        "SFace": SFace.SFaceClient,
+        "Emotion": Emotion.EmotionClient,
+        "Age": Age.ApparentAgeClient,
+        "Gender": Gender.GenderClient,
+        "Race": Race.RaceClient,
     }

     if not "model_obj" in globals():
diff --git a/tests/visual-test.py b/tests/visual-test.py
index dbdf54ff..78e79379 100644
--- a/tests/visual-test.py
+++ b/tests/visual-test.py
@@ -14,11 +14,12 @@
     "Facenet512",
     "OpenFace",
     "DeepFace",
-    "DeepID",
+    # "DeepID",
     "Dlib",
     "ArcFace",
     "SFace",
 ]
+
 detector_backends = ["opencv", "ssd", "dlib", "mtcnn", "retinaface"]
@@ -44,10 +45,11 @@
 for df in dfs:
     logger.info(df)

+# extract faces
 for detector_backend in detector_backends:
     face_objs = DeepFace.extract_faces(
-        img_path="dataset/img1.jpg", detector_backend=detector_backend
+        img_path="dataset/img11.jpg", detector_backend=detector_backend
    )
    for face_obj in face_objs:
        face = face_obj["face"]
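With this change every recognition, demography and detector wrapper is exposed as a *Client class, and FacialRecognition.find_embeddings becomes an abstract method that each client implements for itself. A minimal usage sketch of the reorganized interfaces follows; the module paths and class names come from the diff above, while the dummy 112x112 input, the assumption that ArcFace expects that shape, and the weight download on first use are illustrative assumptions rather than part of the patch.

# Minimal usage sketch (assumes the deepface package from this commit is importable).
import numpy as np

from deepface.modules import modeling
from deepface.detectors import DetectorWrapper

# modeling.build_model maps the public model name to its renamed *Client class
# and caches a singleton instance, as in the hunk above.
arcface = modeling.build_model("ArcFace")          # -> ArcFace.ArcFaceClient
detector = DetectorWrapper.build_model("opencv")   # -> OpenCv.OpenCvClient

# find_embeddings is now abstract on FacialRecognition, so every client provides
# its own implementation; Keras-based clients call self.model(img, training=False).
face = np.random.rand(1, 112, 112, 3).astype(np.float32)  # dummy pre-processed face in BGR
embedding = arcface.find_embeddings(face)
print(arcface.model_name, len(embedding))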