[FEATURE]: adding yolov11 into face detection portfolio #1397

Merged: 12 commits, Dec 11, 2024
README.md: 5 changes (4 additions, 1 deletion)
@@ -121,7 +121,7 @@ models = [
"ArcFace",
"Dlib",
"SFace",
"GhostFaceNet",
"GhostFaceNet"
]

#face verification
@@ -223,6 +223,9 @@ backends = [
'retinaface',
'mediapipe',
'yolov8',
'yolov11s',
'yolov11n',
'yolov11m',
'yunet',
'centerface',
]
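For context, a minimal sketch of exercising the three new detector backends from the updated README list; the image path and the comparison loop below are illustrative and not part of this diff:

```python
from deepface import DeepFace

# hypothetical sample image; replace with a real file on disk
img = "dataset/img1.jpg"

# compare the newly added YOLOv11 detector backends side by side
for backend in ["yolov11n", "yolov11s", "yolov11m"]:
    faces = DeepFace.extract_faces(img_path=img, detector_backend=backend)
    print(f"{backend}: detected {len(faces)} face(s)")
```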
deepface/DeepFace.py: 166 changes (83 additions, 83 deletions)
@@ -54,10 +54,10 @@ def build_model(model_name: str, task: str = "facial_recognition") -> Any:
Args:
model_name (str): model identifier
- VGG-Face, Facenet, Facenet512, OpenFace, DeepFace, DeepID, Dlib,
ArcFace, SFace, GhostFaceNet for face recognition
ArcFace, SFace and GhostFaceNet for face recognition
- Age, Gender, Emotion, Race for facial attributes
- opencv, mtcnn, ssd, dlib, retinaface, mediapipe, yolov8, yunet,
fastmtcnn or centerface for face detectors
- opencv, mtcnn, ssd, dlib, retinaface, mediapipe, yolov8, yolov11n,
yolov11s, yolov11m, yunet, fastmtcnn or centerface for face detectors
- Fasnet for spoofing
task (str): facial_recognition, facial_attribute, face_detector, spoofing
default is facial_recognition
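To illustrate the updated build_model docstring, loading one of the new detectors directly could look like the sketch below (the returned object's type depends on the detector implementation and is not specified here):

```python
from deepface import DeepFace

# build the YOLOv11-small face detector once and reuse it across calls
detector = DeepFace.build_model(model_name="yolov11s", task="face_detector")
print(type(detector))
```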
@@ -68,18 +68,18 @@ def build_model(model_name: str, task: str = "facial_recognition") -> Any:


def verify(
img1_path: Union[str, np.ndarray, List[float]],
img2_path: Union[str, np.ndarray, List[float]],
model_name: str = "VGG-Face",
detector_backend: str = "opencv",
distance_metric: str = "cosine",
enforce_detection: bool = True,
align: bool = True,
expand_percentage: int = 0,
normalization: str = "base",
silent: bool = False,
threshold: Optional[float] = None,
anti_spoofing: bool = False,
img1_path: Union[str, np.ndarray, List[float]],
img2_path: Union[str, np.ndarray, List[float]],
model_name: str = "VGG-Face",
detector_backend: str = "opencv",
distance_metric: str = "cosine",
enforce_detection: bool = True,
align: bool = True,
expand_percentage: int = 0,
normalization: str = "base",
silent: bool = False,
threshold: Optional[float] = None,
anti_spoofing: bool = False,
) -> Dict[str, Any]:
"""
Verify if an image pair represents the same person or different persons.
@@ -96,8 +96,8 @@ def verify(
OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace and GhostFaceNet (default is VGG-Face).

detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
(default is opencv).
'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11s', 'yolov11m',
'centerface' or 'skip' (default is opencv).

distance_metric (string): Metric for measuring similarity. Options: 'cosine',
'euclidean', 'euclidean_l2' (default is cosine).
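A usage sketch for verification with one of the new backends; both file names are placeholders:

```python
from deepface import DeepFace

result = DeepFace.verify(
    img1_path="img1.jpg",  # placeholder paths
    img2_path="img2.jpg",
    model_name="Facenet512",
    detector_backend="yolov11n",
)
print(result["verified"], result["distance"])
```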
@@ -164,14 +164,14 @@ def verify(


def analyze(
img_path: Union[str, np.ndarray],
actions: Union[tuple, list] = ("emotion", "age", "gender", "race"),
enforce_detection: bool = True,
detector_backend: str = "opencv",
align: bool = True,
expand_percentage: int = 0,
silent: bool = False,
anti_spoofing: bool = False,
img_path: Union[str, np.ndarray],
actions: Union[tuple, list] = ("emotion", "age", "gender", "race"),
enforce_detection: bool = True,
detector_backend: str = "opencv",
align: bool = True,
expand_percentage: int = 0,
silent: bool = False,
anti_spoofing: bool = False,
) -> List[Dict[str, Any]]:
"""
Analyze facial attributes such as age, gender, emotion, and race in the provided image.
@@ -187,8 +187,8 @@ def analyze(
Set to False to avoid the exception for low-resolution images (default is True).

detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
(default is opencv).
'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11s', 'yolov11m',
'centerface' or 'skip' (default is opencv).

distance_metric (string): Metric for measuring similarity. Options: 'cosine',
'euclidean', 'euclidean_l2' (default is cosine).
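Correspondingly, a sketch of attribute analysis routed through a YOLOv11 detector; the image path is a placeholder:

```python
from deepface import DeepFace

objs = DeepFace.analyze(
    img_path="img.jpg",  # placeholder path
    actions=("age", "gender", "emotion", "race"),
    detector_backend="yolov11m",
)
for obj in objs:
    print(obj["age"], obj["dominant_gender"], obj["dominant_emotion"])
```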
@@ -263,20 +263,20 @@ def analyze(


def find(
img_path: Union[str, np.ndarray],
db_path: str,
model_name: str = "VGG-Face",
distance_metric: str = "cosine",
enforce_detection: bool = True,
detector_backend: str = "opencv",
align: bool = True,
expand_percentage: int = 0,
threshold: Optional[float] = None,
normalization: str = "base",
silent: bool = False,
refresh_database: bool = True,
anti_spoofing: bool = False,
batched: bool = False,
img_path: Union[str, np.ndarray],
db_path: str,
model_name: str = "VGG-Face",
distance_metric: str = "cosine",
enforce_detection: bool = True,
detector_backend: str = "opencv",
align: bool = True,
expand_percentage: int = 0,
threshold: Optional[float] = None,
normalization: str = "base",
silent: bool = False,
refresh_database: bool = True,
anti_spoofing: bool = False,
batched: bool = False,
) -> Union[List[pd.DataFrame], List[List[Dict[str, Any]]]]:
"""
Identify individuals in a database
@@ -298,8 +298,8 @@ def find(
Set to False to avoid the exception for low-resolution images (default is True).

detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
(default is opencv).
'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11s', 'yolov11m',
'centerface' or 'skip' (default is opencv).

align (boolean): Perform alignment based on the eye positions (default is True).

@@ -369,15 +369,15 @@ def find(


def represent(
img_path: Union[str, np.ndarray],
model_name: str = "VGG-Face",
enforce_detection: bool = True,
detector_backend: str = "opencv",
align: bool = True,
expand_percentage: int = 0,
normalization: str = "base",
anti_spoofing: bool = False,
max_faces: Optional[int] = None,
img_path: Union[str, np.ndarray],
model_name: str = "VGG-Face",
enforce_detection: bool = True,
detector_backend: str = "opencv",
align: bool = True,
expand_percentage: int = 0,
normalization: str = "base",
anti_spoofing: bool = False,
max_faces: Optional[int] = None,
) -> List[Dict[str, Any]]:
"""
Represent facial images as multi-dimensional vector embeddings.
@@ -396,8 +396,8 @@ def represent(
(default is True).

detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
(default is opencv).
'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11s', 'yolov11m',
'centerface' or 'skip' (default is opencv).

align (boolean): Perform alignment based on the eye positions (default is True).
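A sketch of computing embeddings with a YOLOv11 detector backend; the image path is a placeholder:

```python
from deepface import DeepFace

embeddings = DeepFace.represent(
    img_path="img.jpg",  # placeholder path
    model_name="Facenet",
    detector_backend="yolov11n",
)
for item in embeddings:
    print(len(item["embedding"]), item["facial_area"])
```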

@@ -441,15 +441,15 @@ def represent(


def stream(
db_path: str = "",
model_name: str = "VGG-Face",
detector_backend: str = "opencv",
distance_metric: str = "cosine",
enable_face_analysis: bool = True,
source: Any = 0,
time_threshold: int = 5,
frame_threshold: int = 5,
anti_spoofing: bool = False,
db_path: str = "",
model_name: str = "VGG-Face",
detector_backend: str = "opencv",
distance_metric: str = "cosine",
enable_face_analysis: bool = True,
source: Any = 0,
time_threshold: int = 5,
frame_threshold: int = 5,
anti_spoofing: bool = False,
) -> None:
"""
Run real time face recognition and facial attribute analysis
@@ -462,8 +462,8 @@ def stream(
OpenFace, DeepFace, DeepID, Dlib, ArcFace, SFace and GhostFaceNet (default is VGG-Face).

detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
(default is opencv).
'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11s', 'yolov11m',
'centerface' or 'skip' (default is opencv).

distance_metric (string): Metric for measuring similarity. Options: 'cosine',
'euclidean', 'euclidean_l2' (default is cosine).
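A sketch of real-time streaming with a YOLOv11 detector; the database folder is a placeholder and source=0 assumes a default webcam is available:

```python
from deepface import DeepFace

DeepFace.stream(
    db_path="my_db",  # placeholder identity database folder
    detector_backend="yolov11n",
    enable_face_analysis=False,
    source=0,  # default webcam
)
```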
@@ -499,15 +499,15 @@ def stream(


def extract_faces(
img_path: Union[str, np.ndarray],
detector_backend: str = "opencv",
enforce_detection: bool = True,
align: bool = True,
expand_percentage: int = 0,
grayscale: bool = False,
color_face: str = "rgb",
normalize_face: bool = True,
anti_spoofing: bool = False,
img_path: Union[str, np.ndarray],
detector_backend: str = "opencv",
enforce_detection: bool = True,
align: bool = True,
expand_percentage: int = 0,
grayscale: bool = False,
color_face: str = "rgb",
normalize_face: bool = True,
anti_spoofing: bool = False,
) -> List[Dict[str, Any]]:
"""
Extract faces from a given image
@@ -517,8 +517,8 @@ def extract_faces(
as a string, numpy array (BGR), or base64 encoded images.

detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
(default is opencv).
'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11s', 'yolov11m',
'centerface' or 'skip' (default is opencv).

enforce_detection (boolean): If no face is detected in an image, raise an exception.
Set to False to avoid the exception for low-resolution images (default is True).
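A sketch of face extraction with the new medium variant; the image path is a placeholder:

```python
from deepface import DeepFace

faces = DeepFace.extract_faces(
    img_path="group_photo.jpg",  # placeholder path
    detector_backend="yolov11m",
    align=True,
)
for face in faces:
    print(face["facial_area"], face["confidence"])
```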
@@ -584,11 +584,11 @@ def cli() -> None:


def detectFace(
img_path: Union[str, np.ndarray],
target_size: tuple = (224, 224),
detector_backend: str = "opencv",
enforce_detection: bool = True,
align: bool = True,
img_path: Union[str, np.ndarray],
target_size: tuple = (224, 224),
detector_backend: str = "opencv",
enforce_detection: bool = True,
align: bool = True,
) -> Union[np.ndarray, None]:
"""
Deprecated face detection function. Use extract_faces for same functionality.
@@ -601,8 +601,8 @@ def detectFace(
added to resize the image (default is (224, 224)).

detector_backend (string): face detector backend. Options: 'opencv', 'retinaface',
'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'centerface' or 'skip'
(default is opencv).
'mtcnn', 'ssd', 'dlib', 'mediapipe', 'yolov8', 'yolov11n', 'yolov11s', 'yolov11m',
'centerface' or 'skip' (default is opencv).

enforce_detection (boolean): If no face is detected in an image, raise an exception.
Set to False to avoid the exception for low-resolution images (default is True).
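Since detectFace is deprecated, a migration sketch to extract_faces using one of the new backends; the path is a placeholder:

```python
from deepface import DeepFace

# deprecated style:
# face = DeepFace.detectFace(img_path="img.jpg", detector_backend="yolov11n")

# preferred replacement:
faces = DeepFace.extract_faces(img_path="img.jpg", detector_backend="yolov11n")
face = faces[0]["face"] if faces else None
```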
deepface/commons/weight_utils.py: 21 changes (17 additions, 4 deletions)
@@ -128,8 +128,9 @@ def download_all_models_in_one_shot() -> None:
WEIGHTS_URL as SSD_WEIGHTS,
)
from deepface.models.face_detection.Yolo import (
WEIGHT_URL as YOLOV8_WEIGHTS,
WEIGHT_NAME as YOLOV8_WEIGHT_NAME,
WEIGHT_URLS as YOLO_WEIGHTS,
WEIGHT_NAMES as YOLO_WEIGHT_NAMES,
YoloModel
)
from deepface.models.face_detection.YuNet import WEIGHTS_URL as YUNET_WEIGHTS
from deepface.models.face_detection.Dlib import WEIGHTS_URL as DLIB_FD_WEIGHTS
@@ -162,8 +163,20 @@ def download_all_models_in_one_shot() -> None:
SSD_MODEL,
SSD_WEIGHTS,
{
"filename": YOLOV8_WEIGHT_NAME,
"url": YOLOV8_WEIGHTS,
"filename": YOLO_WEIGHT_NAMES[YoloModel.V8N.value],
"url": YOLO_WEIGHTS[YoloModel.V8N.value],
},
{
"filename": YOLO_WEIGHT_NAMES[YoloModel.V11N.value],
"url": YOLO_WEIGHTS[YoloModel.V11N.value],
},
{
"filename": YOLO_WEIGHT_NAMES[YoloModel.V11S.value],
"url": YOLO_WEIGHTS[YoloModel.V11S.value],
},
{
"filename": YOLO_WEIGHT_NAMES[YoloModel.V11M.value],
"url": YOLO_WEIGHTS[YoloModel.V11M.value],
},
YUNET_WEIGHTS,
DLIB_FD_WEIGHTS,
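The refactor above implies that deepface/models/face_detection/Yolo.py now exposes a YoloModel enum alongside parallel WEIGHT_NAMES and WEIGHT_URLS lists indexed by each member's value. A sketch of the assumed shape follows; the enum member names come from this diff, while the integer values, file names and URLs below are placeholders rather than the actual repository values:

```python
from enum import Enum

class YoloModel(Enum):
    V8N = 0
    V11N = 1
    V11S = 2
    V11M = 3

# parallel lists indexed by YoloModel.<member>.value; entries are illustrative only
WEIGHT_NAMES = ["yolov8n-face.pt", "yolov11n-face.pt", "yolov11s-face.pt", "yolov11m-face.pt"]
WEIGHT_URLS = [
    "https://example.com/yolov8n-face.pt",
    "https://example.com/yolov11n-face.pt",
    "https://example.com/yolov11s-face.pt",
    "https://example.com/yolov11m-face.pt",
]
```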