Showing 10 changed files with 374 additions and 404 deletions.
Makefile
@@ -1,5 +1,5 @@
 test:
-	cd tests && python -m pytest unit_tests.py -s --disable-warnings
+	cd tests && python -m pytest . -s --disable-warnings

 lint:
 	python -m pylint deepface/ --fail-under=10
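With this change the test target collects every test module under tests/ rather than only unit_tests.py, so the new per-feature test files below are picked up automatically. As a minimal sketch (not part of the commit; it assumes pytest is installed and the script is run from the tests/ directory), a single module can also be executed on its own:

# Sketch only: run one of the new test modules directly with pytest.
# Mirrors the Makefile invocation "python -m pytest . -s --disable-warnings",
# but restricted to a single file.
import pytest

if __name__ == "__main__":
    raise SystemExit(pytest.main(["test_analyze.py", "-s", "--disable-warnings"]))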
tests/test_analyze.py (new file)
@@ -0,0 +1,133 @@
import cv2
from deepface import DeepFace
from deepface.commons.logger import Logger

logger = Logger("tests/test_analyze.py")

detectors = ["opencv", "mtcnn"]


def test_standard_analyze():
    img = "dataset/img4.jpg"
    demography_objs = DeepFace.analyze(img, silent=True)
    for demography in demography_objs:
        logger.debug(demography)
        assert demography["age"] > 20 and demography["age"] < 40
        assert demography["dominant_gender"] == "Woman"
    logger.info("✅ test standard analyze done")


def test_analyze_with_all_actions_as_tuple():
    img = "dataset/img4.jpg"
    demography_objs = DeepFace.analyze(
        img, actions=("age", "gender", "race", "emotion"), silent=True
    )

    for demography in demography_objs:
        logger.debug(f"Demography: {demography}")
        age = demography["age"]
        gender = demography["dominant_gender"]
        race = demography["dominant_race"]
        emotion = demography["dominant_emotion"]
        logger.debug(f"Age: {age}")
        logger.debug(f"Gender: {gender}")
        logger.debug(f"Race: {race}")
        logger.debug(f"Emotion: {emotion}")
        assert demography.get("age") is not None
        assert demography.get("dominant_gender") is not None
        assert demography.get("dominant_race") is not None
        assert demography.get("dominant_emotion") is not None

    logger.info("✅ test analyze for all actions as tuple done")


def test_analyze_with_all_actions_as_list():
    img = "dataset/img4.jpg"
    demography_objs = DeepFace.analyze(
        img, actions=["age", "gender", "race", "emotion"], silent=True
    )

    for demography in demography_objs:
        logger.debug(f"Demography: {demography}")
        age = demography["age"]
        gender = demography["dominant_gender"]
        race = demography["dominant_race"]
        emotion = demography["dominant_emotion"]
        logger.debug(f"Age: {age}")
        logger.debug(f"Gender: {gender}")
        logger.debug(f"Race: {race}")
        logger.debug(f"Emotion: {emotion}")
        assert demography.get("age") is not None
        assert demography.get("dominant_gender") is not None
        assert demography.get("dominant_race") is not None
        assert demography.get("dominant_emotion") is not None

    logger.info("✅ test analyze for all actions as array done")


def test_analyze_for_some_actions():
    img = "dataset/img4.jpg"
    demography_objs = DeepFace.analyze(img, ["age", "gender"], silent=True)

    for demography in demography_objs:
        age = demography["age"]
        gender = demography["dominant_gender"]

        logger.debug(f"Age: {age}")
        logger.debug(f"Gender: {gender}")

        assert demography.get("age") is not None
        assert demography.get("dominant_gender") is not None

        # these are not in actions
        assert demography.get("dominant_race") is None
        assert demography.get("dominant_emotion") is None

    logger.info("✅ test analyze for some actions done")


def test_analyze_for_preloaded_image():
    img = cv2.imread("dataset/img1.jpg")
    resp_objs = DeepFace.analyze(img, silent=True)
    for resp_obj in resp_objs:
        logger.debug(resp_obj)
        assert resp_obj["age"] > 20 and resp_obj["age"] < 40
        assert resp_obj["dominant_gender"] == "Woman"

    logger.info("✅ test analyze for pre-loaded image done")


def test_analyze_for_different_detectors():
    img_paths = [
        "dataset/img1.jpg",
        "dataset/img5.jpg",
        "dataset/img6.jpg",
        "dataset/img8.jpg",
        "dataset/img1.jpg",
        "dataset/img2.jpg",
        "dataset/img1.jpg",
        "dataset/img2.jpg",
        "dataset/img6.jpg",
        "dataset/img6.jpg",
    ]

    for img_path in img_paths:
        for detector in detectors:
            results = DeepFace.analyze(
                img_path, actions=("gender",), detector_backend=detector, enforce_detection=False
            )
            for result in results:
                logger.debug(result)

                # validate keys
                assert "gender" in result.keys()
                assert "dominant_gender" in result.keys() and result["dominant_gender"] in [
                    "Man",
                    "Woman",
                ]

                # validate probabilities
                if result["dominant_gender"] == "Man":
                    assert result["gender"]["Man"] > result["gender"]["Woman"]
                else:
                    assert result["gender"]["Man"] < result["gender"]["Woman"]
tests/test_enforce_detection.py (new file)
@@ -0,0 +1,46 @@
import pytest
import numpy as np
from deepface import DeepFace
from deepface.commons.logger import Logger

logger = Logger("tests/test_enforce_detection.py")


def test_enabled_enforce_detection_for_non_facial_input():
    black_img = np.zeros([224, 224, 3])

    with pytest.raises(ValueError, match="Face could not be detected."):
        DeepFace.represent(img_path=black_img)

    with pytest.raises(ValueError, match="Face could not be detected."):
        DeepFace.verify(img1_path=black_img, img2_path=black_img)

    logger.info("✅ enabled enforce detection with non facial input tests done")


def test_disabled_enforce_detection_for_non_facial_input_on_represent():
    black_img = np.zeros([224, 224, 3])
    objs = DeepFace.represent(img_path=black_img, enforce_detection=False)

    assert isinstance(objs, list)
    assert len(objs) > 0
    assert isinstance(objs[0], dict)
    assert "embedding" in objs[0].keys()
    assert "facial_area" in objs[0].keys()
    assert isinstance(objs[0]["facial_area"], dict)
    assert "x" in objs[0]["facial_area"].keys()
    assert "y" in objs[0]["facial_area"].keys()
    assert "w" in objs[0]["facial_area"].keys()
    assert "h" in objs[0]["facial_area"].keys()
    assert isinstance(objs[0]["embedding"], list)
    assert len(objs[0]["embedding"]) == 2622  # embedding of VGG-Face

    logger.info("✅ disabled enforce detection with non facial input test for represent tests done")


def test_disabled_enforce_detection_for_non_facial_input_on_verify():
    black_img = np.zeros([224, 224, 3])
    obj = DeepFace.verify(img1_path=black_img, img2_path=black_img, enforce_detection=False)
    assert isinstance(obj, dict)

    logger.info("✅ disabled enforce detection with non facial input test for verify tests done")
tests/test_extract_faces.py (new file)
@@ -0,0 +1,24 @@
from deepface import DeepFace
from deepface.commons.logger import Logger

logger = Logger("tests/test_extract_faces.py")


def test_different_detectors():
    detectors = ["opencv", "mtcnn"]

    for detector in detectors:
        img_objs = DeepFace.extract_faces(img_path="dataset/img11.jpg", detector_backend=detector)
        for img_obj in img_objs:
            assert "face" in img_obj.keys()
            assert "facial_area" in img_obj.keys()
            assert isinstance(img_obj["facial_area"], dict)
            assert "x" in img_obj["facial_area"].keys()
            assert "y" in img_obj["facial_area"].keys()
            assert "w" in img_obj["facial_area"].keys()
            assert "h" in img_obj["facial_area"].keys()
            assert "confidence" in img_obj.keys()

            img = img_obj["face"]
            assert img.shape[0] > 0 and img.shape[1] > 0
        logger.info(f"✅ extract_faces for {detector} backend test is done")
tests/test_find.py (new file)
@@ -0,0 +1,26 @@
import cv2
import pandas as pd
from deepface import DeepFace
from deepface.commons.logger import Logger

logger = Logger("tests/test_find.py")


def test_find_with_exact_path():
    dfs = DeepFace.find(img_path="dataset/img1.jpg", db_path="dataset", silent=True)
    for df in dfs:
        assert isinstance(df, pd.DataFrame)
        logger.debug(df.head())
        assert df.shape[0] > 0
    logger.info("✅ test find for exact path done")


def test_find_with_array_input():
    img1 = cv2.imread("dataset/img1.jpg")
    dfs = DeepFace.find(img1, db_path="dataset", silent=True)

    for df in dfs:
        logger.debug(df.head())
        assert df.shape[0] > 0

    logger.info("✅ test find for array input done")
tests/test_represent.py (new file)
@@ -0,0 +1,30 @@
from deepface import DeepFace
from deepface.commons.logger import Logger

logger = Logger("tests/test_represent.py")


def test_standard_represent():
    img_path = "dataset/img1.jpg"
    embedding_objs = DeepFace.represent(img_path)
    for embedding_obj in embedding_objs:
        embedding = embedding_obj["embedding"]
        logger.info(f"Function returned {len(embedding)} dimensional vector")
        assert len(embedding) == 2622
    logger.info("✅ test standard represent function done")


def test_represent_for_skipped_detector_backend():
    face_img = "dataset/img5.jpg"
    img_objs = DeepFace.represent(img_path=face_img, detector_backend="skip")
    assert len(img_objs) >= 1
    img_obj = img_objs[0]
    assert "embedding" in img_obj.keys()
    assert "facial_area" in img_obj.keys()
    assert isinstance(img_obj["facial_area"], dict)
    assert "x" in img_obj["facial_area"].keys()
    assert "y" in img_obj["facial_area"].keys()
    assert "w" in img_obj["facial_area"].keys()
    assert "h" in img_obj["facial_area"].keys()
    assert "face_confidence" in img_obj.keys()
    logger.info("✅ test represent function for skipped detector backend done")