Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Recognition Feature Works. Fixed Bugs as well ;) #1

Merged
merged 1 commit into from
Dec 16, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 4 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -71,5 +71,8 @@ fabric.properties
.idea/caches/build_file_checksums.ser

# our project
Attendance/
training_images/
unknown_images/
files/trainer.yml
trainer.yml
training_images/
5 changes: 2 additions & 3 deletions files/student_details.csv
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
Id,Name
ID,Name
,
,
,
Expand All @@ -17,5 +17,4 @@ Id,Name
,
,
,
0, Test
1,Aslam
0,Test
28 changes: 19 additions & 9 deletions main.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,16 @@
import pyfiglet


# input live stream from a recorder
INPUT_VIDEO = "http://192.168.1.100:8080/video"

# input from saved video
# INPUT_VIDEO = "video.avi"

# input from a device attached to computer
# INPUT_VIDEO = 0 # or -1 if 0 doesn't work


# creating the title bar function
def title_bar():
# os.system('cls') # for windows
Expand All @@ -31,16 +41,16 @@ def main_menu():
choice = int(input("Enter Choice: "))

if choice == 1:
check_camera()
check_camera(INPUT_VIDEO)
break
elif choice == 2:
capture_face()
capture_face(INPUT_VIDEO)
break
elif choice == 3:
train_face()
break
elif choice == 4:
recognize_face()
recognize_face(INPUT_VIDEO)
break
elif choice == 5:
print("Thank You =)")
Expand All @@ -57,15 +67,15 @@ def main_menu():

# ---------------------------------------------------------
# calling the camera test function from check camera.py file
def check_camera(input_video):
    """Run the camera self-test, then drop back to the main menu.

    input_video: camera index, video-file path, or stream URL — passed
    straight through to capture_video.start (which feeds it to
    cv2.VideoCapture).
    """
    capture_video.start(input_video)
    # return control to the interactive menu once the preview window closes
    main_menu()


# --------------------------------------------------------------
# calling the take image function form capture image.py file
def capture_face(input_video):
    """Capture training images of a student's face, then return to the menu.

    input_video: camera index, video-file path, or stream URL — passed
    straight through to capture_images.capture (which feeds it to
    cv2.VideoCapture).
    """
    capture_images.capture(input_video)
    # return control to the interactive menu once capturing finishes
    main_menu()


Expand All @@ -78,8 +88,8 @@ def train_face():

# --------------------------------------------------------------------
# calling the recognize_attendance from recognize.py file
def recognize_face(input_video):
    """Run face recognition and attendance marking, then return to the menu.

    input_video: camera index, video-file path, or stream URL — passed
    straight through to recognize.mark_attendance (which feeds it to
    cv2.VideoCapture).
    """
    recognize.mark_attendance(input_video)
    # return control to the interactive menu once recognition finishes
    main_menu()


Expand Down
Binary file modified modules/__pycache__/__init__.cpython-37.pyc
Binary file not shown.
Binary file modified modules/__pycache__/capture_images.cpython-37.pyc
Binary file not shown.
Binary file modified modules/__pycache__/capture_video.cpython-37.pyc
Binary file not shown.
Binary file modified modules/__pycache__/recognize.cpython-37.pyc
Binary file not shown.
Binary file modified modules/__pycache__/train_images.cpython-37.pyc
Binary file not shown.
31 changes: 13 additions & 18 deletions modules/capture_images.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,15 +3,6 @@
import cv2
import unicodedata # to check if entered in different unicode format

# input live stream from a recorder
# URL = "http://192.168.1.103:8080/video"

# input from saved video
# URL = "video.avi"

# input from a device attached to computer
URL = 0 # or -1


# check if student_id is a number
def is_number(_id):
Expand All @@ -31,13 +22,14 @@ def is_number(_id):


# Capture Image function definition
def capture():
def capture(input_video):
student_id = input("Enter Your ID (numbers only): ")
name = input("Enter Your Name (alphabets only): ")

# if "student_id is a number" and "name consists of alphabetic chars only" then
if is_number(student_id) and name.isalpha():
cap = cv2.VideoCapture(URL)
# store input video stream in cap variable
cap = cv2.VideoCapture(input_video)

# using haar cascade
haar_cascade_path = "files" + os.sep + "haarcascade_frontalface_default.xml"
Expand All @@ -48,35 +40,38 @@ def capture():
while True:
# capture frame-by-frame
ret, img = cap.read()

if ret is True:
# operations on the frame come here
if ret is True: # video is detected
# convert frame to grayscale
gray_frame = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# detect faces using haar cascade detector
faces = detector.detectMultiScale(gray_frame, 1.3, 5)
for(x, y, w, h) in faces:
cv2.rectangle(gray_frame, (x, y), (x+w, y+h), (255, 0, 0), 2) # ##gray
cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2) # ##gray

# incrementing number
increment_num += 1

# saving the captured face in the data-set folder training_images
cv2.imwrite("training_images" + os.sep + name + "." + student_id + '.' +
str(increment_num) + ".jpg", gray_frame[y:y+h, x:x+w]) # ##gray[y:y+h, x:x+w]
str(increment_num) + ".jpg", img[y:y+h, x:x+w]) # ##gray[y:y+h, x:x+w]

# display the resulting frame
cv2.imshow('frame', gray_frame) # ##gray
cv2.imshow('Capturing Face - Attendance using Face Recognition', img) # ##gray

# wait for 100 milliseconds
if cv2.waitKey(100) & 0xFF == ord('q'):
break
# break if the sample number is more than 100
elif increment_num > 60:
break
else:
else: # video not detected
break

# when everything is done
cap.release()
cv2.destroyAllWindows()

# res = "Images Saved for ID : " + student_id + " Name : " + name
row = [student_id, name]
with open("files" + os.sep + "student_details.csv", 'a+') as csv_file:
Expand Down
17 changes: 6 additions & 11 deletions modules/capture_video.py
Original file line number Diff line number Diff line change
@@ -1,30 +1,25 @@
import cv2

# input live stream from a recorder
# URL = "http://192.168.1.103:8080/video"

# input from saved video
# URL = "video.avi"
def start(input_video):
    """Preview the given video source frame by frame.

    Shows each frame in a window until the user presses 'q' or the
    stream ends / cannot be read.

    input_video: camera index, video-file path, or stream URL accepted by
    cv2.VideoCapture — NOTE(review): caller is expected to supply one of
    these; not validated here.
    """
    # store input video stream capture in cap variable
    cap = cv2.VideoCapture(input_video)

    while cap.isOpened():
        # capture frame-by-frame
        ret, frame = cap.read()
        if ret is True:  # video is detected
            # display the resulting frame
            cv2.imshow('Checking Video - Attendance using Face Recognition', frame)

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        else:  # video not detected (end of stream or read failure)
            break

    # when everything is done, release the capture and close the window
    cap.release()
    cv2.destroyAllWindows()
103 changes: 61 additions & 42 deletions modules/recognize.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,53 +5,72 @@
import pandas as pd


def mark_attendance():
def mark_attendance(input_video):
    """Recognize known faces from a video source and record their attendance.

    Reads the trained LBPH model from files/trainer.yml and the student
    roster from files/student_details.csv, then watches the video stream:
    recognized students (confidence < 50) are appended to an attendance
    dataframe; clearly unknown faces (confidence > 75) are snapshotted into
    unknown_images/. On exit ('q' or end of stream) the attendance is
    written to Attendance/Attendance_<date>_<time>.csv.

    input_video: camera index, video-file path, or stream URL accepted by
    cv2.VideoCapture.
    """
    # reading trained dataset
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    recognizer.read("files" + os.sep + "trainer.yml")

    # using haar cascade for face detection
    haar_cascade_path = "files" + os.sep + "haarcascade_frontalface_default.xml"
    face_cascade = cv2.CascadeClassifier(haar_cascade_path)

    # preparing pandas dataframes: roster and (empty) attendance sheet
    df = pd.read_csv("files" + os.sep + "student_details.csv")
    col_names = ['ID', 'Name', 'Date', 'Time']
    attendance_df = pd.DataFrame(columns=col_names)

    # store input video stream capture in cam variable
    cam = cv2.VideoCapture(input_video)
    font = cv2.FONT_HERSHEY_SIMPLEX

    while True:
        # capture frame-by-frame
        ret, img = cam.read()
        if ret is True:  # video is detected
            # convert frame to grayscale (LBPH works on grayscale)
            gray_frame = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

            # detect faces using haar cascade detector
            faces = face_cascade.detectMultiScale(gray_frame, 1.2, 5)
            for (x, y, w, h) in faces:
                cv2.rectangle(img, (x, y), (x + w, y + h), (225, 0, 0), 2)
                # lower conf means a better match for LBPH
                _id, conf = recognizer.predict(gray_frame[y:y + h, x:x + w])

                if conf < 50:  # confident match: mark attendance
                    current_time = time.time()
                    date = datetime.datetime.fromtimestamp(current_time).strftime('%Y-%m-%d')
                    timestamp = datetime.datetime.fromtimestamp(current_time).strftime('%H:%M:%S')
                    student_name = df.loc[df['ID'] == _id]['Name'].values[0]
                    display_text = student_name
                    attendance_df.loc[len(attendance_df)] = [_id, student_name, date, timestamp]
                else:
                    display_text = 'Unknown'

                    if conf > 75:  # clearly unknown face: save a snapshot
                        file_number = len(os.listdir("unknown_images")) + 1
                        cv2.imwrite("unknown_images" + os.sep + "Image" + str(file_number) +
                                    ".jpg", img[y:y + h, x:x + w])
                cv2.putText(img, display_text, (x, y + h), font, 1, (255, 255, 255), 2)

            # keep only the first sighting of each student
            attendance_df = attendance_df.drop_duplicates(subset=['ID'], keep='first')
            cv2.imshow('Recognizing Faces - Attendance using Face Recognition', img)
            if cv2.waitKey(1) == ord('q'):
                break
        else:  # video not detected (end of stream or read failure)
            break

    # get current time and date for the output file name
    current_time = time.time()
    date = datetime.datetime.fromtimestamp(current_time).strftime('%Y-%m-%d')
    timestamp = datetime.datetime.fromtimestamp(current_time).strftime('%H:%M:%S')
    hour, minute, second = timestamp.split(":")

    # create a csv(comma separated value) file and append current date and time to its name
    file_name = "Attendance" + os.sep + "Attendance_" + date + "_" + hour + "-" + minute + "-" + second + ".csv"
    attendance_df.to_csv(file_name, index=False)

    # when everything is done, release the capture and close windows
    cam.release()
    cv2.destroyAllWindows()

    print("Attendance Successful!")
4 changes: 2 additions & 2 deletions modules/train_images.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,9 +32,9 @@ def get_images_and_labels(path):
# ----------- train images function ---------------
def train():
    """Train the LBPH face recognizer on the captured training images.

    Reads labeled face images via get_images_and_labels("training_images"),
    trains an LBPH model on them, and saves it to files/trainer.yml.
    """
    recognizer = cv2.face_LBPHFaceRecognizer.create()
    # haar_cascade_path = "files" + os.sep + "haarcascade_frontalface_default.xml"
    # detector = cv2.CascadeClassifier(haar_cascade_path)
    faces, _id = get_images_and_labels("training_images")
    recognizer.train(faces, np.array(_id))
    recognizer.save("files" + os.sep + "trainer.yml")
    print("Images Trained Successfully")
1 change: 1 addition & 0 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
pyfiglet
appdirs==1.4.3
attrs==19.1.0
black==19.3b0
Expand Down