From 9b1373301c37465d7baaf1aeb22f6aa0e076853a Mon Sep 17 00:00:00 2001
From: codewhizz <45493966+codewhizz@users.noreply.github.com>
Date: Mon, 16 Dec 2019 03:42:26 +0530
Subject: [PATCH] Recognition feature works; fixed bugs as well ;)

---
 .gitignore                                    |   5 +-
 files/student_details.csv                    |   5 +-
 main.py                                      |  28 +++--
 modules/__pycache__/__init__.cpython-37.pyc  | Bin 155 -> 155 bytes
 .../__pycache__/capture_images.cpython-37.pyc | Bin 1730 -> 1772 bytes
 .../__pycache__/capture_video.cpython-37.pyc | Bin 520 -> 564 bytes
 modules/__pycache__/recognize.cpython-37.pyc | Bin 2064 -> 2154 bytes
 .../__pycache__/train_images.cpython-37.pyc  | Bin 1165 -> 1165 bytes
 modules/capture_images.py                    |  31 +++---
 modules/capture_video.py                     |  17 +--
 modules/recognize.py                         | 103 +++++++++++-------
 modules/train_images.py                      |   4 +-
 requirements.txt                             |   1 +
 13 files changed, 108 insertions(+), 86 deletions(-)

diff --git a/.gitignore b/.gitignore
index 3722d1c..7c5e67c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -71,5 +71,8 @@ fabric.properties
 .idea/caches/build_file_checksums.ser
 
 # our project
+Attendance/
+training_images/
+unknown_images/
+files/trainer.yml
 trainer.yml
-training_images/
\ No newline at end of file
diff --git a/files/student_details.csv b/files/student_details.csv
index 08d99a7..30a8dc0 100644
--- a/files/student_details.csv
+++ b/files/student_details.csv
@@ -1,4 +1,4 @@
-Id,Name
+ID,Name
 ,
 ,
 ,
@@ -17,5 +17,4 @@
 ,
 ,
 ,
-0, Test
-1,Aslam
+0,Test
diff --git a/main.py b/main.py
index 8e735b8..917d0d5 100644
--- a/main.py
+++ b/main.py
@@ -6,6 +6,16 @@
 import pyfiglet
 
+# input live stream from a recorder
+INPUT_VIDEO = "http://192.168.1.100:8080/video"
+
+# input from saved video
+# INPUT_VIDEO = "video.avi"
+
+# input from a device attached to computer
+# INPUT_VIDEO = 0  # or -1 if 0 doesn't work
+
+
 # creating the title bar function
 def title_bar():
     # os.system('cls')  # for windows
@@ -31,16 +41,16 @@
         choice = int(input("Enter Choice: "))
 
         if choice == 1:
-            check_camera()
+            check_camera(INPUT_VIDEO)
             break
         elif choice == 2:
-            capture_face()
+            capture_face(INPUT_VIDEO)
             break
        elif choice == 3:
             train_face()
             break
         elif choice == 4:
-            recognize_face()
+            recognize_face(INPUT_VIDEO)
             break
         elif choice == 5:
             print("Thank You =)")
@@ -57,15 +67,15 @@
 # ---------------------------------------------------------
 # calling the camera test function from capture_video.py file
-def check_camera():
-    capture_video.start()
+def check_camera(input_video):
+    capture_video.start(input_video)
     main_menu()
 
 
 # --------------------------------------------------------------
 # calling the take image function from capture_images.py file
-def capture_face():
-    capture_images.capture()
+def capture_face(input_video):
+    capture_images.capture(input_video)
     main_menu()
 
@@ -78,8 +88,8 @@ def train_face():
 
 # --------------------------------------------------------------------
 # calling the mark_attendance function from recognize.py file
-def recognize_face():
-    recognize.mark_attendance()
+def recognize_face(input_video):
+    recognize.mark_attendance(input_video)
     main_menu()
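Note (editor's sketch, not part of the patch): a single INPUT_VIDEO constant works for all three sources because cv2.VideoCapture accepts a device index, a file path, or a network-stream URL through the same constructor. The URL above is a placeholder for an IP-camera app; a quick way to sanity-check whichever source is configured:

import cv2

INPUT_VIDEO = 0  # or "video.avi", or "http://192.168.1.100:8080/video" (placeholder)

cap = cv2.VideoCapture(INPUT_VIDEO)
if not cap.isOpened():
    print("Could not open video source:", INPUT_VIDEO)
else:
    ret, _ = cap.read()  # grab a single frame to confirm frames actually arrive
    print("Stream OK" if ret else "Opened, but no frames received")
cap.release()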
diff --git a/modules/__pycache__/__init__.cpython-37.pyc b/modules/__pycache__/__init__.cpython-37.pyc
index 9d08e7432bf1fcd087649169da5539584dc33f3a..460e0a18379ac696ade665c194e32c2d9dc52612 100644
GIT binary patch
[base85 delta omitted: compiled bytecode cache, not human-readable]
diff --git a/modules/__pycache__/capture_video.cpython-37.pyc b/modules/__pycache__/capture_video.cpython-37.pyc
index 064859364c449d17fba118756902ff20d3654422..2ac3694ba6049e7e3dfbb77d02dbd683fc441b66 100644
GIT binary patch
[base85 literal omitted: compiled bytecode cache, not human-readable]
diff --git a/modules/__pycache__/train_images.cpython-37.pyc b/modules/__pycache__/train_images.cpython-37.pyc
index f1a34d84267789ffb9aaecf7dd61fa3b0f2a889d..e5ef29a9bcc04b338136c7212737a3f33420949e 100644
GIT binary patch
[base85 delta omitted: compiled bytecode cache, not human-readable]
[headers and base85 deltas for capture_images.cpython-37.pyc and recognize.cpython-37.pyc garbled in extraction and omitted; their sizes appear in the diffstat above]
diff --git a/modules/capture_images.py b/modules/capture_images.py
index 407deaa..49396a5 100644
--- a/modules/capture_images.py
+++ b/modules/capture_images.py
@@ -3,15 +3,6 @@
 import cv2
 import unicodedata  # to check if entered in different unicode format
 
-# input live stream from a recorder
-# URL = "http://192.168.1.103:8080/video"
-
-# input from saved video
-# URL = "video.avi"
-
-# input from a device attached to computer
-URL = 0  # or -1
-
 
 # check if student_id is a number
 def is_number(_id):
@@ -31,13 +22,14 @@
 
 
 # Capture Image function definition
-def capture():
+def capture(input_video):
     student_id = input("Enter Your ID (numbers only): ")
     name = input("Enter Your Name (alphabets only): ")
 
     # if "student_id is a number" and "name consists of alphabetic chars only" then
     if is_number(student_id) and name.isalpha():
-        cap = cv2.VideoCapture(URL)
+        # store input video stream in cap variable
+        cap = cv2.VideoCapture(input_video)
 
         # using haar cascade
         haar_cascade_path = "files" + os.sep + "haarcascade_frontalface_default.xml"
@@ -48,24 +40,24 @@
         while True:
             # capture frame-by-frame
             ret, img = cap.read()
-
-            if ret is True:
-                # operations on the frame come here
+            if ret is True:  # video is detected
+                # convert frame to grayscale
                 gray_frame = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+
+                # detect faces using haar cascade detector
                 faces = detector.detectMultiScale(gray_frame, 1.3, 5)
 
                 for(x, y, w, h) in faces:
-                    cv2.rectangle(gray_frame, (x, y), (x+w, y+h), (255, 0, 0), 2)  # ##gray
+                    cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)  # ##gray
 
                     # incrementing number
                     increment_num += 1
                     # saving the captured face in the data-set folder training_images
                     cv2.imwrite("training_images" + os.sep + name + "." + student_id + '.' +
-                                str(increment_num) + ".jpg", gray_frame[y:y+h, x:x+w])  # ##gray[y:y+h, x:x+w]
+                                str(increment_num) + ".jpg", img[y:y+h, x:x+w])  # ##gray[y:y+h, x:x+w]
 
                 # display the resulting frame
-                cv2.imshow('frame', gray_frame)  # ##gray
+                cv2.imshow('Capturing Face - Attendance using Face Recognition', img)  # ##gray
 
                 # wait for 100 milliseconds
                 if cv2.waitKey(100) & 0xFF == ord('q'):
@@ -73,10 +65,13 @@
                     break
                 # break if the sample number is more than 60
                 elif increment_num > 60:
                     break
-            else:
+            else:  # video not detected
                 break
+
+        # when everything is done
         cap.release()
         cv2.destroyAllWindows()
+
         # res = "Images Saved for ID : " + student_id + " Name : " + name
         row = [student_id, name]
         with open("files" + os.sep + "student_details.csv", 'a+') as csv_file:
diff --git a/modules/capture_video.py b/modules/capture_video.py
index 716bd81..49887be 100644
--- a/modules/capture_video.py
+++ b/modules/capture_video.py
@@ -1,30 +1,25 @@
 import cv2
 
-# input live stream from a recorder
-# URL = "http://192.168.1.103:8080/video"
 
-# input from saved video
-# URL = "video.avi"
+def start(input_video):
+    # store input video stream capture in cap variable
+    cap = cv2.VideoCapture(input_video)
 
-# input from a device attached to computer
-URL = 0  # or -1
-
-
-def start():
-    cap = cv2.VideoCapture(URL)
     while cap.isOpened():
         # capture frame-by-frame
         ret, frame = cap.read()
 
         if ret is True:  # video is detected
+            # convert frame to grayscale
             # gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
 
             # display the resulting frame
-            cv2.imshow('frame', frame)
+            cv2.imshow('Checking Video - Attendance using Face Recognition', frame)
 
             if cv2.waitKey(1) & 0xFF == ord('q'):
                 break
         else:  # video not detected
             break
 
+    # when everything is done
     cap.release()
     cv2.destroyAllWindows()
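Note (editor's sketch, not part of the patch): both capture modules now share the same guarded read loop, and the bug fix here is that detection runs on a grayscale copy while drawing and saving use the original colour frame (the old code drew on and saved the grayscale image). A minimal standalone version of that pattern, with the cascade path assumed from this patch's files/ layout:

import os
import cv2

# Haar cascade shipped with the project (path assumed from the patch)
detector = cv2.CascadeClassifier("files" + os.sep + "haarcascade_frontalface_default.xml")

cap = cv2.VideoCapture(0)  # any source accepted by VideoCapture
while cap.isOpened():
    ret, img = cap.read()
    if ret is not True:  # stream ended or failed
        break
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # detection runs on grayscale
    for (x, y, w, h) in detector.detectMultiScale(gray, 1.3, 5):
        cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)  # draw on the colour frame
    cv2.imshow('preview', img)
    if cv2.waitKey(100) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()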
diff --git a/modules/recognize.py b/modules/recognize.py
index a672d06..5bee44f 100644
--- a/modules/recognize.py
+++ b/modules/recognize.py
@@ -5,53 +5,72 @@
 import pandas as pd
 
 
-def mark_attendance():
+def mark_attendance(input_video):
+    # reading trained dataset
     recognizer = cv2.face.LBPHFaceRecognizer_create()  # cv2.createLBPHFaceRecognizer()
-    recognizer.read("TrainingImageLabel"+os.sep+"trainer.yml")
-    harcascadePath = "haarcascade_frontalface_default.xml"
-    faceCascade = cv2.CascadeClassifier(harcascadePath)
-    df = pd.read_csv("StudentDetails"+os.sep+"student_details.csv")
-    cam = cv2.VideoCapture(0)
+    recognizer.read("files" + os.sep + "trainer.yml")
+
+    # using haar cascade
+    haar_cascade_path = "files" + os.sep + "haarcascade_frontalface_default.xml"
+    face_cascade = cv2.CascadeClassifier(haar_cascade_path)
+
+    # preparing pandas dataframe
+    df = pd.read_csv("files" + os.sep + "student_details.csv")
+    col_names = ['ID', 'Name', 'Date', 'Time']
+    attendance_df = pd.DataFrame(columns=col_names)
+
+    # store input video stream capture in cam variable
+    cam = cv2.VideoCapture(input_video)
     font = cv2.FONT_HERSHEY_SIMPLEX
-    col_names = ['Id', 'Name', 'Date', 'Time']
-    attendance = pd.DataFrame(columns=col_names)
 
     while True:
-        ret, im = cam.read()
-        gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
-        faces = faceCascade.detectMultiScale(gray, 1.2, 5)
-        for(x, y, w, h) in faces:
-            cv2.rectangle(im, (x, y), (x+w, y+h), (225, 0, 0), 2)
-            Id, conf = recognizer.predict(gray[y:y+h, x:x+w])
-
-            if(conf < 50):
-                ts = time.time()
-                date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')
-                timeStamp = datetime.datetime.fromtimestamp(
-                    ts).strftime('%H:%M:%S')
-                aa = df.loc[df['Id'] == Id]['Name'].values
-                tt = str(Id)+"-"+aa
-                attendance.loc[len(attendance)] = [Id, aa, date, timeStamp]
-
-            else:
-                Id = 'Unknown'
-                tt = str(Id)
-                if(conf > 75):
-                    noOfFile = len(os.listdir("ImagesUnknown"))+1
-                    cv2.imwrite("ImagesUnknown"+os.sep+"Image"+str(noOfFile) +
-                                ".jpg", im[y:y+h, x:x+w])
-            cv2.putText(im, str(tt), (x, y+h), font, 1, (255, 255, 255), 2)
-        attendance = attendance.drop_duplicates(subset=['Id'], keep='first')
-        cv2.imshow('im', im)
-        if (cv2.waitKey(1) == ord('q')):
+        # capture frame-by-frame
+        ret, img = cam.read()
+        if ret is True:  # video is detected
+            # convert frame to grayscale
+            gray_frame = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+
+            # detect faces using haar cascade detector
+            faces = face_cascade.detectMultiScale(gray_frame, 1.2, 5)
+            for(x, y, w, h) in faces:
+                cv2.rectangle(img, (x, y), (x+w, y+h), (225, 0, 0), 2)
+                _id, conf = recognizer.predict(gray_frame[y:y+h, x:x+w])
+
+                if conf < 50:
+                    current_time = time.time()
+                    date = datetime.datetime.fromtimestamp(current_time).strftime('%Y-%m-%d')
+                    timestamp = datetime.datetime.fromtimestamp(current_time).strftime('%H:%M:%S')
+                    student_name = df.loc[df['ID'] == _id]['Name'].values[0]
+                    display_text = student_name  # str(_id) + "-"
+
+                    attendance_df.loc[len(attendance_df)] = [_id, student_name, date, timestamp]
+
+                else:
+                    display_text = 'Unknown'
+
+                    if conf > 75:
+                        file_number = len(os.listdir("unknown_images")) + 1
+                        cv2.imwrite("unknown_images" + os.sep + "Image" + str(file_number) +
+                                    ".jpg", img[y:y+h, x:x+w])
+                cv2.putText(img, display_text, (x, y+h), font, 1, (255, 255, 255), 2)
+            attendance_df = attendance_df.drop_duplicates(subset=['ID'], keep='first')
+            cv2.imshow('Recognizing Faces - Attendance using Face Recognition', img)
+            if cv2.waitKey(1) == ord('q'):
+                break
+        else:  # video not detected
             break
-    ts = time.time()
-    date = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')
-    timeStamp = datetime.datetime.fromtimestamp(ts).strftime('%H:%M:%S')
-    Hour, Minute, Second = timeStamp.split(":")
-    fileName = "Attendance"+os.sep+"Attendance_"+date+"_"+Hour+"-"+Minute+"-"+Second+".csv"
-    attendance.to_csv(fileName, index=False)
+
+    # get current time and date
+    current_time = time.time()
+    date = datetime.datetime.fromtimestamp(current_time).strftime('%Y-%m-%d')
+    timestamp = datetime.datetime.fromtimestamp(current_time).strftime('%H:%M:%S')
+    hour, minute, second = timestamp.split(":")
+
+    # create a csv (comma-separated values) file and append current date and time to its name
+    file_name = "Attendance" + os.sep + "Attendance_" + date + "_" + hour + "-" + minute + "-" + second + ".csv"
+    attendance_df.to_csv(file_name, index=False)
+
+    # when everything is done
    cam.release()
     cv2.destroyAllWindows()
-    print("Attendance Successfull")
+    print("Attendance Successful!")
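Note (editor's sketch, not part of the patch): LBPHFaceRecognizer.predict returns a (label, confidence) pair in which confidence is a distance, so lower values mean closer matches. This patch accepts matches below 50 and archives face crops above 75 into unknown_images/; both thresholds are the author's choice. A small helper (the name is illustrative) isolating that decision:

def classify_match(conf, accept_below=50.0, archive_above=75.0):
    """Map an LBPH distance to the patch's decision; lower = closer match."""
    if conf < accept_below:
        return "known"            # mark attendance
    if conf > archive_above:
        return "unknown-archive"  # crop gets saved to unknown_images/
    return "unknown"              # in-between: labelled Unknown, crop not saved

# usage against a trained model (paths as in the patch):
# recognizer = cv2.face.LBPHFaceRecognizer_create()
# recognizer.read("files" + os.sep + "trainer.yml")
# _id, conf = recognizer.predict(gray_face)
# print(_id, conf, classify_match(conf))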
diff --git a/modules/train_images.py b/modules/train_images.py
index 4ff3fd0..ec1a187 100644
--- a/modules/train_images.py
+++ b/modules/train_images.py
@@ -32,9 +32,9 @@ def get_images_and_labels(path):
 # ----------- train images function ---------------
 def train():
     recognizer = cv2.face_LBPHFaceRecognizer.create()
-    # haar_cascade_path = "files"+os.sep+"haarcascade_frontalface_default.xml"
+    # haar_cascade_path = "files" + os.sep + "haarcascade_frontalface_default.xml"
     # detector = cv2.CascadeClassifier(haar_cascade_path)
     faces, _id = get_images_and_labels("training_images")
     recognizer.train(faces, np.array(_id))
-    recognizer.save("files"+os.sep+"trainer.yml")
+    recognizer.save("files" + os.sep + "trainer.yml")
     print("Images Trained Successfully")
diff --git a/requirements.txt b/requirements.txt
index 62a5130..9be1d46 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,3 +1,4 @@
+pyfiglet
 appdirs==1.4.3
 attrs==19.1.0
 black==19.3b0
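Note (editor's sketch, not part of the patch): get_images_and_labels is unchanged by the train_images.py hunk above, so its body is not shown. Since capture_images saves crops as <name>.<id>.<counter>.jpg, a plausible reconstruction of the training pipeline looks like this (assumed implementation, not the author's exact code):

import os
import cv2
import numpy as np

def get_images_and_labels(path):
    faces, ids = [], []
    for file_name in os.listdir(path):
        if not file_name.endswith(".jpg"):
            continue
        # load the saved crop as the grayscale array LBPH expects
        face = cv2.imread(os.path.join(path, file_name), cv2.IMREAD_GRAYSCALE)
        # filename format written by capture_images: name.id.counter.jpg
        ids.append(int(file_name.split(".")[1]))
        faces.append(face)
    return faces, ids

faces, ids = get_images_and_labels("training_images")
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.train(faces, np.array(ids))
recognizer.save("files" + os.sep + "trainer.yml")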