"""Real-time face-recognition attendance logger.

Loads reference face images from IMAGE_DIR, encodes them with the
`face_recognition` library, then matches faces seen on the default webcam
against those encodings.  Each recognised person gets one timestamped row
appended to ATTENDANCE_FILE.  Press Enter (key code 13) in the video
window to quit.
"""

import os
from datetime import datetime

import cv2
import face_recognition
import numpy as np

IMAGE_DIR = 'Images'
ATTENDANCE_FILE = 'Attendance.csv'
# Frames are downscaled by this factor before detection to speed it up;
# detected coordinates are scaled back by 1/SCALE_FACTOR for drawing.
SCALE_FACTOR = 0.25


def load_images_and_names():
    """Return (images, names) for every readable image file in IMAGE_DIR.

    The person's name is the file name without its extension.  Files that
    cv2 cannot decode (e.g. stray .DS_Store) are skipped instead of
    letting a None image crash the encoder later.
    """
    images, names = [], []
    for file in os.listdir(IMAGE_DIR):
        img = cv2.imread(f'{IMAGE_DIR}/{file}')
        if img is None:  # not a decodable image -- skip it
            continue
        images.append(img)
        names.append(os.path.splitext(file)[0])
    return images, names


def generate_face_encodings(images):
    """Return the first face encoding found in each BGR image.

    Raises IndexError when a reference photo contains no detectable face,
    which is the desired fail-fast behaviour for a bad input image.
    """
    return [
        face_recognition.face_encodings(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))[0]
        for img in images
    ]


def record_attendance(name):
    """Append ``name,time,date`` to ATTENDANCE_FILE if not already present.

    Opens the file in 'a+' mode so a missing file is created on first run
    instead of raising FileNotFoundError (as 'r+' would).
    """
    with open(ATTENDANCE_FILE, 'a+') as file:
        file.seek(0)  # 'a+' positions at EOF; rewind to read existing rows
        existing_names = {line.split(',')[0] for line in file}
        if name not in existing_names:
            timestamp = datetime.now().strftime('%H:%M:%S,%d/%m/%Y')
            file.write(f'\n{name},{timestamp}')


known_images, known_names = load_images_and_names()
known_encodings = generate_face_encodings(known_images)

video_capture = cv2.VideoCapture(0)

while True:
    ret, frame = video_capture.read()
    if not ret:  # camera unavailable / stream ended -- avoid resize(None)
        break

    scaled_frame = cv2.resize(frame, (0, 0), fx=SCALE_FACTOR, fy=SCALE_FACTOR)
    rgb_frame = cv2.cvtColor(scaled_frame, cv2.COLOR_BGR2RGB)

    face_locations = face_recognition.face_locations(rgb_frame)
    face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)

    for encoding, location in zip(face_encodings, face_locations):
        matches = face_recognition.compare_faces(known_encodings, encoding)
        face_distances = face_recognition.face_distance(known_encodings, encoding)
        if len(face_distances) == 0:  # no reference faces loaded -- argmin would raise
            continue
        best_match_index = np.argmin(face_distances)

        if matches[best_match_index]:
            name = known_names[best_match_index].upper()
            # Coordinates come from the downscaled frame; rescale them to
            # the full-size frame before drawing the overlays.
            top, right, bottom, left = [
                coord * int(1 / SCALE_FACTOR) for coord in location
            ]
            cv2.rectangle(frame, (left, top), (right, bottom), (255, 0, 0), 2)
            cv2.rectangle(frame, (left, bottom - 35), (right, bottom),
                          (0, 255, 0), cv2.FILLED)
            cv2.putText(frame, name, (left + 6, bottom - 6),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
            record_attendance(name)

    cv2.imshow('Video Feed', frame)
    if cv2.waitKey(10) == 13:  # Enter key exits the loop
        break

video_capture.release()
cv2.destroyAllWindows()