classify_video.py
import math
import os
import time

import cv2
from tensorflow import keras
from tensorflow.keras.applications.resnet50 import preprocess_input

# Class labels in the order the model's output layer was trained on.
LABELS = ["fire", "no fire"]

# Number of frames to sample per second of video.
frames_per_second = 4
def get_image_generator(filename=None):
    """Yield frames from a video file, or from the attached camera if no filename is given."""
    # Passing 0 to VideoCapture captures video from the attached camera interface.
    if filename:
        cap = cv2.VideoCapture(filename)
    else:
        cap = cv2.VideoCapture(0)

    # Number of source frames to skip between sampled frames (at least 1,
    # in case the source reports no FPS metadata).
    fps = cap.get(cv2.CAP_PROP_FPS)
    frame_interval = max(1, math.floor(fps / frames_per_second))

    while True:
        ret, frame = cap.read()
        if not ret:
            break
        frame_id = cap.get(cv2.CAP_PROP_POS_FRAMES)
        # File input: sample frames_per_second frames per second of video.
        if filename and frame_id % frame_interval == 0:
            yield frame
        # Camera input: yield roughly one frame per second of wall-clock time.
        if not filename:
            before = time.time()
            yield frame
            remaining_time = max(0, 1 - (time.time() - before))
            time.sleep(remaining_time)
        # Pump GUI events and allow quitting with 'q'.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything is done, release the capture.
    cap.release()
    cv2.destroyAllWindows()
def resize(img):
    # The model expects 224x224 input; add a leading batch dimension.
    return cv2.resize(img, (224, 224)).reshape(1, 224, 224, 3)
def main():
    model = keras.models.load_model('saved_models/resnet50')
    # fp = open('framebyframe.txt', 'w')
    image_generator = get_image_generator('./aerial_video.mp4')
    # image_generator = get_image_generator()  # uncomment to use the attached camera instead
    for frame in image_generator:
        res = model.predict(preprocess_input(resize(frame)))
        result_label = LABELS[res.argmax()]
        # fp.write(str(result_label) + "\n")
        print(f'{result_label} - {res[0]}')
        # Only show frames when a display is available (e.g. not on a headless server).
        # The window is refreshed by the waitKey call in get_image_generator.
        if "DISPLAY" in os.environ:
            cv2.imshow('frame', frame)
    # fp.close()


if __name__ == '__main__':
    main()