app.py
from flask import Flask, render_template, Response, request, redirect, url_for, jsonify
import time
import cv2
import torch
from PIL import Image
import sys
import matplotlib.pyplot as plt
import utils.model
from models.stacked_hourglass import PoseNet, SGSC_PoseNet
from preprocessing.img import prepare_image, opencv_to_PIL, PIL_to_opencv
from preprocessing.keypoints import get_keyppoints, post_process_keypoints, draw_cv2_keypoints, draw_keypoints, upScaleKeypoints
app = Flask(__name__)
net_SGSC = utils.model.load_model(arch='SGSC')
net_SHG = utils.model.load_model(arch='SHG')
net = net_SGSC
has_gpu = torch.cuda.is_available()
device = torch.device('cpu')
cap = cv2.VideoCapture(0)
mode = 0
threshold = 0.2
use_gpu = False
current_model = 'SGSC'
seconds = None
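
# Module-level state shared between the streaming generator and the HTTP routes
# below: `mode`, `threshold`, `use_gpu` and `current_model` are overwritten by
# POST requests from the frontend, while `seconds` holds the most recent
# per-frame inference time exposed via /performance.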
def update_model():
    """Return the network selected via the /model route."""
    if current_model == 'SHG':
        return net_SHG
    else:
        return net_SGSC


def update_device(net):
    """Move the network to the device selected via the /setCuda route."""
    if use_gpu:
        if not next(net.parameters()).is_cuda:
            net = net.cuda()
    if not use_gpu:
        if next(net.parameters()).is_cuda:
            net = net.cpu()
    return net

@app.route('/')
def index():
    """Video streaming home page."""
    return render_template('index.html', has_gpu=int(has_gpu))

def gen():
    """Video streaming generator function."""
    global net, seconds
    # Read until video is completed
    while cap.isOpened():
        # Capture frame-by-frame
        ret, img = cap.read()
        if ret:
            input, c, s = prepare_image(img)  # input is tensor of shape [1, C x H x W]
            if next(net.parameters()).is_cuda:
                input = input.cuda()
            with torch.no_grad():
                start_prediction = time.time()
                heatmaps = net(input)  # returns shape (1, 4, 16, 64, 64) = (bs, hg_modules, 16 kp, height, width)
                end_prediction = time.time()
                seconds = end_prediction - start_prediction
            if heatmaps.is_cuda:
                heatmaps = heatmaps.cpu()
            # get keypoints from predicted heatmaps as (x, y) = (width, height)
            pred_keypoints = get_keyppoints(heatmaps[:, -1], threshold)  # returns (batch_size, 16, 2)
            input_res = 256
            keypoints = post_process_keypoints(pred_keypoints, input, c, s, input_res)
            img_with_keypoints = draw_cv2_keypoints(img, keypoints[0], radius=6, mode=mode)
            net = update_model()
            net = update_device(net)
            frame = cv2.imencode('.jpg', img_with_keypoints)[1].tobytes()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
            time.sleep(0.1)
        else:
            break

@app.route('/video_feed')
def video_feed():
    """Video streaming route. Put this in the src attribute of an img tag."""
    return Response(gen(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
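
# The docstring above refers to the <img> tag in the template; a minimal sketch
# of that usage (the exact markup in templates/index.html is an assumption):
#   <img src="{{ url_for('video_feed') }}">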
@app.route("/mode", methods=['GET', 'POST'])
def set_mode():
global mode
if request.method == "POST":
mode = request.json['data']
return render_template('index.html')
@app.route("/threshold", methods=['GET', 'POST'])
def set_threshold():
global threshold
if request.method == "POST":
threshold = request.json['data']
return render_template('index.html')
@app.route("/setCuda", methods=['GET', 'POST'])
def set_cuda():
global use_gpu
if request.method == "POST":
use_gpu = request.json['data']
return render_template('index.html')
@app.route("/model", methods=['GET', 'POST'])
def set_model():
global current_model
if request.method == "POST":
current_model = request.json['data']
return render_template('index.html')
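
# Example client call for the setter routes above (a sketch; the frontend code
# is assumed, but each route reads request.json['data'], so the body must be
# JSON of the form {"data": <value>}):
#   fetch('/threshold', {
#       method: 'POST',
#       headers: {'Content-Type': 'application/json'},
#       body: JSON.stringify({data: 0.3}),
#   })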
@app.route("/performance", methods=['GET'])
def get_performance():
global seconds
return jsonify({'speed': seconds})
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)