run.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 3 17:32:40 2020
@author: berk
"""
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
from imutils import paths
from math import pi
import os
import cv2
import time
#------------------------------------------------------
# This block works around GPU memory-allocation errors on my system by capping
# TensorFlow's GPU memory usage. It may not be necessary on yours; if it is not,
# removing it may improve performance.
from tensorflow import Session, ConfigProto
from tensorflow.keras.backend import set_session

config = ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.3  # use at most 30% of GPU memory
set_session(Session(config=config))
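# Note: Session and ConfigProto are TensorFlow 1.x APIs; on TensorFlow 2.x the
# same settings live under tf.compat.v1 (this script assumes a TF 1.x setup).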
#--------------------------------------------------------
input_shape = (66, 200, 3)
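# Network input size as (height, width, channels); the test frames below are
# cropped and resized to match. (Kept here for reference; not used directly.)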
angle = []
smooth_angle = 0
test_ids = []
# Read the steering angles from disk. data.txt lists the frame filenames and
# their steering angles in degrees; tokens ending in "g" are the ".jpg" names
# and are skipped, the remaining tokens are the angles.
with open("data.txt") as f:
    data = f.read().split()
for token in data:
    if token[-1] == 'g':  # frame filename, not an angle
        continue
    angle.append(float(token) * pi / 180)  # convert degrees to radians
model = load_model("model.h5")  # load the trained model from disk
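# model.h5 is assumed to be a trained network that takes the 66x200x3 input above
# and outputs a single steering angle in radians.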
sahin_direksiyon = cv2.imread("sahin_direksiyon_simiti.png")  # steering-wheel image ("direksiyon simidi" = steering wheel)
sahin_konsol = cv2.imread("sahin_on_konsol_2..jpg")  # front-console image of the car ("on konsol" = front console); optional, see below
# Resize the wheel image and get its shape for the rotation below
sahin_direksiyon = cv2.resize(sahin_direksiyon, (270, 210))
rows, cols, level = sahin_direksiyon.shape
test_paths = list(paths.list_images(os.getcwd()+"/test"))
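# The test/ directory is assumed to hold frames named by integer id, e.g. "0.jpg", "1.jpg", ...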
# Collect the test image ids (the numeric filenames without the ".jpg" extension)
for p in test_paths:
    name = p.split(os.path.sep)[-1]
    test_ids.append(int(name[:-4]))
test_ids.sort()
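# Sorting the ids makes the frames play back in ascending numeric order.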
#test_ids = test_ids[15000:]  # optional: skip the first 15000 frames before starting playback
for i in test_ids:
    image = cv2.imread(os.getcwd() + "/test/" + str(i) + ".jpg")  # read the frame from disk
    image_show = image
    image = cv2.resize(image[-150:], (200, 66))  # crop to the bottom 150 rows and resize to the network input size
    image = img_to_array(image) / 255  # convert to a float array scaled to [0, 1]
    cv2.imshow("Self Driving Car", cv2.resize(image_show, (800, 398)))  # show the current frame
    result = float(-model.predict(image[None]) * 180.0 / pi)  # predict the steering angle and convert radians -> degrees
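    # The sign is flipped because cv2.getRotationMatrix2D treats positive angles as
    # counter-clockwise, presumably so the wheel graphic turns in the intuitive
    # direction on screen; the print below flips it back before reporting.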
print("Actual Angle= {} Predicted Angle= {}".format(str(angle[i]),str(-result)))
    # Rotate the steering-wheel image gradually toward the predicted angle rather
    # than snapping to it, so the wheel animation looks smoother.
    if result != smooth_angle:  # avoid division by zero when the angles already match
        smooth_angle += 0.2 * pow(abs(result - smooth_angle), 2.0 / 3.0) * (result - smooth_angle) / abs(result - smooth_angle)
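    # The update step scales with |result - smooth_angle|**(2/3): the wheel turns
    # quickly while it is far from the target angle and eases in as it gets close.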
    M = cv2.getRotationMatrix2D((cols / 2, rows / 2), smooth_angle, 1)  # rotate about the wheel's center by the smoothed angle
    dst = cv2.warpAffine(sahin_direksiyon, M, (cols, rows))
    #sahin_konsol[20:230, 30:300] = dst  # optional, just for fun: uncomment this and show sahin_konsol instead of dst below to overlay the rotating wheel on the car's front console
    cv2.imshow("Sahin Wheel", dst)
    # Small delay so playback isn't too fast on Optimus Prime-level computers
    time.sleep(0.02)
    if cv2.waitKey(1) & 0xFF == ord("q"):  # press "q" to quit
        break
cv2.destroyAllWindows()