# game_model.py
import cv2
import pyautogui
from time import time
from math import hypot
import mediapipe as mp
import matplotlib.pyplot as plt
import pyttsx3
# import threading
# import time
# Initialize mediapipe pose class.
mp_pose = mp.solutions.pose
# Setup the Pose function for images.
pose_image = mp_pose.Pose(static_image_mode=True,
                          min_detection_confidence=0.5, model_complexity=1)
# Setup the Pose function for videos.
pose_video = mp_pose.Pose(static_image_mode=False, model_complexity=1, min_detection_confidence=0.7,
                          min_tracking_confidence=0.7)
# Initialize mediapipe drawing class.
mp_drawing = mp.solutions.drawing_utils
# FUNCTION TO DETECT POSE
def detectPose(image, pose, draw=False, display=False):
    '''
    This function performs pose detection on the most prominent person in an image.
    Args:
        image:   The input image with a prominent person whose pose landmarks need to be detected.
        pose:    The pose function required to perform the pose detection.
        draw:    A boolean value; if set to True, the function draws the pose landmarks on the output image.
        display: A boolean value; if set to True, the function displays the original input image and the
                 resultant image and returns nothing.
    Returns:
        output_image: The input image with the detected pose landmarks drawn, if that was specified.
        results:      The output of the pose landmarks detection on the input image.
    '''
    # Create a copy of the input image.
    output_image = image.copy()
    # Convert the image from BGR into RGB format.
    imageRGB = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # Perform the pose detection.
    results = pose.process(imageRGB)
    # Check if any landmarks are detected and are specified to be drawn.
    if results.pose_landmarks and draw:
        # Draw the pose landmarks on the output image.
        mp_drawing.draw_landmarks(image=output_image, landmark_list=results.pose_landmarks,
                                  connections=mp_pose.POSE_CONNECTIONS,
                                  landmark_drawing_spec=mp_drawing.DrawingSpec(color=(255, 255, 255),
                                                                               thickness=3, circle_radius=3),
                                  connection_drawing_spec=mp_drawing.DrawingSpec(color=(49, 125, 237),
                                                                                 thickness=2, circle_radius=2))
    # Check if the original input image and the resultant image are specified to be displayed.
    if display:
        # Display the original input image and the resultant image.
        plt.figure(figsize=[22, 22])
        plt.subplot(121)
        plt.imshow(image[:, :, ::-1])
        plt.title("Original Image")
        plt.axis('off')
        plt.subplot(122)
        plt.imshow(output_image[:, :, ::-1])
        plt.title("Output Image")
        plt.axis('off')
    # Otherwise.
    else:
        # Return the output image and the results of the pose landmarks detection.
        return output_image, results
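# A minimal usage sketch (hypothetical): run detectPose on a still image with the pose_image
# model defined above. 'sample.jpg' is only a placeholder path, not a file shipped with this project.
# sample = cv2.imread('sample.jpg')
# if sample is not None:
#     detectPose(sample, pose_image, draw=True, display=True)
#     plt.show()  # needed in non-interactive sessions to actually render the figure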
# FUNCTION FOR STARTING
def checkHandsJoined(image, results, draw=False, display=False):
    '''
    This function checks whether the hands of the person are joined or not in an image.
    Args:
        image:   The input image with a prominent person whose hands status (joined or not) needs to be classified.
        results: The output of the pose landmarks detection on the input image.
        draw:    A boolean value; if set to True, the function writes the hands status and distance on the output image.
        display: A boolean value; if set to True, the function displays the resultant image and returns nothing.
    Returns:
        output_image: The same input image but with the classified hands status written, if that was specified.
        hand_status:  The classified status of the hands, i.e. whether they are joined or not.
    '''
    # Get the height and width of the input image.
    height, width, _ = image.shape
    # Create a copy of the input image to write the hands status label on.
    output_image = image.copy()
    # Get the left wrist landmark x and y coordinates.
    left_wrist_landmark = (results.pose_landmarks.landmark[mp_pose.PoseLandmark.LEFT_WRIST].x * width,
                           results.pose_landmarks.landmark[mp_pose.PoseLandmark.LEFT_WRIST].y * height)
    # Get the right wrist landmark x and y coordinates.
    right_wrist_landmark = (results.pose_landmarks.landmark[mp_pose.PoseLandmark.RIGHT_WRIST].x * width,
                            results.pose_landmarks.landmark[mp_pose.PoseLandmark.RIGHT_WRIST].y * height)
    # Calculate the euclidean distance between the left and right wrists.
    euclidean_distance = int(hypot(left_wrist_landmark[0] - right_wrist_landmark[0],
                                   left_wrist_landmark[1] - right_wrist_landmark[1]))
    # Compare the distance between the wrists with an appropriate threshold to check if both hands are joined.
    if euclidean_distance < 130:
        # Set the hands status to joined.
        hand_status = 'Hands Joined'
        # Set the color value to green.
        color = (0, 255, 0)
    # Otherwise.
    else:
        # Set the hands status to not joined.
        hand_status = 'Hands Not Joined'
        # Set the color value to red.
        color = (0, 0, 255)
    # Check if the hands status and the wrist distance are specified to be written on the output image.
    if draw:
        # Write the classified hands status on the image.
        cv2.putText(output_image, hand_status, (10, 30),
                    cv2.FONT_HERSHEY_PLAIN, 2, color, 3)
        # Write the distance between the wrists on the image.
        cv2.putText(output_image, f'Distance: {euclidean_distance}', (10, 70),
                    cv2.FONT_HERSHEY_PLAIN, 2, color, 3)
    # Check if the output image is specified to be displayed.
    if display:
        # Display the output image.
        plt.figure(figsize=[10, 10])
        plt.imshow(output_image[:, :, ::-1])
        plt.title("Output Image")
        plt.axis('off')
    # Otherwise.
    else:
        # Return the output image and the classified hands status indicating whether the hands are joined or not.
        return output_image, hand_status
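# A minimal sketch (assuming a webcam at index 0; the test_* names are illustrative only) of checking
# the hands-joined gesture on a single captured frame, reusing detectPose and the pose_video model above.
# test_cap = cv2.VideoCapture(0)
# ok, test_frame = test_cap.read()
# if ok:
#     test_frame = cv2.flip(test_frame, 1)
#     _, test_results = detectPose(test_frame, pose_video)
#     if test_results.pose_landmarks:
#         _, status = checkHandsJoined(test_frame, test_results)
#         print(status)  # 'Hands Joined' or 'Hands Not Joined'
# test_cap.release()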
# FUNCTION FOR LEFT AND RIGHT MOVEMENT
def checkLeftRight(image, results, draw=False, display=False):
    '''
    This function finds the horizontal position (left, center, right) of the person in an image.
    Args:
        image:   The input image with a prominent person whose horizontal position needs to be found.
        results: The output of the pose landmarks detection on the input image.
        draw:    A boolean value; if set to True, the function writes the horizontal position on the output image.
        display: A boolean value; if set to True, the function displays the resultant image and returns nothing.
    Returns:
        output_image:        The same input image but with the horizontal position written, if that was specified.
        horizontal_position: The horizontal position (left, center, right) of the person in the input image.
    '''
    # Declare a variable to store the horizontal position (left, center, right) of the person.
    horizontal_position = None
    # Get the height and width of the image.
    height, width, _ = image.shape
    # Create a copy of the input image to write the horizontal position on.
    output_image = image.copy()
    # Retrieve the x-coordinate of the shoulder on the left side of the image (the RIGHT_SHOULDER landmark).
    left_x = int(
        results.pose_landmarks.landmark[mp_pose.PoseLandmark.RIGHT_SHOULDER].x * width)
    # Retrieve the x-coordinate of the shoulder on the right side of the image (the LEFT_SHOULDER landmark).
    right_x = int(
        results.pose_landmarks.landmark[mp_pose.PoseLandmark.LEFT_SHOULDER].x * width)
    # Check if the person is at the left, i.e. when both shoulder landmarks' x-coordinates
    # are less than or equal to the x-coordinate of the center of the image.
    if (right_x <= width//2 and left_x <= width//2):
        # Set the person's position to left.
        horizontal_position = 'Left'
    # Check if the person is at the right, i.e. when both shoulder landmarks' x-coordinates
    # are greater than or equal to the x-coordinate of the center of the image.
    elif (right_x >= width//2 and left_x >= width//2):
        # Set the person's position to right.
        horizontal_position = 'Right'
    # Check if the person is at the center, i.e. when the right-side shoulder's x-coordinate is greater than or equal to,
    # and the left-side shoulder's x-coordinate is less than or equal to, the x-coordinate of the center of the image.
    elif (right_x >= width//2 and left_x <= width//2):
        # Set the person's position to center.
        horizontal_position = 'Center'
    # Check if the person's horizontal position and a line at the center of the image are specified to be drawn.
    if draw:
        # Write the horizontal position of the person on the image.
        cv2.putText(output_image, horizontal_position, (5, height - 10),
                    cv2.FONT_HERSHEY_PLAIN, 2, (255, 255, 255), 3)
        # Draw a line at the center of the image.
        cv2.line(output_image, (width//2, 0),
                 (width//2, height), (255, 255, 255), 2)
    # Check if the output image is specified to be displayed.
    if display:
        # Display the output image.
        plt.figure(figsize=[10, 10])
        plt.imshow(output_image[:, :, ::-1])
        plt.title("Output Image")
        plt.axis('off')
    # Otherwise.
    else:
        # Return the output image and the person's horizontal position.
        return output_image, horizontal_position
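# Worked example: with the 1280x960 capture configured below, the center line sits at x = 640.
# If both shoulder x-coordinates are <= 640 the position is 'Left', if both are >= 640 it is
# 'Right', and if they straddle the line it is 'Center'. Note that the frame is mirrored with
# cv2.flip before detection, so the landmark named RIGHT_SHOULDER is the one that appears on
# the left side of the image for a person facing the camera.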
# FUNCTION FOR UP AND DOWN MOVEMENT
def checkJumpCrouch(image, results, MID_Y=250, draw=False, display=False):
    '''
    This function checks the posture (Jumping, Crouching or Standing) of the person in an image.
    Args:
        image:   The input image with a prominent person whose posture needs to be checked.
        results: The output of the pose landmarks detection on the input image.
        MID_Y:   The initial center y-coordinate of both shoulder landmarks of the person, recorded when the game
                 is started. This gives an idea of the person's height when standing straight.
        draw:    A boolean value; if set to True, the function writes the posture on the output image.
        display: A boolean value; if set to True, the function displays the resultant image and returns nothing.
    Returns:
        output_image: The input image with the person's posture written, if that was specified.
        posture:      The posture (Jumping, Crouching or Standing) of the person in the image.
    '''
    # Get the height and width of the image.
    height, width, _ = image.shape
    # Create a copy of the input image to write the posture label on.
    output_image = image.copy()
    # Retrieve the y-coordinate of the shoulder on the left side of the image (the RIGHT_SHOULDER landmark).
    left_y = int(
        results.pose_landmarks.landmark[mp_pose.PoseLandmark.RIGHT_SHOULDER].y * height)
    # Retrieve the y-coordinate of the shoulder on the right side of the image (the LEFT_SHOULDER landmark).
    right_y = int(
        results.pose_landmarks.landmark[mp_pose.PoseLandmark.LEFT_SHOULDER].y * height)
    # Calculate the y-coordinate of the mid-point of both shoulders.
    actual_mid_y = abs(right_y + left_y) // 2
    # Calculate the upper and lower bounds of the threshold.
    lower_bound = MID_Y - 15
    upper_bound = MID_Y + 100
    # Check if the person has jumped, i.e. when the y-coordinate of the mid-point
    # of both shoulders is less than the lower bound.
    if (actual_mid_y < lower_bound):
        # Set the posture to jumping.
        posture = 'Jumping'
    # Check if the person has crouched, i.e. when the y-coordinate of the mid-point
    # of both shoulders is greater than the upper bound.
    elif (actual_mid_y > upper_bound):
        # Set the posture to crouching.
        posture = 'Crouching'
    # Otherwise the person is standing, with the y-coordinate of the mid-point
    # of both shoulders between the upper and lower bounds.
    else:
        # Set the posture to standing straight.
        posture = 'Standing'
    # Check if the posture and a horizontal line at the threshold are specified to be drawn.
    if draw:
        # Write the posture of the person on the image.
        cv2.putText(output_image, posture, (5, height - 50),
                    cv2.FONT_HERSHEY_PLAIN, 2, (255, 255, 255), 3)
        # Draw a line at the initial center y-coordinate of the person (threshold).
        cv2.line(output_image, (0, MID_Y), (width, MID_Y), (255, 255, 255), 2)
    # Check if the output image is specified to be displayed.
    if display:
        # Display the output image.
        plt.figure(figsize=[10, 10])
        plt.imshow(output_image[:, :, ::-1])
        plt.title("Output Image")
        plt.axis('off')
    # Otherwise.
    else:
        # Return the output image and the posture indicating whether the person is standing straight, has jumped, or has crouched.
        return output_image, posture
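# Worked example: with the default MID_Y of 250, the bounds are 235 (lower) and 350 (upper),
# so a shoulder mid-point at y = 220 is classified as 'Jumping', y = 380 as 'Crouching', and
# anything in between as 'Standing'. In the game loop below, MID_Y is re-measured at the moment
# the player joins their hands, so these numbers are only illustrative.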
# FUNCTION FOR TEXT-TO-SPEECH ANNOUNCEMENTS
def speak(text):
    # Initialize the pyttsx3 text-to-speech engine, queue the given text and speak it (blocks until done).
    textspeech = pyttsx3.init()
    textspeech.say(text)
    textspeech.runAndWait()
# RUNNING MAIN PROGRAM
# Initialize the VideoCapture object to read from the webcam.
camera_video = cv2.VideoCapture(0)
camera_video.set(3, 1280)
camera_video.set(4, 960)
# Create a named window for resizing purposes.
cv2.namedWindow('Play Game', cv2.WINDOW_NORMAL)
# Initialize a variable to store the time of the previous frame.
time1 = 0
# Initialize a variable to store the state of the game (started or not).
game_started = False
# Initialize a variable to store the index of the current horizontal position of the person.
# At the start the character is at the center, so the index is 1, and it can move left (value 0) and right (value 2).
x_pos_index = 1
# Initialize a variable to store the index of the current vertical posture of the person.
# At the start the person is standing, so the index is 1, and they can crouch (value 0) and jump (value 2).
y_pos_index = 1
# Declare a variable to store the initial y-coordinate of the mid-point of both shoulders of the person.
MID_Y = None
# Initialize a counter to store the number of consecutive frames with the person's hands joined.
counter = 0
# Initialize the number of consecutive frames on which the person's hands must be joined before starting the game.
num_of_frames = 10
# outputFrame = None
# lock = threading.Lock()
speak("Congratulations on unlocking this game")
speak("Your avatar on the game will copy your actions")
speak("You get ten lives, each time you hit an obstacle you lose a life, so play carefully!")
speak("Have fun playing!")
# Iterate while the webcam is accessed successfully.
while camera_video.isOpened():
    # Read a frame.
    ok, frame = camera_video.read()
    # If the frame is not read properly, continue to the next iteration to read the next frame.
    if not ok:
        continue
    # Flip the frame horizontally for natural (selfie-view) visualization.
    frame = cv2.flip(frame, 1)
    # Get the height and width of the frame of the webcam video.
    frame_height, frame_width, _ = frame.shape
    # Perform the pose detection on the frame.
    frame, results = detectPose(frame, pose_video, draw=game_started)
    # Check if the pose landmarks in the frame are detected.
    if results.pose_landmarks:
        # Check if the game has started.
        if game_started:
            # Commands to control the horizontal movements of the character.
            # --------------------------------------------------------------------------------------------------------------
            # Get the horizontal position of the person in the frame.
            frame, horizontal_position = checkLeftRight(
                frame, results, draw=True)
            # Check if the person has moved to the left from the center or to the center from the right.
            if (horizontal_position == 'Left' and x_pos_index != 0) or (horizontal_position == 'Center' and x_pos_index == 2):
                # Press the left arrow key.
                pyautogui.press('left')
                # Update the horizontal position index of the character.
                x_pos_index -= 1
            # Check if the person has moved to the right from the center or to the center from the left.
            elif (horizontal_position == 'Right' and x_pos_index != 2) or (horizontal_position == 'Center' and x_pos_index == 0):
                # Press the right arrow key.
                pyautogui.press('right')
                # Update the horizontal position index of the character.
                x_pos_index += 1
            # --------------------------------------------------------------------------------------------------------------
        # Otherwise, if the game has not started.
        else:
            # Write the text representing the way to start the game on the frame.
            cv2.putText(frame, 'JOIN BOTH HANDS TO START THE GAME.', (5, frame_height - 10), cv2.FONT_HERSHEY_PLAIN,
                        2, (0, 255, 0), 3)
        # Command to start or resume the game.
        # ------------------------------------------------------------------------------------------------------------------
        # Check if the left and right hands are joined.
        if checkHandsJoined(frame, results)[1] == 'Hands Joined':
            # Increment the count of consecutive frames with the positive condition.
            counter += 1
            # Check if the counter is equal to the required number of consecutive frames.
            if counter == num_of_frames:
                # Command to start the game for the first time.
                # ----------------------------------------------------------------------------------------------------------
                # Check if the game has not started yet.
                if not(game_started):
                    # Update the value of the variable that stores the game state.
                    game_started = True
                    # Retrieve the y-coordinate of the shoulder on the left side of the frame (RIGHT_SHOULDER landmark).
                    left_y = int(
                        results.pose_landmarks.landmark[mp_pose.PoseLandmark.RIGHT_SHOULDER].y * frame_height)
                    # Retrieve the y-coordinate of the shoulder on the right side of the frame (LEFT_SHOULDER landmark).
                    right_y = int(
                        results.pose_landmarks.landmark[mp_pose.PoseLandmark.LEFT_SHOULDER].y * frame_height)
                    # Calculate the initial y-coordinate of the mid-point of both shoulders of the person.
                    MID_Y = abs(right_y + left_y) // 2
                    # Move to (1300, 800), then click the left mouse button to start the game.
                    pyautogui.click(x=1300, y=800, button='left')
                # ----------------------------------------------------------------------------------------------------------
                # Command to resume the game after the death of the character.
                # ----------------------------------------------------------------------------------------------------------
                # Otherwise, if the game has already started.
                else:
                    # Press the space key.
                    pyautogui.press('space')
                # ----------------------------------------------------------------------------------------------------------
                # Reset the counter to zero.
                counter = 0
        # Otherwise, if the left and right hands are not joined.
        else:
            # Reset the counter to zero.
            counter = 0
        # ------------------------------------------------------------------------------------------------------------------
        # Commands to control the vertical movements of the character.
        # ------------------------------------------------------------------------------------------------------------------
        # Check if the initial y-coordinate of the mid-point of both shoulders of the person has a value.
        if MID_Y:
            # Get the posture (jumping, crouching or standing) of the person in the frame.
            frame, posture = checkJumpCrouch(frame, results, MID_Y, draw=True)
            # Check if the person has jumped.
            if posture == 'Jumping' and y_pos_index == 1:
                # Press the up arrow key.
                pyautogui.press('up')
                # Update the vertical position index of the character.
                y_pos_index += 1
            # Check if the person has crouched.
            elif posture == 'Crouching' and y_pos_index == 1:
                # Press the down arrow key.
                pyautogui.press('down')
                # Update the vertical position index of the character.
                y_pos_index -= 1
            # Check if the person has stood back up.
            elif posture == 'Standing' and y_pos_index != 1:
                # Update the vertical position index of the character.
                y_pos_index = 1
        # ------------------------------------------------------------------------------------------------------------------
    # Otherwise, if the pose landmarks in the frame are not detected.
    else:
        # Reset the counter to zero.
        counter = 0
    # Calculate the frames updated in one second.
    # ----------------------------------------------------------------------------------------------------------------------
    # Set the time for this frame to the current time.
    time2 = time()
    # Check if the difference between the previous and the current frame time is > 0 to avoid division by zero.
    if (time2 - time1) > 0:
        # Calculate the number of frames per second.
        frames_per_second = 1.0 / (time2 - time1)
        # Write the calculated number of frames per second on the frame.
        cv2.putText(frame, 'FPS: {}'.format(int(frames_per_second)),
                    (10, 30), cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 3)
    # Update the previous frame time to this frame's time,
    # as this frame will become the previous frame in the next iteration.
    time1 = time2
    # ----------------------------------------------------------------------------------------------------------------------
    # Display the frame.
    cv2.imshow('Play Game', frame)
    # with lock:
    #     outputFrame = frame.copy()
    # Wait for 1 ms. If a key is pressed, retrieve the ASCII code of the key.
    k = cv2.waitKey(1) & 0xFF
    # Check if 'ESC' is pressed and break the loop.
    if (k == 27):
        game_started = False
        break
# Release the VideoCapture object and close the windows.
speak("Hope you enjoyed playing!")
camera_video.release()
cv2.destroyAllWindows()