shades.py
from my_CNN_model import *
import cv2
import numpy as np

# Load the model built in the previous step
my_model = load_my_CNN_model('my_model')

# Face cascade to detect faces
face_cascade = cv2.CascadeClassifier('cascades/haarcascade_frontalface_default.xml')

# Define the upper and lower boundaries for a color to be considered "Blue"
blueLower = np.array([100, 60, 60])
blueUpper = np.array([140, 255, 255])

# Define a 5x5 kernel for erosion and dilation
kernel = np.ones((5, 5), np.uint8)

# Define filters
filters = ['images/sunglasses.png']
filterIndex = 0

# Load the video
camera = cv2.VideoCapture(0)
# Keep looping
while True:
    # Grab the current frame from the camera and mirror it
    (grabbed, frame) = camera.read()
    frame = cv2.flip(frame, 1)
    frame2 = np.copy(frame)
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)  # computed but not used in this script
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Detect faces
    faces = face_cascade.detectMultiScale(gray, 1.25, 6)
    for (x, y, w, h) in faces:
        # Grab the face
        gray_face = gray[y:y+h, x:x+w]
        color_face = frame[y:y+h, x:x+w]

        # Normalize to match the input format of the model - pixel range [0, 1]
        gray_normalized = gray_face / 255

        # Resize it to 96x96 to match the input format of the model
        original_shape = gray_face.shape  # (h, w) of the detected face, kept to map results back
        face_resized = cv2.resize(gray_normalized, (96, 96), interpolation=cv2.INTER_AREA)
        face_resized_copy = face_resized.copy()
        face_resized = face_resized.reshape(1, 96, 96, 1)

        # Predict the keypoints using the model
        keypoints = my_model.predict(face_resized)

        # De-normalize the keypoint values: map model output from [-1, 1] back to 96x96 pixel coordinates
        keypoints = keypoints * 48 + 48

        # Map the keypoints back to the original image
        face_resized_color = cv2.resize(color_face, (96, 96), interpolation=cv2.INTER_AREA)
        face_resized_color2 = np.copy(face_resized_color)

        # Pair the predicted (x, y) coordinates together
        points = []
        for i, co in enumerate(keypoints[0][0::2]):
            points.append((co, keypoints[0][1::2][i]))
        # Add FILTER to the frame
        # (points[7]/points[9] are used as the eyebrow outer ends and points[10] as the
        # nose tip; the standard 15-keypoint ordering is assumed here)
        sunglasses = cv2.imread(filters[filterIndex], cv2.IMREAD_UNCHANGED)
        sunglass_width = int((points[7][0] - points[9][0]) * 1.1)
        sunglass_height = int((points[10][1] - points[8][1]) / 1.1)
        sunglass_resized = cv2.resize(sunglasses, (sunglass_width, sunglass_height), interpolation=cv2.INTER_CUBIC)

        # Treat non-black pixels of the sunglasses image as opaque and paste them over the face
        transparent_region = sunglass_resized[:, :, :3] != 0
        face_resized_color[int(points[9][1]):int(points[9][1]) + sunglass_height,
                           int(points[9][0]):int(points[9][0]) + sunglass_width,
                           :][transparent_region] = sunglass_resized[:, :, :3][transparent_region]

        # Resize the face_resized_color image back to its original shape
        frame[y:y+h, x:x+w] = cv2.resize(face_resized_color, original_shape, interpolation=cv2.INTER_CUBIC)
        # Add KEYPOINTS to frame2 (keypoint visualization; not displayed in this script)
        for keypoint in points:
            cv2.circle(face_resized_color2, (int(keypoint[0]), int(keypoint[1])), 1, (0, 255, 0), 1)

        frame2[y:y+h, x:x+w] = cv2.resize(face_resized_color2, original_shape, interpolation=cv2.INTER_CUBIC)
    # Show the frame
    cv2.imshow("Selfie Filters", frame)

    # If the 'q' key is pressed, stop the loop
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

# Cleanup the camera and close any open windows
camera.release()
cv2.destroyAllWindows()