-
Notifications
You must be signed in to change notification settings - Fork 6
/
LandmarkDetection.py
108 lines (84 loc) · 4.29 KB
/
LandmarkDetection.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
from FaceNet3D import FaceNet3D as Helpers
import cv2
import numpy as np
import dlib
from imutils import face_utils
class LandmarkDetection(Helpers):
    """Facial-landmark utilities built on dlib's 68-point shape predictor.

    Provides:
      * ``cutout_mask_array`` — masked 224x224 face crop with the mouth
        interior removed,
      * ``detect_landmarks_for_loss`` — the reduced 46-landmark set used
        by the original loss layer.
    """

    # Indices (into dlib's 68-point model) of the 46 landmarks kept for the
    # loss layer: brows, eye corners, nose, mouth minus corners 60/64, chin.
    _LOSS_LANDMARKS = (
        17, 18, 19, 20, 21,                   # left brow
        22, 23, 24, 25, 26,                   # right brow
        36, 39, 42, 45,                       # left and right eye limits
        27, 28, 29, 30, 31, 32, 33, 34, 35,   # nose
        48, 49, 50, 51, 52, 53, 54, 55, 56,
        57, 58, 59, 61, 62, 63, 65, 66, 67,   # mouth
        6, 7, 8, 9, 10,                       # chin
    )

    def __init__(self):
        """
        Class initializer: set up the dlib frontal face detector and the
        68-point landmark predictor. ``self.predictor_path`` is supplied by
        the Helpers (FaceNet3D) base class.
        """
        super().__init__()
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor(self.predictor_path)

    @staticmethod
    def face_remap(shape):
        """
        Return the convex hull of the detected landmarks.

        :param shape: <class 'numpy.ndarray'> with shape (68, 2)
        :return: convex hull points, <class 'numpy.ndarray'> with shape (k, 1, 2)
        """
        return cv2.convexHull(shape)

    def cutout_mask_array(self, image, flip_rgb):
        """
        Detect landmarks, mask out everything but the face, remove the mouth
        interior, and crop a 224x224 window centered on the nose tip.

        :param image: <class 'numpy.ndarray'> with shape (m, n, 3), BGR
        :param flip_rgb: boolean: if True return RGB image else return BGR
        :return: masked face crop, <class 'numpy.ndarray'> with shape (224, 224, 3)
        :raises ValueError: if no face is detected in the image
        """
        out_face = np.zeros_like(image)
        # detector and predictor both work on a grayscale image
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        faces = self.detector(gray)
        if len(faces) == 0:
            # Fail loudly: the old code fell through and raised a confusing
            # NameError on the unbound ``center`` below.
            raise ValueError("No face detected in the input image.")
        for face in faces:
            landmarks = self.predictor(gray, face)
            shape = face_utils.shape_to_np(landmarks)
            # landmark 33 is the nose tip — used as the crop center
            center = shape[33, :]
            feature_mask = np.zeros((image.shape[0], image.shape[1]))
            # fill the convex hull of the face with 1s
            remapped_shape = self.face_remap(shape)
            cv2.fillConvexPoly(feature_mask, remapped_shape[0:27], 1)
            # inner-mouth polygon: landmarks 60..67
            mouth = np.array([shape[60:68]], dtype=np.int32)
            # zero out the mouth interior
            cv2.fillConvexPoly(feature_mask, mouth, 0)
            # NOTE: np.bool was removed in NumPy 1.24 — use the builtin
            feature_mask = feature_mask.astype(bool)
            out_face[feature_mask] = image[feature_mask]
        # 224x224 crop around the nose tip of the last detected face;
        # NOTE(review): a face within 112 px of the border yields a smaller
        # (clipped) crop — confirm upstream guarantees a centered face
        out_face = out_face[center[1] - 112:center[1] + 112, center[0] - 112:center[0] + 112]
        if flip_rgb:
            out_face = cv2.cvtColor(out_face, cv2.COLOR_BGR2RGB)
        return out_face

    def detect_landmarks_for_loss(self, image):
        """
        Return landmark coordinates for the original loss layer.

        The image is assumed to already be a tight face crop, so the whole
        frame (``dlib.get_rect``) is used as the detection rectangle.

        :param image: <class 'numpy.ndarray'> with shape self.IMG_SHAPE, RGB
        :return: <class 'numpy.ndarray'> with shape (46, 2), dtype int32
        """
        gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
        # detect landmarks and transform to image coordinates
        landmarks = self.predictor(gray, dlib.get_rect(gray))
        shape = face_utils.shape_to_np(landmarks)
        # select the 46 kept landmarks with a single fancy-index
        return np.asarray(shape[list(self._LOSS_LANDMARKS)], dtype=np.int32)