import numpy as np
import mediapipe as mp
from PIL import Image
import PIL
import scipy
import scipy.ndimage
import face_recognition  # required for model_type="dlib" below

# MediaPipe face detector; model_selection=0 is the short-range model
# (best for faces within roughly 2 meters of the camera).
mp_face_detection = mp.solutions.face_detection
face_detection = mp_face_detection.FaceDetection(min_detection_confidence=0.5, model_selection=0)
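# Note: FaceDetection.process() expects an RGB image as a numpy array.
# Arrays built with np.array() from a PIL image satisfy this; BGR frames
# (e.g. from OpenCV) would need a channel swap first.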

def aligns(pil_image,enable_padding=True,output_size=512,model_type="dlib",max_people=7):
    w,h = pil_image.size
    scale = 1
    if min(w,h) > output_size*2:
        scale = min(w,h) / (output_size*2)
        new_w = int(w/scale)
        new_h = int(h/scale)
        pil_image = pil_image.resize((new_w,new_h),PIL.Image.BILINEAR)
    
    numpy_im = np.array(pil_image)
    
    #Find the locations of faces
    locations,context = get_locations(numpy_im,model_type)
    n_found = len(locations)
    print("Faces found",n_found)
    if (n_found == 0):
        return []
    
    #How many are we going to return?
    n_to_return = min(n_found,max_people)
        
    #Return the largest ones; abs() keeps the areas positive for both box
    #orders (dlib: (top, right, bottom, left), MediaPipe: [x1, y1, x2, y2])
    areas = [abs((l[2] - l[0])*(l[1] - l[3])) for l in locations]
    indices = np.argpartition(areas, -n_to_return)[-n_to_return:]
    
    #Find the landmarks
    face_landmarks_list = get_landmarks(numpy_im,[locations[i] for i in indices],context,model_type)

    #Package them up
    to_return = []
    for face in face_landmarks_list:
        im,quad = image_align(pil_image,face,enable_padding=enable_padding,output_size=output_size,transform_size=output_size)
        to_return.append((im,quad*scale))

    #Return them
    return to_return

def get_landmarks(numpy_array,locations,context,model_type="dlib"):
    '''
    model_type can be "dlib" or "mediapipe"
    context is the second result from get_locations
    '''
    assert(model_type in ["dlib","mediapipe"])

    if model_type == "dlib":
        return face_recognition.face_landmarks(numpy_array,locations)
    else:
        return [context[tuple(l)] for l in locations]

def landmarks_from_result(result,np_array):
    #MediaPipe's FaceKeyPoint order is (right_eye, left_eye, nose_tip,
    #mouth_center, right_ear_tragion, left_ear_tragion) in the *subject's*
    #frame; renaming them this way puts them in image (viewer) convention,
    #matching the labels face_recognition uses.
    keypoint_names = ["left_eye", "right_eye", "nose", "mouth", "left_ear", "right_ear"]

    landmarks = {}
    for i,k in enumerate(result.location_data.relative_keypoints):
        x = round(k.x * np_array.shape[1])
        y = round(k.y * np_array.shape[0])
        landmarks[keypoint_names[i]] = np.array([x,y])

    return landmarks
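
# Example of the dict returned above (pixel coordinates; the numbers are
# illustrative, not from a real detection):
#   {"left_eye": array([212, 304]), "right_eye": array([388, 301]),
#    "nose": array([301, 402]), "mouth": array([300, 486]),
#    "left_ear": array([140, 330]), "right_ear": array([462, 328])}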

def get_locations(numpy_array,model_type="dlib"):
    '''
    model_type can be "dlib" or "mediapipe"
    returns face locations and a context for fast landmark finding
    '''
    assert(model_type in ["dlib","mediapipe"])

    if model_type == "dlib":
        return face_recognition.face_locations(numpy_array),None
    else:
        results = face_detection.process(numpy_array)
        im_h,im_w = numpy_array.shape[:2]
        box_list = []
        landmarks = {}
        if results.detections is None:
            return box_list,landmarks
        for result in results.detections:
            x = round(result.location_data.relative_bounding_box.xmin*im_w)
            y = round(result.location_data.relative_bounding_box.ymin*im_h)
            w = round(result.location_data.relative_bounding_box.width*im_w)
            h = round(result.location_data.relative_bounding_box.height*im_h)
            box_list.append([x,y,x+w-1,y+h-1])
            landmarks[(x,y,x+w-1,y+h-1)] = landmarks_from_result(result,numpy_array)

        return box_list,landmarks
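
# Note: the two detectors use different box orders. face_recognition returns
# (top, right, bottom, left) tuples, while the MediaPipe path above returns
# [left, top, right, bottom] lists. The area computations in align()/aligns()
# therefore take abs() so that both orders work.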

def align(pil_image,enable_padding=True,output_size=512,model_type="dlib"):
    w,h = pil_image.size
    scale = 1
    if min(w,h) > output_size*2:
        scale = min(w,h) / (output_size*2)
        new_w = int(w/scale)
        new_h = int(h/scale)
        pil_image = pil_image.resize((new_w,new_h),PIL.Image.BILINEAR)
    
    numpy_im = np.array(pil_image)
    locations,context = get_locations(numpy_im,model_type)
    if (len(locations) == 0):
        return None
    areas = [abs((l[2] - l[0])*(l[1] - l[3])) for l in locations]  #abs() handles both box orders
    i = np.argmax(areas)
    face_landmarks_list = get_landmarks(numpy_im,[locations[i]],context,model_type)
    im,quad = image_align(Image.fromarray(numpy_im),face_landmarks_list[0],enable_padding=enable_padding,output_size=output_size,transform_size=4*output_size)
    return im,quad*scale
    
def image_align(img, lm, output_size=1024, transform_size=4096, enable_padding=True, x_scale=1, y_scale=1, em_scale=0.1, alpha=False):
    # Align function from FFHQ dataset pre-processing step
    # https://github.com/NVlabs/ffhq-dataset/blob/master/download_ffhq.py

    # Interpret the landmarks differently depending on which face-finding model was used:
    # MediaPipe gives a single (x, y) point per feature, dlib gives a list of contour points.
    if isinstance(lm["left_eye"], np.ndarray) and lm["left_eye"].size == 2:
        #MediaPipe
        eye_left = lm["left_eye"]
        eye_right = lm["right_eye"]
        mouth_avg = lm["mouth"]
    else:
        #DLIB
        eye_left = np.mean(lm["left_eye"], axis=0)
        eye_right = np.mean(lm["right_eye"], axis=0)
        mouth_avg = (np.mean( lm["top_lip"],axis=0) + np.mean(lm["bottom_lip"],axis=0)) * 0.5

    # Calculate auxiliary vectors.
    eye_avg      = (eye_left + eye_right) * 0.5
    eye_to_eye   = eye_right - eye_left
    eye_to_mouth = mouth_avg - eye_avg

    # Choose oriented crop rectangle.
    x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
    x /= np.hypot(*x)
    x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
    x *= x_scale
    y = np.flipud(x) * [-y_scale, y_scale]
    c = eye_avg + eye_to_mouth * em_scale
    quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
    qsize = np.hypot(*x) * 2
    original_quad = np.copy(quad)
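    # quad holds the four corners of the oriented crop rectangle in the order
    # top-left, bottom-left, bottom-right, top-right (the order PIL's QUAD
    # transform expects); qsize is its side length. original_quad is kept so
    # the caller can map the crop back into the input image's coordinates.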

    # Load in-the-wild image.
    #img = img.convert('RGBA').convert('RGB')  #I've already taken care of this

    # Shrink.
    shrink = int(np.floor(qsize / output_size * 0.5))
    if shrink > 1:
        rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink)))
        img = img.resize(rsize, PIL.Image.LANCZOS)
        quad /= shrink
        qsize /= shrink

    # Crop.
    border = max(int(np.rint(qsize * 0.1)), 3)
    crop = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
    crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]), min(crop[3] + border, img.size[1]))
    if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
        img = img.crop(crop)
        quad -= crop[0:2]

    # Pad.
    pad = (int(np.floor(min(quad[:,0]))), int(np.floor(min(quad[:,1]))), int(np.ceil(max(quad[:,0]))), int(np.ceil(max(quad[:,1]))))
    pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0), max(pad[3] - img.size[1] + border, 0))
    if enable_padding and max(pad) > border - 4:
        pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
        img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
        h, w, _ = img.shape
        y, x, _ = np.ogrid[:h, :w, :1]
        mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w-1-x) / pad[2]), 1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h-1-y) / pad[3]))
        blur = qsize * 0.02
        img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
        img += (np.median(img, axis=(0,1)) - img) * np.clip(mask, 0.0, 1.0)
        img = np.uint8(np.clip(np.rint(img), 0, 255))
        if alpha:
            mask = 1-np.clip(3.0 * mask, 0.0, 1.0)
            mask = np.uint8(np.clip(np.rint(mask*255), 0, 255))
            img = np.concatenate((img, mask), axis=2)
            img = PIL.Image.fromarray(img, 'RGBA')
        else:
            img = PIL.Image.fromarray(img, 'RGB')
        quad += pad[:2]

    # Transform.
    img = img.transform((transform_size, transform_size), PIL.Image.QUAD, (quad + 0.5).flatten(), PIL.Image.BILINEAR)
    if output_size < transform_size:
        img = img.resize((output_size, output_size), PIL.Image.LANCZOS)

    # Save aligned image.
    return img,original_quad
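
# --- Usage sketch ---
# A minimal, hypothetical example of driving the functions above; the file
# names are placeholders, not part of the original module.
if __name__ == "__main__":
    input_image = Image.open("photo.jpg").convert("RGB")  # hypothetical input path

    # Align the single largest face with the MediaPipe detector.
    result = align(input_image, output_size=512, model_type="mediapipe")
    if result is not None:
        aligned_im, quad = result
        aligned_im.save("aligned.png")

    # Align up to max_people faces; each entry is an (image, quad) pair, where
    # quad locates the aligned crop in the original image's coordinates.
    for idx, (face_im, face_quad) in enumerate(aligns(input_image, model_type="mediapipe")):
        face_im.save(f"aligned_{idx}.png")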