|
| 1 | +# built-in dependencies |
| 2 | +import os |
| 3 | +from typing import List |
| 4 | + |
| 5 | +# 3rd party dependencies |
| 6 | +import numpy as np |
| 7 | +import cv2 |
| 8 | +import gdown |
| 9 | + |
| 10 | +# project dependencies |
| 11 | +from deepface.commons import folder_utils |
| 12 | +from deepface.models.Detector import Detector, FacialAreaRegion |
| 13 | +from deepface.commons import logger as log |
| 14 | + |
| 15 | +logger = log.get_singletonish_logger() |
| 16 | + |
| 17 | +# pylint: disable=c-extension-no-member |
| 18 | + |
| 19 | +WEIGHTS_URL = "https://github.com/Star-Clouds/CenterFace/raw/master/models/onnx/centerface.onnx" |
| 20 | + |
| 21 | + |
class CenterFaceClient(Detector):
    """CenterFace face detector: ONNX weights run through OpenCV's dnn module."""

    def __init__(self):
        # BUG: model must be flushed for each call
        # self.model = self.build_model()
        pass

    def build_model(self) -> "CenterFace":
        """
        Download pre-trained weights of CenterFace model if necessary and load built model

        Returns:
            model (CenterFace): a ready-to-use CenterFace wrapper

        Raises:
            ValueError: if the weight file cannot be downloaded
        """
        weights_path = f"{folder_utils.get_deepface_home()}/.deepface/weights/centerface.onnx"
        if not os.path.isfile(weights_path):
            logger.info(f"Downloading CenterFace weights from {WEIGHTS_URL} to {weights_path}...")
            try:
                gdown.download(WEIGHTS_URL, weights_path, quiet=False)
            except Exception as err:
                raise ValueError(
                    f"Exception while downloading CenterFace weights from {WEIGHTS_URL}."
                    f"You may consider to download it to {weights_path} manually."
                ) from err
            logger.info(f"CenterFace model is just downloaded to {os.path.basename(weights_path)}")

        return CenterFace(weight_path=weights_path)

    def detect_faces(self, img: np.ndarray) -> List["FacialAreaRegion"]:
        """
        Detect and align face with CenterFace

        Args:
            img (np.ndarray): pre-loaded image as numpy array

        Returns:
            results (List[FacialAreaRegion]): A list of FacialAreaRegion objects
        """
        resp = []

        # detection threshold is overridable via environment variable
        threshold = float(os.getenv("CENTERFACE_THRESHOLD", "0.80"))

        # BUG: model causes problematic results from 2nd call if it is not flushed,
        # so a fresh model is built for every invocation.
        detections, landmarks = self.build_model().forward(
            img, img.shape[0], img.shape[1], threshold=threshold
        )

        for detection, landmark in zip(detections, landmarks):
            boxes, confidence = detection[:4], detection[4]

            # FIX: the raw detections are float values; cast box coordinates to
            # int pixel positions, consistent with the landmark casts below.
            x = int(boxes[0])
            y = int(boxes[1])
            w = int(boxes[2]) - x
            h = int(boxes[3]) - y

            right_eye = (int(landmark[0]), int(landmark[1]))
            left_eye = (int(landmark[2]), int(landmark[3]))
            # nose = (int(landmark[4]), int(landmark[5]))
            # mouth_right = (int(landmark[6]), int(landmark[7]))
            # mouth_left = (int(landmark[8]), int(landmark[9]))

            facial_area = FacialAreaRegion(
                x=x,
                y=y,
                w=w,
                h=h,
                left_eye=left_eye,
                right_eye=right_eye,
                # clamp score into [0, 1]
                confidence=min(max(0, float(confidence)), 1.0),
            )
            resp.append(facial_area)

        return resp
| 96 | + |
| 97 | + |
class CenterFace:
    """
    Minimal CenterFace inference wrapper (ONNX graph executed via cv2.dnn).

    This class is heavily inspired from
    github.com/Star-Clouds/CenterFace/blob/master/prj-python/centerface.py
    """

    def __init__(self, weight_path: str):
        # Load the ONNX graph with OpenCV's dnn module (no onnxruntime needed).
        self.net = cv2.dnn.readNetFromONNX(weight_path)
        # Per-call geometry: network input size (padded to a multiple of 32)
        # and the scale factors to map results back to the original image.
        self.img_h_new, self.img_w_new, self.scale_h, self.scale_w = 0, 0, 0, 0

    def forward(self, img, height, width, threshold=0.5):
        """
        Detect faces in one image.

        Args:
            img: pre-loaded image as a numpy array.
            height: original image height in pixels.
            width: original image width in pixels.
            threshold (float): minimum heatmap score for a cell to yield a box.

        Returns:
            (dets, lms): dets is an (N, 5) float32 array of
            [x1, y1, x2, y2, score]; lms is an (N, 10) float32 array of five
            (x, y) landmark pairs, both in original-image coordinates.
        """
        self.img_h_new, self.img_w_new, self.scale_h, self.scale_w = self.transform(height, width)
        return self.inference_opencv(img, threshold)

    def inference_opencv(self, img, threshold):
        # Build a 1x3xHxW float blob resized to the padded input size;
        # swapRB converts BGR -> RGB channel order.
        blob = cv2.dnn.blobFromImage(
            img,
            scalefactor=1.0,
            size=(self.img_w_new, self.img_h_new),
            mean=(0, 0, 0),
            swapRB=True,
            crop=False,
        )
        self.net.setInput(blob)
        # "537".."540" are the ONNX node names of the four output heads —
        # presumably heatmap, scale, offset, landmarks in that order;
        # NOTE(review): confirm against the upstream model graph.
        heatmap, scale, offset, lms = self.net.forward(["537", "538", "539", "540"])
        return self.postprocess(heatmap, lms, offset, scale, threshold)

    def transform(self, h, w):
        # Round each dimension up to a multiple of 32 and remember how much
        # that stretched each axis.
        img_h_new, img_w_new = int(np.ceil(h / 32) * 32), int(np.ceil(w / 32) * 32)
        scale_h, scale_w = img_h_new / h, img_w_new / w
        return img_h_new, img_w_new, scale_h, scale_w

    def postprocess(self, heatmap, lms, offset, scale, threshold):
        # Decode in network-input coordinates first ...
        dets, lms = self.decode(
            heatmap, scale, offset, lms, (self.img_h_new, self.img_w_new), threshold=threshold
        )
        if len(dets) > 0:
            # ... then divide x columns by scale_w and y columns by scale_h
            # to map back to original-image coordinates.
            dets[:, 0:4:2], dets[:, 1:4:2] = (
                dets[:, 0:4:2] / self.scale_w,
                dets[:, 1:4:2] / self.scale_h,
            )
            lms[:, 0:10:2], lms[:, 1:10:2] = (
                lms[:, 0:10:2] / self.scale_w,
                lms[:, 1:10:2] / self.scale_h,
            )
        else:
            # No detections: return empty arrays with the documented shapes.
            dets = np.empty(shape=[0, 5], dtype=np.float32)
            lms = np.empty(shape=[0, 10], dtype=np.float32)
        return dets, lms

    def decode(self, heatmap, scale, offset, landmark, size, threshold=0.1):
        """
        Turn the four raw output tensors into boxes and landmarks.

        Each heatmap cell above `threshold` yields one candidate box; the
        * 4 factors map feature-map cells back to input pixels, box size
        comes from exp(scale), and position is refined by offset.
        Candidates are pruned with greedy NMS at IoU 0.3.
        """
        heatmap = np.squeeze(heatmap)
        scale0, scale1 = scale[0, 0, :, :], scale[0, 1, :, :]
        offset0, offset1 = offset[0, 0, :, :], offset[0, 1, :, :]
        # c0 = row (y) indices, c1 = column (x) indices of confident cells
        c0, c1 = np.where(heatmap > threshold)
        boxes, lms = [], []
        if len(c0) > 0:
            # pylint:disable=consider-using-enumerate
            for i in range(len(c0)):
                # s0 = box height, s1 = box width, in input pixels
                s0, s1 = np.exp(scale0[c0[i], c1[i]]) * 4, np.exp(scale1[c0[i], c1[i]]) * 4
                o0, o1 = offset0[c0[i], c1[i]], offset1[c0[i], c1[i]]
                s = heatmap[c0[i], c1[i]]
                # top-left corner, clamped to stay inside the image
                x1, y1 = max(0, (c1[i] + o1 + 0.5) * 4 - s1 / 2), max(
                    0, (c0[i] + o0 + 0.5) * 4 - s0 / 2
                )
                x1, y1 = min(x1, size[1]), min(y1, size[0])
                boxes.append([x1, y1, min(x1 + s1, size[1]), min(y1 + s0, size[0]), s])
                lm = []
                for j in range(5):
                    # NOTE(review): landmark channels appear interleaved as
                    # (y, x) pairs — the odd channel is scaled by the box
                    # width (s1) to give x. Confirm against upstream model.
                    lm.append(landmark[0, j * 2 + 1, c0[i], c1[i]] * s1 + x1)
                    lm.append(landmark[0, j * 2, c0[i], c1[i]] * s0 + y1)
                lms.append(lm)
            boxes = np.asarray(boxes, dtype=np.float32)
            keep = self.nms(boxes[:, :4], boxes[:, 4], 0.3)
            boxes = boxes[keep, :]
            lms = np.asarray(lms, dtype=np.float32)
            lms = lms[keep, :]
        return boxes, lms

    def nms(self, boxes, scores, nms_thresh):
        """
        Classic greedy non-maximum suppression.

        Args:
            boxes: (N, 4) array of [x1, y1, x2, y2] corner boxes.
            scores: (N,) confidence score per box.
            nms_thresh: IoU at or above which a lower-scoring box is dropped.

        Returns:
            list of indices into `boxes` to keep, best score first.
        """
        x1 = boxes[:, 0]
        y1 = boxes[:, 1]
        x2 = boxes[:, 2]
        y2 = boxes[:, 3]
        # +1 keeps the original implementation's inclusive-pixel convention
        areas = (x2 - x1 + 1) * (y2 - y1 + 1)
        # process boxes from highest to lowest score
        order = np.argsort(scores)[::-1]
        num_detections = boxes.shape[0]
        suppressed = np.zeros((num_detections,), dtype=bool)

        keep = []
        for _i in range(num_detections):
            i = order[_i]
            if suppressed[i]:
                continue
            keep.append(i)

            ix1 = x1[i]
            iy1 = y1[i]
            ix2 = x2[i]
            iy2 = y2[i]
            iarea = areas[i]

            # suppress every remaining box that overlaps box i too much
            for _j in range(_i + 1, num_detections):
                j = order[_j]
                if suppressed[j]:
                    continue

                # intersection rectangle between boxes i and j
                xx1 = max(ix1, x1[j])
                yy1 = max(iy1, y1[j])
                xx2 = min(ix2, x2[j])
                yy2 = min(iy2, y2[j])
                w = max(0, xx2 - xx1 + 1)
                h = max(0, yy2 - yy1 + 1)

                inter = w * h
                # intersection-over-union
                ovr = inter / (iarea + areas[j] - inter)
                if ovr >= nms_thresh:
                    suppressed[j] = True

        return keep
0 commit comments