Real-time collaboration for Jupyter Notebooks, Linux Terminals, LaTeX, VS Code, R IDE, and more,
all in one place. Commercial Alternative to JupyterHub.
Path: blob/main/insightface_func/face_detect_crop_multi.py
Views: 792
from __future__ import division

import collections
import glob
import os
import os.path as osp

import cv2
import numpy as np

from insightface.model_zoo import model_zoo
from insightface.utils import face_align

__all__ = ['Face_detect_crop', 'Face']

# Lightweight face record; every field defaults to None so partially
# populated results (e.g. detection without a recognition embedding)
# are still valid instances.
Face = collections.namedtuple('Face', [
    'bbox', 'kps', 'det_score', 'embedding', 'gender', 'age',
    'embedding_norm', 'normed_embedding',
    'landmark'
])

Face.__new__.__defaults__ = (None, ) * len(Face._fields)


class Face_detect_crop:
    """Face detector that returns the keypoints of *every* face in an image.

    Scans ``<root>/<name>`` for ONNX models, loads them through insightface's
    model zoo, and keeps one model per task name.  A model whose task is
    'detection' is mandatory; it is used by :meth:`get` to locate faces.
    """

    def __init__(self, name, root='~/.insightface_func/models'):
        """Load all ONNX models found under ``root/name``.

        Parameters
        ----------
        name : str
            Model-pack directory name below ``root``.
        root : str
            Base models directory; ``~`` is expanded.

        Raises
        ------
        AssertionError
            If no model with taskname 'detection' was found.
        """
        self.models = {}
        root = os.path.expanduser(root)
        # Sort for a deterministic load order, so which duplicate of a task
        # "wins" does not depend on filesystem enumeration order.
        onnx_files = sorted(glob.glob(osp.join(root, name, '*.onnx')))
        for onnx_file in onnx_files:
            # Skip auxiliary self-generated files shipped alongside the
            # real models.  (The path always has a non-empty prefix, so a
            # substring test is equivalent to the old `.find(...) > 0`.)
            if '_selfgen_' in onnx_file:
                continue
            model = model_zoo.get_model(onnx_file)
            if model.taskname not in self.models:
                print('find model:', onnx_file, model.taskname)
                self.models[model.taskname] = model
            else:
                # First model loaded for a task wins; later ones are dropped.
                print('duplicated model task type, ignore:', onnx_file, model.taskname)
                del model
        assert 'detection' in self.models
        self.det_model = self.models['detection']

    def prepare(self, ctx_id, det_thresh=0.5, det_size=(640, 640)):
        """Initialise every loaded model on the given device context.

        Parameters
        ----------
        ctx_id : int
            Device context id passed through to each model's ``prepare``
            (conventionally a GPU index; negative means CPU — confirm
            against the insightface model zoo in use).
        det_thresh : float
            Detection confidence threshold, used later by :meth:`get`.
        det_size : tuple[int, int]
            Input size for the detection model; must not be None.
        """
        self.det_thresh = det_thresh
        assert det_size is not None
        print('set det-size:', det_size)
        self.det_size = det_size
        for taskname, model in self.models.items():
            if taskname == 'detection':
                # Only the detector takes an explicit input size.
                model.prepare(ctx_id, input_size=det_size)
            else:
                model.prepare(ctx_id)

    def get(self, img, crop_size, max_num=0):
        """Detect all faces in ``img`` and return their keypoints.

        Parameters
        ----------
        img : numpy.ndarray
            Input image (BGR, as read by cv2 — TODO confirm with callers).
        crop_size : int
            Unused by the current implementation; retained for API
            compatibility with the aligned-crop variant this file evolved
            from (see git history / the former commented-out code).
        max_num : int
            Maximum number of faces to return; 0 means no limit
            (forwarded to the detector's ``detect``).

        Returns
        -------
        list | None
            ``None`` when no face is found; otherwise one entry per
            detected face, each a keypoint array (or ``None`` when the
            detector produced no keypoints at all).
        """
        bboxes, kpss = self.det_model.detect(img,
                                             threshold=self.det_thresh,
                                             max_num=max_num,
                                             metric='default')
        if bboxes.shape[0] == 0:
            return None
        if kpss is None:
            # Boxes but no landmarks: keep one slot per face so the caller
            # can still zip results against detections.
            return [None] * bboxes.shape[0]
        return [kpss[i] for i in range(bboxes.shape[0])]