face_detect_crop_single.py

from __future__ import division
import collections
import numpy as np
import glob
import os
import os.path as osp
import cv2
from insightface.model_zoo import model_zoo
from insightface.utils import face_align

__all__ = ['Face_detect_crop', 'Face']

# Lightweight container for per-face attributes; every field defaults to None.
Face = collections.namedtuple('Face', [
    'bbox', 'kps', 'det_score', 'embedding', 'gender', 'age',
    'embedding_norm', 'normed_embedding',
    'landmark'
])
Face.__new__.__defaults__ = (None, ) * len(Face._fields)


class Face_detect_crop:
    def __init__(self, name, root='~/.insightface_func/models'):
        # Scan <root>/<name> for ONNX models and keep one model per task type.
        self.models = {}
        root = os.path.expanduser(root)
        onnx_files = glob.glob(osp.join(root, name, '*.onnx'))
        onnx_files = sorted(onnx_files)
        for onnx_file in onnx_files:
            # Skip self-generated intermediate files.
            if onnx_file.find('_selfgen_') > 0:
                # print('ignore:', onnx_file)
                continue
            model = model_zoo.get_model(onnx_file)
            if model.taskname not in self.models:
                print('find model:', onnx_file, model.taskname)
                self.models[model.taskname] = model
            else:
                print('duplicated model task type, ignore:', onnx_file, model.taskname)
                del model
        # A detection model is required; all other task types are optional.
        assert 'detection' in self.models
        self.det_model = self.models['detection']

    def prepare(self, ctx_id, det_thresh=0.5, det_size=(640, 640)):
        # Store the detection threshold and input size, then bind every model
        # to the given device context (ctx_id).
        self.det_thresh = det_thresh
        assert det_size is not None
        print('set det-size:', det_size)
        self.det_size = det_size
        for taskname, model in self.models.items():
            if taskname == 'detection':
                model.prepare(ctx_id, input_size=det_size)
            else:
                model.prepare(ctx_id)

    def get(self, img, crop_size, max_num=0):
        # Detect faces, keep only the one with the highest detection score,
        # and return it aligned and cropped to crop_size x crop_size.
        bboxes, kpss = self.det_model.detect(img,
                                             threshold=self.det_thresh,
                                             max_num=max_num,
                                             metric='default')
        if bboxes.shape[0] == 0:
            return None

        # Select the face with the highest detection score (score is column 4 of each bbox).
        det_score = bboxes[..., 4]
        best_index = np.argmax(det_score)

        kps = None
        if kpss is not None:
            kps = kpss[best_index]
        # Estimate the alignment transform from the 5-point landmarks and warp the crop.
        M, _ = face_align.estimate_norm(kps, crop_size, mode='None')
        align_img = cv2.warpAffine(img, M, (crop_size, crop_size), borderValue=0.0)

        return [align_img], [M]
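

# Illustrative usage sketch. The model directory name 'antelope', the image path
# 'example.jpg', and crop_size=224 are assumptions for demonstration only;
# substitute the detector directory and inputs you actually use.
if __name__ == '__main__':
    app = Face_detect_crop(name='antelope', root='~/.insightface_func/models')
    app.prepare(ctx_id=0, det_thresh=0.6, det_size=(640, 640))

    img = cv2.imread('example.jpg')
    result = app.get(img, crop_size=224)
    if result is None:
        print('no face detected')
    else:
        # One aligned crop and its 2x3 affine matrix, each wrapped in a list.
        aligned_faces, transforms = result
        cv2.imwrite('aligned_face.jpg', aligned_faces[0])
        print('saved aligned crop, transform:\n', transforms[0])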