test_wholeimage_swap_multispecific.py

import cv2
import torch
import math
import numpy as np
from PIL import Image
import torch.nn.functional as F
from torchvision import transforms
from models.models import create_model
from options.test_options import TestOptions
from insightface_func.face_detect_crop_multi import Face_detect_crop
from util.reverse2original import reverse2wholeimage
import os
from util.add_watermark import watermark_image
import torch.nn as nn
from util.norm import SpecificNorm
import glob
from parsing_model.model import BiSeNet


def lcm(a, b):
    # fractions.gcd was removed in Python 3.9; math.gcd is the supported replacement.
    return abs(a * b) // math.gcd(a, b) if a and b else 0
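
# e.g. lcm(4, 6) == 12. Note that this helper is not referenced anywhere else in
# this particular script.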

# ArcFace input transform: convert to a [0, 1] tensor, then apply ImageNet
# mean/std normalization.
transformer_Arcface = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

def _totensor(array):
    # HWC uint8 numpy image -> CHW float tensor scaled to [0, 1].
    tensor = torch.from_numpy(array)
    img = tensor.transpose(0, 1).transpose(0, 2).contiguous()
    return img.float().div(255)


def _toarctensor(array):
    # Identical to _totensor; kept under its original name, although this script
    # only calls _totensor.
    tensor = torch.from_numpy(array)
    img = tensor.transpose(0, 1).transpose(0, 2).contiguous()
    return img.float().div(255)
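
# For example, a 224x224 BGR crop from cv2 has shape (224, 224, 3); after _totensor
# it becomes a (3, 224, 224) float tensor in [0, 1], and the [None, ...] used in the
# loop further below adds a batch dimension, giving (1, 3, 224, 224).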

if __name__ == '__main__':
    opt = TestOptions().parse()

    start_epoch, epoch_iter = 1, 0
    crop_size = 224
    # Note: the option is spelled "multisepcific" in TestOptions, so the name is kept as-is.
    multisepcific_dir = opt.multisepcific_dir

    torch.nn.Module.dump_patches = True
    logoclass = watermark_image('./simswaplogo/simswaplogo.png')
    model = create_model(opt)
    model.eval()

    mse = torch.nn.MSELoss().cuda()
    spNorm = SpecificNorm()
    app = Face_detect_crop(name='antelope', root='./insightface_func/models')
    app.prepare(ctx_id=0, det_thresh=0.6, det_size=(640, 640))

    with torch.no_grad():
        # The specific people to be swapped out (sources).
        source_specific_id_nonorm_list = []
        source_path = os.path.join(multisepcific_dir, 'SRC_*')
        source_specific_images_path = sorted(glob.glob(source_path))

        for source_specific_image_path in source_specific_images_path:
            specific_person_whole = cv2.imread(source_specific_image_path)
            specific_person_align_crop, _ = app.get(specific_person_whole, crop_size)
            specific_person_align_crop_pil = Image.fromarray(cv2.cvtColor(specific_person_align_crop[0], cv2.COLOR_BGR2RGB))
            specific_person = transformer_Arcface(specific_person_align_crop_pil)
            # Add the batch dimension and move to the GPU.
            specific_person = specific_person.view(-1, specific_person.shape[0], specific_person.shape[1], specific_person.shape[2])
            specific_person = specific_person.cuda()

            # Create the latent id: ArcFace expects 112x112 input, so halve the 224x224 crop.
            specific_person_downsample = F.interpolate(specific_person, scale_factor=0.5)
            specific_person_id_nonorm = model.netArc(specific_person_downsample)
            source_specific_id_nonorm_list.append(specific_person_id_nonorm.clone())
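
        # The source embeddings are deliberately left un-normalized: they are only used
        # for the MSE identity comparison below, never as conditioning for the generator.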

        # The people who provide the id information (targets), one per source.
        target_id_norm_list = []
        target_path = os.path.join(multisepcific_dir, 'DST_*')
        target_images_path = sorted(glob.glob(target_path))

        for target_image_path in target_images_path:
            img_a_whole = cv2.imread(target_image_path)
            img_a_align_crop, _ = app.get(img_a_whole, crop_size)
            img_a_align_crop_pil = Image.fromarray(cv2.cvtColor(img_a_align_crop[0], cv2.COLOR_BGR2RGB))
            img_a = transformer_Arcface(img_a_align_crop_pil)
            # Add the batch dimension and move to the GPU.
            img_id = img_a.view(-1, img_a.shape[0], img_a.shape[1], img_a.shape[2])
            img_id = img_id.cuda()

            # Create the latent id; unlike the source embeddings, these are L2-normalized
            # because they condition the generator.
            img_id_downsample = F.interpolate(img_id, scale_factor=0.5)
            latend_id = model.netArc(img_id_downsample)
            latend_id = F.normalize(latend_id, p=2, dim=1)
            target_id_norm_list.append(latend_id.clone())

        assert len(target_id_norm_list) == len(source_specific_id_nonorm_list), \
            "The number of SRC_* and DST_* images in the multispecific directory must be the same!"
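
        # SRC_i and DST_i are paired by sorted filename order: a face matching SRC_i in
        # the target picture is replaced with the identity extracted from DST_i.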

        ############## Forward Pass ######################
        pic_b = opt.pic_b_path
        img_b_whole = cv2.imread(pic_b)

        img_b_align_crop_list, b_mat_list = app.get(img_b_whole, crop_size)
        swap_result_list = []
        id_compare_values = []
        b_align_crop_tenor_list = []

        for b_align_crop in img_b_align_crop_list:
            b_align_crop_tenor = _totensor(cv2.cvtColor(b_align_crop, cv2.COLOR_BGR2RGB))[None, ...].cuda()

            # Embed each detected face and record its MSE distance to every source identity.
            b_align_crop_tenor_arcnorm = spNorm(b_align_crop_tenor)
            b_align_crop_tenor_arcnorm_downsample = F.interpolate(b_align_crop_tenor_arcnorm, scale_factor=0.5)
            b_align_crop_id_nonorm = model.netArc(b_align_crop_tenor_arcnorm_downsample)

            id_compare_values.append([])
            for source_specific_id_nonorm_tmp in source_specific_id_nonorm_list:
                id_compare_values[-1].append(mse(b_align_crop_id_nonorm, source_specific_id_nonorm_tmp).detach().cpu().numpy())
            b_align_crop_tenor_list.append(b_align_crop_tenor)

        # Shape after transpose: (num_sources, num_faces). For each detected face, find
        # the closest source identity and how close it is.
        id_compare_values_array = np.array(id_compare_values).transpose(1, 0)
        min_indexs = np.argmin(id_compare_values_array, axis=0)
        min_value = np.min(id_compare_values_array, axis=0)
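
        # For example, with 2 sources and 3 detected faces the array has shape (2, 3);
        # min_indexs == [0, 1, 0] would mean faces 0 and 2 look most like the first
        # source and face 1 most like the second, subject to the opt.id_thres check below.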

        swap_result_list = []
        swap_result_matrix_list = []
        swap_result_ori_pic_list = []
        for tmp_index, min_index in enumerate(min_indexs):
            # Swap only the faces whose identity distance falls below the threshold;
            # everything else in the picture is left untouched.
            if min_value[tmp_index] < opt.id_thres:
                swap_result = model(None, b_align_crop_tenor_list[tmp_index], target_id_norm_list[min_index], None, True)[0]
                swap_result_list.append(swap_result)
                swap_result_matrix_list.append(b_mat_list[tmp_index])
                swap_result_ori_pic_list.append(b_align_crop_tenor_list[tmp_index])

        if len(swap_result_list) != 0:
            if opt.use_mask:
                # Load the face-parsing network (BiSeNet, 19 classes) used to build the
                # blending mask when pasting the swapped faces back.
                n_classes = 19
                net = BiSeNet(n_classes=n_classes)
                net.cuda()
                save_pth = os.path.join('./parsing_model/checkpoint', '79999_iter.pth')
                net.load_state_dict(torch.load(save_pth))
                net.eval()
            else:
                net = None

            # Paste every swapped crop back into the whole image via the inverse of the
            # alignment matrices (the "pasring_model" spelling matches the keyword in
            # reverse2wholeimage's signature).
            reverse2wholeimage(swap_result_ori_pic_list, swap_result_list, swap_result_matrix_list, crop_size, img_b_whole, logoclass,
                               os.path.join(opt.output_path, 'result_whole_swap_multispecific.jpg'),
                               opt.no_simswaplogo, pasring_model=net, use_mask=opt.use_mask, norm=spNorm)

            print(' ')
            print('************ Done ! ************')
        else:
            print('The people you specified were not found in the picture: {}'.format(pic_b))
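
# A typical invocation might look like the following (the flag names mirror the opt.*
# attributes used above; the paths are illustrative, so adjust them to your setup):
#
#   python test_wholeimage_swap_multispecific.py --use_mask \
#       --pic_b_path ./demo_file/multi_people.jpg \
#       --multisepcific_dir ./demo_file/multispecific \
#       --output_path ./output/
#
# where the multispecific directory contains paired images whose names match the
# SRC_* / DST_* globs above, e.g. SRC_01.jpg with its counterpart DST_01.jpg.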