# test_video_swap_multispecific.py
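"""
Multi-specific face swapping on video with SimSwap.

Reads paired reference images from --multisepcific_dir: each SRC_i image
identifies a specific person appearing in the video, and the corresponding
DST_i image provides the identity that will replace them. Pairs are matched
by sorted filename order, and the swap itself is performed frame by frame
by util.videoswap_multispecific.video_swap.
"""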

import os
import glob
import math
import cv2
import torch
import numpy as np
from PIL import Image
import torch.nn.functional as F
from torchvision import transforms
from models.models import create_model
from options.test_options import TestOptions
from insightface_func.face_detect_crop_multi import Face_detect_crop
from util.videoswap_multispecific import video_swap


def lcm(a, b):
    # fractions.gcd was removed in Python 3.9; math.gcd is the replacement.
    # Kept for parity with the original script; not used below.
    return abs(a * b) // math.gcd(a, b) if a and b else 0

transformer = transforms.Compose([
    transforms.ToTensor(),
    # transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

transformer_Arcface = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

# detransformer = transforms.Compose([
#     transforms.Normalize([0, 0, 0], [1/0.229, 1/0.224, 1/0.225]),
#     transforms.Normalize([-0.485, -0.456, -0.406], [1, 1, 1])
# ])

if __name__ == '__main__':
    opt = TestOptions().parse()
    pic_specific = opt.pic_specific_path
    start_epoch, epoch_iter = 1, 0
    crop_size = 224
    multisepcific_dir = opt.multisepcific_dir  # option name is spelled this way in TestOptions
    torch.nn.Module.dump_patches = True

    model = create_model(opt)
    model.eval()

    app = Face_detect_crop(name='antelope', root='./insightface_func/models')
    app.prepare(ctx_id=0, det_thresh=0.6, det_size=(640, 640))

    # The specific people to be swapped out of the video (SRC_* images)
    source_specific_id_nonorm_list = []
    source_path = os.path.join(multisepcific_dir, 'SRC_*')
    source_specific_images_path = sorted(glob.glob(source_path))
    with torch.no_grad():
        for source_specific_image_path in source_specific_images_path:
            # Detect, align, and crop the face from the whole image
            specific_person_whole = cv2.imread(source_specific_image_path)
            specific_person_align_crop, _ = app.get(specific_person_whole, crop_size)
            specific_person_align_crop_pil = Image.fromarray(
                cv2.cvtColor(specific_person_align_crop[0], cv2.COLOR_BGR2RGB))
            specific_person = transformer_Arcface(specific_person_align_crop_pil)
            # Add a batch dimension and move to GPU
            specific_person = specific_person.view(
                -1, specific_person.shape[0], specific_person.shape[1], specific_person.shape[2])
            specific_person = specific_person.cuda()

            # Create the (un-normalized) latent id; ArcFace expects 112x112
            # input, so the 224x224 crop is downsampled by half
            specific_person_downsample = F.interpolate(specific_person, scale_factor=0.5)
            specific_person_id_nonorm = model.netArc(specific_person_downsample)
            source_specific_id_nonorm_list.append(specific_person_id_nonorm.clone())
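
        # Assumption (a sketch, not the library's verified internals): inside
        # util.videoswap_multispecific, each face embedding detected in a frame
        # is matched against these source embeddings by cosine similarity and
        # swapped only when the best match clears opt.id_thres. Names below
        # are hypothetical, for illustration only:
        #
        #   sims = [F.cosine_similarity(frame_id, src_id).item()
        #           for src_id in source_specific_id_nonorm_list]
        #   best = int(np.argmax(sims))
        #   if sims[best] > opt.id_thres:
        #       ...  # swap in the identity from target_id_norm_list[best]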

        # The people who provide the new identity information (DST_* images)
        target_id_norm_list = []
        target_path = os.path.join(multisepcific_dir, 'DST_*')
        target_images_path = sorted(glob.glob(target_path))

        for target_image_path in target_images_path:
            img_a_whole = cv2.imread(target_image_path)
            img_a_align_crop, _ = app.get(img_a_whole, crop_size)
            img_a_align_crop_pil = Image.fromarray(
                cv2.cvtColor(img_a_align_crop[0], cv2.COLOR_BGR2RGB))
            img_a = transformer_Arcface(img_a_align_crop_pil)
            # Add a batch dimension and move to GPU
            img_id = img_a.view(-1, img_a.shape[0], img_a.shape[1], img_a.shape[2])
            img_id = img_id.cuda()

            # Create the L2-normalized latent id used to drive the swap
            img_id_downsample = F.interpolate(img_id, scale_factor=0.5)
            latend_id = model.netArc(img_id_downsample)
            latend_id = F.normalize(latend_id, p=2, dim=1)
            target_id_norm_list.append(latend_id.clone())

        # SRC_i and DST_i are paired by sorted filename order, so the counts must match
        assert len(target_id_norm_list) == len(source_specific_id_nonorm_list), \
            "The number of images in the source and target directories must be the same!"
        video_swap(opt.video_path, target_id_norm_list, source_specific_id_nonorm_list,
                   opt.id_thres, model, app, opt.output_path,
                   temp_results_dir=opt.temp_path,
                   no_simswaplogo=opt.no_simswaplogo, use_mask=opt.use_mask)
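
# Example invocation (a sketch: the flag names follow SimSwap's TestOptions,
# but all paths and values here are illustrative placeholders):
#
#   python test_video_swap_multispecific.py --crop_size 224 --use_mask \
#       --name people --Arc_path arcface_model/arcface_checkpoint.tar \
#       --video_path ./demo_file/multi_people.mp4 \
#       --output_path ./output/multi_test_multispecific.mp4 \
#       --temp_path ./temp_results \
#       --multisepcific_dir ./demo_file/multispecific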