test_video_swapspecific.py

import os
import math
import cv2
import torch
import numpy as np
from PIL import Image
import torch.nn.functional as F
from torchvision import transforms
from models.models import create_model
from options.test_options import TestOptions
from insightface_func.face_detect_crop_multi import Face_detect_crop
from util.videoswap_specific import video_swap


def lcm(a, b):
    # fractions.gcd was removed in Python 3.9; math.gcd is the supported
    # replacement, and floor division keeps the result an integer.
    return abs(a * b) // math.gcd(a, b) if a and b else 0
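
# Two preprocessing pipelines: `transformer` is plain ToTensor (its Normalize
# step is commented out; it is only referenced by the commented-out attribute
# image code further below), while `transformer_Arcface` adds the ImageNet
# mean/std normalization applied before the ArcFace identity encoder
# (model.netArc).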
transformer = transforms.Compose([
    transforms.ToTensor(),
    # transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

transformer_Arcface = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

# detransformer = transforms.Compose([
#     transforms.Normalize([0, 0, 0], [1/0.229, 1/0.224, 1/0.225]),
#     transforms.Normalize([-0.485, -0.456, -0.406], [1, 1, 1])
# ])
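
# Sketch of the overall flow below: read the source face (pic_a) and encode it
# into an identity embedding, encode the specific person to be replaced
# (pic_specific) into a reference embedding, then call video_swap, which swaps
# only the faces in the video that match the reference within opt.id_thres.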

if __name__ == '__main__':
    opt = TestOptions().parse()
    pic_specific = opt.pic_specific_path
    start_epoch, epoch_iter = 1, 0
    crop_size = 224
    torch.nn.Module.dump_patches = True

    model = create_model(opt)
    model.eval()

    app = Face_detect_crop(name='antelope', root='./insightface_func/models')
    app.prepare(ctx_id=0, det_thresh=0.6, det_size=(640, 640))
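
    # 'antelope' names the InsightFace model pack expected under
    # ./insightface_func/models; det_thresh discards low-confidence detections
    # and det_size is the detector's input resolution (larger can help with
    # small faces, at a speed cost).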

    with torch.no_grad():
        pic_a = opt.pic_a_path
        # img_a = Image.open(pic_a).convert('RGB')
        img_a_whole = cv2.imread(pic_a)
        img_a_align_crop, _ = app.get(img_a_whole, crop_size)
        img_a_align_crop_pil = Image.fromarray(cv2.cvtColor(img_a_align_crop[0], cv2.COLOR_BGR2RGB))
        img_a = transformer_Arcface(img_a_align_crop_pil)
        img_id = img_a.view(-1, img_a.shape[0], img_a.shape[1], img_a.shape[2])
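
        # app.get returns a list of aligned crops, one per detected face; [0]
        # above takes the first. view(-1, C, H, W) adds the batch dimension
        # the network expects, and cvtColor fixes OpenCV's BGR ordering.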

        # pic_b = opt.pic_b_path
        # img_b_whole = cv2.imread(pic_b)
        # img_b_align_crop, b_mat = app.get(img_b_whole, crop_size)
        # img_b_align_crop_pil = Image.fromarray(cv2.cvtColor(img_b_align_crop, cv2.COLOR_BGR2RGB))
        # img_b = transformer(img_b_align_crop_pil)
        # img_att = img_b.view(-1, img_b.shape[0], img_b.shape[1], img_b.shape[2])

        # move the identity tensor to the GPU (it is already a tensor, not numpy)
        img_id = img_id.cuda()
        # img_att = img_att.cuda()

        # create the latent identity code: ArcFace expects 112x112 inputs, so
        # the 224x224 crop is downsampled by 0.5 before encoding
        img_id_downsample = F.interpolate(img_id, scale_factor=0.5)
        latent_id = model.netArc(img_id_downsample)
        latent_id = F.normalize(latent_id, p=2, dim=1)
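
        # latent_id is the L2-normalized source identity embedding; it is the
        # identity that video_swap paints onto the matched faces.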

        # The specific person to be swapped
        specific_person_whole = cv2.imread(pic_specific)
        specific_person_align_crop, _ = app.get(specific_person_whole, crop_size)
        specific_person_align_crop_pil = Image.fromarray(cv2.cvtColor(specific_person_align_crop[0], cv2.COLOR_BGR2RGB))
        specific_person = transformer_Arcface(specific_person_align_crop_pil)
        specific_person = specific_person.view(-1, specific_person.shape[0], specific_person.shape[1], specific_person.shape[2])
        specific_person = specific_person.cuda()
        specific_person_downsample = F.interpolate(specific_person, scale_factor=0.5)
        specific_person_id_nonorm = model.netArc(specific_person_downsample)
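
        # Note the "_nonorm" suffix: this reference embedding is passed to
        # video_swap without L2 normalization, where it is compared against
        # each face detected in the video; only faces within opt.id_thres of
        # the reference are swapped.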

        video_swap(opt.video_path, latent_id, specific_person_id_nonorm,
                   opt.id_thres, model, app, opt.output_path,
                   temp_results_dir=opt.temp_path,
                   no_simswaplogo=opt.no_simswaplogo, use_mask=opt.use_mask)
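
# A typical invocation, assuming SimSwap's standard command-line options; the
# file names below are illustrative, not part of this script:
#
#   python test_video_swapspecific.py --use_mask --name people \
#       --Arc_path arcface_model/arcface_checkpoint.tar \
#       --pic_a_path ./demo_file/source_face.jpg \
#       --pic_specific_path ./demo_file/specific_person.png \
#       --video_path ./demo_file/input_video.mp4 \
#       --output_path ./output/swapped_video.mp4 \
#       --temp_path ./temp_results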