# test_video_swapsingle.py

import os
import cv2
import torch
import math
import numpy as np
from PIL import Image
import torch.nn.functional as F
from torchvision import transforms
from models.models import create_model
from options.test_options import TestOptions
from insightface_func.face_detect_crop_single import Face_detect_crop
from util.videoswap import video_swap


def lcm(a, b):
    # fractions.gcd was removed in Python 3.9; math.gcd is the supported replacement
    return abs(a * b) / math.gcd(a, b) if a and b else 0
transformer = transforms.Compose([
    transforms.ToTensor(),
    # transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

transformer_Arcface = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

# detransformer = transforms.Compose([
#     transforms.Normalize([0, 0, 0], [1/0.229, 1/0.224, 1/0.225]),
#     transforms.Normalize([-0.485, -0.456, -0.406], [1, 1, 1])
# ])
if __name__ == '__main__':
    opt = TestOptions().parse()

    start_epoch, epoch_iter = 1, 0
    crop_size = 224
    print(type(opt))

    torch.nn.Module.dump_patches = True
    model = create_model(opt)
    model.eval()

    # Face detector/aligner from insightface (antelope model pack)
    app = Face_detect_crop(name='antelope', root='./insightface_func/models')
    app.prepare(ctx_id=0, det_thresh=0.6, det_size=(640, 640))
    with torch.no_grad():
        # Load the source identity image, then detect, crop, and align the face
        pic_a = opt.pic_a_path
        # img_a = Image.open(pic_a).convert('RGB')
        img_a_whole = cv2.imread(pic_a)
        img_a_align_crop, _ = app.get(img_a_whole, crop_size)
        img_a_align_crop_pil = Image.fromarray(cv2.cvtColor(img_a_align_crop[0], cv2.COLOR_BGR2RGB))
        img_a = transformer_Arcface(img_a_align_crop_pil)
        img_id = img_a.view(-1, img_a.shape[0], img_a.shape[1], img_a.shape[2])

        # pic_b = opt.pic_b_path
        # img_b_whole = cv2.imread(pic_b)
        # img_b_align_crop, b_mat = app.get(img_b_whole, crop_size)
        # img_b_align_crop_pil = Image.fromarray(cv2.cvtColor(img_b_align_crop, cv2.COLOR_BGR2RGB))
        # img_b = transformer(img_b_align_crop_pil)
        # img_att = img_b.view(-1, img_b.shape[0], img_b.shape[1], img_b.shape[2])

        # Move the source tensor to the GPU
        img_id = img_id.cuda()
        # img_att = img_att.cuda()

        # Create the latent identity: downsample to the ArcFace input size,
        # run the identity encoder, and L2-normalize the embedding
        img_id_downsample = F.interpolate(img_id, scale_factor=0.5)
        latend_id = model.netArc(img_id_downsample)
        latend_id = F.normalize(latend_id, p=2, dim=1)

        # Swap the source identity onto the face detected in each video frame
        video_swap(opt.video_path, latend_id, model, app, opt.output_path,
                   temp_results_dir=opt.temp_path,
                   no_simswaplogo=opt.no_simswaplogo, use_mask=opt.use_mask)
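
# Example invocation (a sketch, not verified against this repo's option parser):
# the flag names below assume TestOptions exposes the attributes read above
# (pic_a_path, video_path, output_path, temp_path, use_mask) as command-line
# options, and the file paths are hypothetical placeholders.
#
#   python test_video_swapsingle.py \
#       --pic_a_path ./demo_file/source_face.jpg \
#       --video_path ./demo_file/target_video.mp4 \
#       --output_path ./output/swapped_video.mp4 \
#       --temp_path ./temp_results \
#       --use_mask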