# reverse2original.py

import cv2
import numpy as np
# import time
import torch
from torch.nn import functional as F
import torch.nn as nn


def encode_segmentation_rgb(segmentation, no_neck=True):
    """Turn a face-parsing label map into a 3-channel (face, mouth, hair) mask with values 0/255."""
    parse = segmentation

    face_part_ids = [1, 2, 3, 4, 5, 6, 10, 12, 13] if no_neck else [1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 13, 14]
    mouth_id = 11
    hair_id = 17

    face_map = np.zeros([parse.shape[0], parse.shape[1]])
    mouth_map = np.zeros([parse.shape[0], parse.shape[1]])
    hair_map = np.zeros([parse.shape[0], parse.shape[1]])

    for valid_id in face_part_ids:
        valid_index = np.where(parse == valid_id)
        face_map[valid_index] = 255
    valid_index = np.where(parse == mouth_id)
    mouth_map[valid_index] = 255
    valid_index = np.where(parse == hair_id)
    hair_map[valid_index] = 255

    return np.stack([face_map, mouth_map, hair_map], axis=2)
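
# Hedged usage sketch (not part of the original file): encode_segmentation_rgb expects a
# 2-D integer label map from a face-parsing model; per the constants above, labels in
# face_part_ids are treated as face, 11 as mouth, and 17 as hair. The array and names
# below are illustrative only.
#
#   parsing = np.zeros((512, 512), dtype=np.uint8)
#   parsing[100:400, 100:400] = 1              # a patch labelled as skin
#   parsing[250:300, 200:300] = 11             # a patch labelled as mouth
#   masks = encode_segmentation_rgb(parsing)   # (512, 512, 3) array with 0/255 values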


class SoftErosion(nn.Module):
    """Softly erode a mask by repeatedly taking the elementwise min of the mask and its blurred version."""

    def __init__(self, kernel_size=15, threshold=0.6, iterations=1):
        super(SoftErosion, self).__init__()
        r = kernel_size // 2
        self.padding = r
        self.iterations = iterations
        self.threshold = threshold

        # Create a radially decreasing kernel that sums to 1
        y_indices, x_indices = torch.meshgrid(torch.arange(0., kernel_size), torch.arange(0., kernel_size))
        dist = torch.sqrt((x_indices - r) ** 2 + (y_indices - r) ** 2)
        kernel = dist.max() - dist
        kernel /= kernel.sum()
        kernel = kernel.view(1, 1, *kernel.shape)
        self.register_buffer('weight', kernel)

    def forward(self, x):
        x = x.float()
        for i in range(self.iterations - 1):
            x = torch.min(x, F.conv2d(x, weight=self.weight, groups=x.shape[1], padding=self.padding))
        x = F.conv2d(x, weight=self.weight, groups=x.shape[1], padding=self.padding)

        # Pixels above the threshold stay at 1; the remaining border pixels are rescaled to [0, 1]
        mask = x >= self.threshold
        x[mask] = 1.0
        x[~mask] /= x[~mask].max()

        return x, mask
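
# Hedged usage sketch (not part of the original file): SoftErosion operates on a 4-D
# (N, C, H, W) mask with values in [0, 1] and returns the feathered mask together with
# the boolean region that stayed above `threshold`. The tensor below is illustrative only.
#
#   smooth = SoftErosion(kernel_size=17, threshold=0.9, iterations=7)   # CPU is fine for a quick test
#   hard = torch.zeros(1, 1, 224, 224)
#   hard[:, :, 60:180, 60:180] = 1.0
#   soft, kept = smooth(hard)   # soft: float mask with a feathered border, kept: bool tensor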


def postprocess(swapped_face, target, target_mask, smooth_mask):
    """Blend the swapped face over the target crop using a softened (face + mouth) parsing mask."""
    # target_mask = cv2.resize(target_mask, (self.size, self.size))

    mask_tensor = torch.from_numpy(target_mask.copy().transpose((2, 0, 1))).float().mul_(1 / 255.0).cuda()
    face_mask_tensor = mask_tensor[0] + mask_tensor[1]

    soft_face_mask_tensor, _ = smooth_mask(face_mask_tensor.unsqueeze_(0).unsqueeze_(0))
    soft_face_mask_tensor.squeeze_()

    soft_face_mask = soft_face_mask_tensor.cpu().numpy()
    soft_face_mask = soft_face_mask[:, :, np.newaxis]

    result = swapped_face * soft_face_mask + target * (1 - soft_face_mask)
    result = result[:, :, ::-1]  # .astype(np.uint8)
    return result
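
# Hedged usage sketch (not part of the original file): postprocess blends a swapped crop
# over the aligned target crop, weighting by the softened face + mouth channels of the
# mask produced by encode_segmentation_rgb, resized to the crop size. Inputs are assumed
# to be H x W x 3 float images in [0, 1]; the .cuda() call inside postprocess requires a
# GPU. The argument names below are hypothetical.
#
#   smooth = SoftErosion(kernel_size=17, threshold=0.9, iterations=7).cuda()
#   blended = postprocess(swapped_crop, target_crop, target_mask, smooth)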


def reverse2wholeimage(b_align_crop_tenor_list, swaped_imgs, mats, crop_size, oriimg, logoclass, save_path='',
                       no_simswaplogo=False, pasring_model=None, norm=None, use_mask=False):
    target_image_list = []
    img_mask_list = []
    if use_mask:
        smooth_mask = SoftErosion(kernel_size=17, threshold=0.9, iterations=7).cuda()

    # print(len(swaped_imgs))
    # print(mats)
    # print(len(b_align_crop_tenor_list))
    for swaped_img, mat, source_img in zip(swaped_imgs, mats, b_align_crop_tenor_list):
        swaped_img = swaped_img.cpu().detach().numpy().transpose((1, 2, 0))
        img_white = np.full((crop_size, crop_size), 255, dtype=float)

        # invert the affine alignment matrix so the crop can be warped back onto the original image
        mat_rev = np.zeros([2, 3])
        div1 = mat[0][0] * mat[1][1] - mat[0][1] * mat[1][0]
        mat_rev[0][0] = mat[1][1] / div1
        mat_rev[0][1] = -mat[0][1] / div1
        mat_rev[0][2] = -(mat[0][2] * mat[1][1] - mat[0][1] * mat[1][2]) / div1
        div2 = mat[0][1] * mat[1][0] - mat[0][0] * mat[1][1]
        mat_rev[1][0] = mat[1][0] / div2
        mat_rev[1][1] = -mat[0][0] / div2
        mat_rev[1][2] = -(mat[0][2] * mat[1][0] - mat[0][0] * mat[1][2]) / div2

        orisize = (oriimg.shape[1], oriimg.shape[0])
        if use_mask:
            source_img_norm = norm(source_img)
            source_img_512 = F.interpolate(source_img_norm, size=(512, 512))
            out = pasring_model(source_img_512)[0]
            parsing = out.squeeze(0).detach().cpu().numpy().argmax(0)
            vis_parsing_anno = parsing.copy().astype(np.uint8)
            tgt_mask = encode_segmentation_rgb(vis_parsing_anno)
            # face_mask_tensor = tgt_mask[...,0] + tgt_mask[...,1]
            target_mask = cv2.resize(tgt_mask, (224, 224))
            # print(source_img)
            target_image_parsing = postprocess(
                swaped_img, source_img[0].cpu().detach().numpy().transpose((1, 2, 0)), target_mask, smooth_mask)
            target_image_parsing = cv2.warpAffine(target_image_parsing, mat_rev, orisize)
            # target_image_parsing = cv2.warpAffine(swaped_img, mat_rev, orisize)
        else:
            target_image = cv2.warpAffine(swaped_img, mat_rev, orisize)
            # source_image = cv2.warpAffine(source_img, mat_rev, orisize)
        img_white = cv2.warpAffine(img_white, mat_rev, orisize)
        img_white[img_white > 20] = 255
        img_mask = img_white

        if use_mask:
            kernel = np.ones((10, 10), np.uint8)
            img_mask = cv2.erode(img_mask, kernel, iterations=1)
        else:
            kernel = np.ones((40, 40), np.uint8)
            img_mask = cv2.erode(img_mask, kernel, iterations=1)
            kernel_size = (20, 20)
            blur_size = tuple(2 * i + 1 for i in kernel_size)
            img_mask = cv2.GaussianBlur(img_mask, blur_size, 0)
            # kernel = np.ones((10,10),np.uint8)
            # img_mask = cv2.erode(img_mask,kernel,iterations = 1)

        img_mask /= 255
        img_mask = np.reshape(img_mask, [img_mask.shape[0], img_mask.shape[1], 1])

        # parsing mask
        # target_image_parsing = postprocess(target_image, source_image, tgt_mask)
        if use_mask:
            # np.float was removed in recent NumPy releases; np.float64 keeps the original behaviour
            target_image = np.array(target_image_parsing, dtype=np.float64) * 255
        else:
            target_image = np.array(target_image, dtype=np.float64)[..., ::-1] * 255

        img_mask_list.append(img_mask)
        target_image_list.append(target_image)

    # target_image /= 255
    # target_image = 0
    img = np.array(oriimg, dtype=np.float64)
    for img_mask, target_image in zip(img_mask_list, target_image_list):
        img = img_mask * target_image + (1 - img_mask) * img

    final_img = img.astype(np.uint8)
    if not no_simswaplogo:
        final_img = logoclass.apply_frames(final_img)
    cv2.imwrite(save_path, final_img)
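

if __name__ == "__main__":
    # Hedged sanity check, not part of the original file: the hand-written 2x3 inverse built
    # inside reverse2wholeimage should agree with cv2.invertAffineTransform for an arbitrary
    # alignment matrix. The matrix below is an illustrative rotation + scale + translation.
    mat = cv2.getRotationMatrix2D((112.0, 112.0), 30.0, 1.2)
    mat[:, 2] += [5.0, -3.0]

    mat_rev = np.zeros([2, 3])
    div1 = mat[0][0] * mat[1][1] - mat[0][1] * mat[1][0]
    mat_rev[0][0] = mat[1][1] / div1
    mat_rev[0][1] = -mat[0][1] / div1
    mat_rev[0][2] = -(mat[0][2] * mat[1][1] - mat[0][1] * mat[1][2]) / div1
    div2 = mat[0][1] * mat[1][0] - mat[0][0] * mat[1][1]
    mat_rev[1][0] = mat[1][0] / div2
    mat_rev[1][1] = -mat[0][0] / div2
    mat_rev[1][2] = -(mat[0][2] * mat[1][0] - mat[0][0] * mat[1][2]) / div2

    assert np.allclose(mat_rev, cv2.invertAffineTransform(mat), atol=1e-6)
    print("manual affine inverse matches cv2.invertAffineTransform")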