
Added the option for using a mask
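
This commit threads a use_mask option through all of the test scripts. Each script now runs its preprocessing and forward pass inside a torch.no_grad() block, and the whole-image scripts optionally load a 19-class BiSeNet face-parsing model from ./parsing_model/checkpoint/79999_iter.pth. The parsing net, the original aligned crops, and a SpecificNorm instance are forwarded to reverse2wholeimage (and use_mask to video_swap) so the blending step can restrict the swap to face regions. The flag is read from opt.use_mask, so it is presumably exposed as a --use_mask command-line option in the options file, which is not part of this excerpt; if so, appending --use_mask to the existing commands would enable it.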

NNNNAI, 3 years ago
parent
commit
5e1e0dd90e

+ 40 - 40
test_video_swap_multispecific.py

@@ -51,44 +51,44 @@ if __name__ == '__main__':
     source_specific_id_nonorm_list = []
     source_path = os.path.join(multisepcific_dir,'SRC_*')
     source_specific_images_path = sorted(glob.glob(source_path))
-
-    for source_specific_image_path in source_specific_images_path:
-        specific_person_whole = cv2.imread(source_specific_image_path)
-        specific_person_align_crop, _ = app.get(specific_person_whole,crop_size)
-        specific_person_align_crop_pil = Image.fromarray(cv2.cvtColor(specific_person_align_crop[0],cv2.COLOR_BGR2RGB)) 
-        specific_person = transformer_Arcface(specific_person_align_crop_pil)
-        specific_person = specific_person.view(-1, specific_person.shape[0], specific_person.shape[1], specific_person.shape[2])
-        # convert numpy to tensor
-        specific_person = specific_person.cuda()
-        #create latent id
-        specific_person_downsample = F.interpolate(specific_person, scale_factor=0.5)
-        specific_person_id_nonorm = model.netArc(specific_person_downsample)
-        source_specific_id_nonorm_list.append(specific_person_id_nonorm.clone())
-
-
-    # The person who provides id information (list)
-    target_id_norm_list = []
-    target_path = os.path.join(multisepcific_dir,'DST_*')
-    target_images_path = sorted(glob.glob(target_path))
-
-    for target_image_path in target_images_path:
-        img_a_whole = cv2.imread(target_image_path)
-        img_a_align_crop, _ = app.get(img_a_whole,crop_size)
-        img_a_align_crop_pil = Image.fromarray(cv2.cvtColor(img_a_align_crop[0],cv2.COLOR_BGR2RGB)) 
-        img_a = transformer_Arcface(img_a_align_crop_pil)
-        img_id = img_a.view(-1, img_a.shape[0], img_a.shape[1], img_a.shape[2])
-        # convert numpy to tensor
-        img_id = img_id.cuda()
-        #create latent id
-        img_id_downsample = F.interpolate(img_id, scale_factor=0.5)
-        latend_id = model.netArc(img_id_downsample)
-        latend_id = F.normalize(latend_id, p=2, dim=1)
-        target_id_norm_list.append(latend_id.clone())
-
-    assert len(target_id_norm_list) == len(source_specific_id_nonorm_list), "The number of images in source and target directory must be same !!!"
-
-
-
-    video_swap(opt.video_path, target_id_norm_list,source_specific_id_nonorm_list, opt.id_thres, \
-        model, app, opt.output_path,temp_results_dir=opt.temp_path,no_simswaplogo=opt.no_simswaplogo)
+    with torch.no_grad():
+        for source_specific_image_path in source_specific_images_path:
+            specific_person_whole = cv2.imread(source_specific_image_path)
+            specific_person_align_crop, _ = app.get(specific_person_whole,crop_size)
+            specific_person_align_crop_pil = Image.fromarray(cv2.cvtColor(specific_person_align_crop[0],cv2.COLOR_BGR2RGB)) 
+            specific_person = transformer_Arcface(specific_person_align_crop_pil)
+            specific_person = specific_person.view(-1, specific_person.shape[0], specific_person.shape[1], specific_person.shape[2])
+            # convert numpy to tensor
+            specific_person = specific_person.cuda()
+            #create latent id
+            specific_person_downsample = F.interpolate(specific_person, scale_factor=0.5)
+            specific_person_id_nonorm = model.netArc(specific_person_downsample)
+            source_specific_id_nonorm_list.append(specific_person_id_nonorm.clone())
+
+
+        # The person who provides id information (list)
+        target_id_norm_list = []
+        target_path = os.path.join(multisepcific_dir,'DST_*')
+        target_images_path = sorted(glob.glob(target_path))
+
+        for target_image_path in target_images_path:
+            img_a_whole = cv2.imread(target_image_path)
+            img_a_align_crop, _ = app.get(img_a_whole,crop_size)
+            img_a_align_crop_pil = Image.fromarray(cv2.cvtColor(img_a_align_crop[0],cv2.COLOR_BGR2RGB)) 
+            img_a = transformer_Arcface(img_a_align_crop_pil)
+            img_id = img_a.view(-1, img_a.shape[0], img_a.shape[1], img_a.shape[2])
+            # convert numpy to tensor
+            img_id = img_id.cuda()
+            #create latent id
+            img_id_downsample = F.interpolate(img_id, scale_factor=0.5)
+            latend_id = model.netArc(img_id_downsample)
+            latend_id = F.normalize(latend_id, p=2, dim=1)
+            target_id_norm_list.append(latend_id.clone())
+
+        assert len(target_id_norm_list) == len(source_specific_id_nonorm_list), "The number of images in source and target directory must be same !!!"
+
+
+
+        video_swap(opt.video_path, target_id_norm_list,source_specific_id_nonorm_list, opt.id_thres, \
+            model, app, opt.output_path,temp_results_dir=opt.temp_path,no_simswaplogo=opt.no_simswaplogo,use_mask=opt.use_mask)
 

+ 27 - 25
test_video_swapmulti.py

@@ -44,29 +44,31 @@ if __name__ == '__main__':
     app = Face_detect_crop(name='antelope', root='./insightface_func/models')
     app.prepare(ctx_id= 0, det_thresh=0.6, det_size=(640,640))
 
-    pic_a = opt.pic_a_path
-    # img_a = Image.open(pic_a).convert('RGB')
-    img_a_whole = cv2.imread(pic_a)
-    img_a_align_crop, _ = app.get(img_a_whole,crop_size)
-    img_a_align_crop_pil = Image.fromarray(cv2.cvtColor(img_a_align_crop[0],cv2.COLOR_BGR2RGB)) 
-    img_a = transformer_Arcface(img_a_align_crop_pil)
-    img_id = img_a.view(-1, img_a.shape[0], img_a.shape[1], img_a.shape[2])
-
-    # pic_b = opt.pic_b_path
-    # img_b_whole = cv2.imread(pic_b)
-    # img_b_align_crop, b_mat = app.get(img_b_whole,crop_size)
-    # img_b_align_crop_pil = Image.fromarray(cv2.cvtColor(img_b_align_crop,cv2.COLOR_BGR2RGB)) 
-    # img_b = transformer(img_b_align_crop_pil)
-    # img_att = img_b.view(-1, img_b.shape[0], img_b.shape[1], img_b.shape[2])
-
-    # convert numpy to tensor
-    img_id = img_id.cuda()
-    # img_att = img_att.cuda()
-
-    #create latent id
-    img_id_downsample = F.interpolate(img_id, scale_factor=0.5)
-    latend_id = model.netArc(img_id_downsample)
-    latend_id = F.normalize(latend_id, p=2, dim=1)
-
-    video_swap(opt.video_path, latend_id, model, app, opt.output_path,temp_results_dir=opt.temp_path,no_simswaplogo=opt.no_simswaplogo)
+    with torch.no_grad():
+        pic_a = opt.pic_a_path
+        # img_a = Image.open(pic_a).convert('RGB')
+        img_a_whole = cv2.imread(pic_a)
+        img_a_align_crop, _ = app.get(img_a_whole,crop_size)
+        img_a_align_crop_pil = Image.fromarray(cv2.cvtColor(img_a_align_crop[0],cv2.COLOR_BGR2RGB)) 
+        img_a = transformer_Arcface(img_a_align_crop_pil)
+        img_id = img_a.view(-1, img_a.shape[0], img_a.shape[1], img_a.shape[2])
+
+        # pic_b = opt.pic_b_path
+        # img_b_whole = cv2.imread(pic_b)
+        # img_b_align_crop, b_mat = app.get(img_b_whole,crop_size)
+        # img_b_align_crop_pil = Image.fromarray(cv2.cvtColor(img_b_align_crop,cv2.COLOR_BGR2RGB)) 
+        # img_b = transformer(img_b_align_crop_pil)
+        # img_att = img_b.view(-1, img_b.shape[0], img_b.shape[1], img_b.shape[2])
+
+        # convert numpy to tensor
+        img_id = img_id.cuda()
+        # img_att = img_att.cuda()
+
+        #create latent id
+        img_id_downsample = F.interpolate(img_id, scale_factor=0.5)
+        latend_id = model.netArc(img_id_downsample)
+        latend_id = F.normalize(latend_id, p=2, dim=1)
+
+        video_swap(opt.video_path, latend_id, model, app, opt.output_path,temp_results_dir=opt.temp_path,\
+            no_simswaplogo=opt.no_simswaplogo,use_mask=opt.use_mask)
 

+ 27 - 26
test_video_swapsingle.py

@@ -43,30 +43,31 @@ if __name__ == '__main__':
 
     app = Face_detect_crop(name='antelope', root='./insightface_func/models')
     app.prepare(ctx_id= 0, det_thresh=0.6, det_size=(640,640))
-
-    pic_a = opt.pic_a_path
-    # img_a = Image.open(pic_a).convert('RGB')
-    img_a_whole = cv2.imread(pic_a)
-    img_a_align_crop, _ = app.get(img_a_whole,crop_size)
-    img_a_align_crop_pil = Image.fromarray(cv2.cvtColor(img_a_align_crop[0],cv2.COLOR_BGR2RGB)) 
-    img_a = transformer_Arcface(img_a_align_crop_pil)
-    img_id = img_a.view(-1, img_a.shape[0], img_a.shape[1], img_a.shape[2])
-
-    # pic_b = opt.pic_b_path
-    # img_b_whole = cv2.imread(pic_b)
-    # img_b_align_crop, b_mat = app.get(img_b_whole,crop_size)
-    # img_b_align_crop_pil = Image.fromarray(cv2.cvtColor(img_b_align_crop,cv2.COLOR_BGR2RGB)) 
-    # img_b = transformer(img_b_align_crop_pil)
-    # img_att = img_b.view(-1, img_b.shape[0], img_b.shape[1], img_b.shape[2])
-
-    # convert numpy to tensor
-    img_id = img_id.cuda()
-    # img_att = img_att.cuda()
-
-    #create latent id
-    img_id_downsample = F.interpolate(img_id, scale_factor=0.5)
-    latend_id = model.netArc(img_id_downsample)
-    latend_id = F.normalize(latend_id, p=2, dim=1)
-
-    video_swap(opt.video_path, latend_id, model, app, opt.output_path,temp_results_dir=opt.temp_path,no_simswaplogo=opt.no_simswaplogo)
+    with torch.no_grad():
+        pic_a = opt.pic_a_path
+        # img_a = Image.open(pic_a).convert('RGB')
+        img_a_whole = cv2.imread(pic_a)
+        img_a_align_crop, _ = app.get(img_a_whole,crop_size)
+        img_a_align_crop_pil = Image.fromarray(cv2.cvtColor(img_a_align_crop[0],cv2.COLOR_BGR2RGB)) 
+        img_a = transformer_Arcface(img_a_align_crop_pil)
+        img_id = img_a.view(-1, img_a.shape[0], img_a.shape[1], img_a.shape[2])
+
+        # pic_b = opt.pic_b_path
+        # img_b_whole = cv2.imread(pic_b)
+        # img_b_align_crop, b_mat = app.get(img_b_whole,crop_size)
+        # img_b_align_crop_pil = Image.fromarray(cv2.cvtColor(img_b_align_crop,cv2.COLOR_BGR2RGB)) 
+        # img_b = transformer(img_b_align_crop_pil)
+        # img_att = img_b.view(-1, img_b.shape[0], img_b.shape[1], img_b.shape[2])
+
+        # convert numpy to tensor
+        img_id = img_id.cuda()
+        # img_att = img_att.cuda()
+
+        #create latent id
+        img_id_downsample = F.interpolate(img_id, scale_factor=0.5)
+        latend_id = model.netArc(img_id_downsample)
+        latend_id = F.normalize(latend_id, p=2, dim=1)
+
+        video_swap(opt.video_path, latend_id, model, app, opt.output_path,temp_results_dir=opt.temp_path,\
+            no_simswaplogo=opt.no_simswaplogo,use_mask=opt.use_mask)
 

+ 38 - 38
test_video_swapspecific.py

@@ -43,42 +43,42 @@ if __name__ == '__main__':
 
     app = Face_detect_crop(name='antelope', root='./insightface_func/models')
     app.prepare(ctx_id= 0, det_thresh=0.6, det_size=(640,640))
-
-    pic_a = opt.pic_a_path
-    # img_a = Image.open(pic_a).convert('RGB')
-    img_a_whole = cv2.imread(pic_a)
-    img_a_align_crop, _ = app.get(img_a_whole,crop_size)
-    img_a_align_crop_pil = Image.fromarray(cv2.cvtColor(img_a_align_crop[0],cv2.COLOR_BGR2RGB)) 
-    img_a = transformer_Arcface(img_a_align_crop_pil)
-    img_id = img_a.view(-1, img_a.shape[0], img_a.shape[1], img_a.shape[2])
-
-    # pic_b = opt.pic_b_path
-    # img_b_whole = cv2.imread(pic_b)
-    # img_b_align_crop, b_mat = app.get(img_b_whole,crop_size)
-    # img_b_align_crop_pil = Image.fromarray(cv2.cvtColor(img_b_align_crop,cv2.COLOR_BGR2RGB)) 
-    # img_b = transformer(img_b_align_crop_pil)
-    # img_att = img_b.view(-1, img_b.shape[0], img_b.shape[1], img_b.shape[2])
-
-    # convert numpy to tensor
-    img_id = img_id.cuda()
-    # img_att = img_att.cuda()
-
-    #create latent id
-    img_id_downsample = F.interpolate(img_id, scale_factor=0.5)
-    latend_id = model.netArc(img_id_downsample)
-    latend_id = F.normalize(latend_id, p=2, dim=1)
-
-
-    # The specific person to be swapped
-    specific_person_whole = cv2.imread(pic_specific)
-    specific_person_align_crop, _ = app.get(specific_person_whole,crop_size)
-    specific_person_align_crop_pil = Image.fromarray(cv2.cvtColor(specific_person_align_crop[0],cv2.COLOR_BGR2RGB)) 
-    specific_person = transformer_Arcface(specific_person_align_crop_pil)
-    specific_person = specific_person.view(-1, specific_person.shape[0], specific_person.shape[1], specific_person.shape[2])
-    specific_person = specific_person.cuda()
-    specific_person_downsample = F.interpolate(specific_person, scale_factor=0.5)
-    specific_person_id_nonorm = model.netArc(specific_person_downsample)
-
-    video_swap(opt.video_path, latend_id,specific_person_id_nonorm, opt.id_thres, \
-        model, app, opt.output_path,temp_results_dir=opt.temp_path,no_simswaplogo=opt.no_simswaplogo)
+    with torch.no_grad():
+        pic_a = opt.pic_a_path
+        # img_a = Image.open(pic_a).convert('RGB')
+        img_a_whole = cv2.imread(pic_a)
+        img_a_align_crop, _ = app.get(img_a_whole,crop_size)
+        img_a_align_crop_pil = Image.fromarray(cv2.cvtColor(img_a_align_crop[0],cv2.COLOR_BGR2RGB)) 
+        img_a = transformer_Arcface(img_a_align_crop_pil)
+        img_id = img_a.view(-1, img_a.shape[0], img_a.shape[1], img_a.shape[2])
+
+        # pic_b = opt.pic_b_path
+        # img_b_whole = cv2.imread(pic_b)
+        # img_b_align_crop, b_mat = app.get(img_b_whole,crop_size)
+        # img_b_align_crop_pil = Image.fromarray(cv2.cvtColor(img_b_align_crop,cv2.COLOR_BGR2RGB)) 
+        # img_b = transformer(img_b_align_crop_pil)
+        # img_att = img_b.view(-1, img_b.shape[0], img_b.shape[1], img_b.shape[2])
+
+        # convert numpy to tensor
+        img_id = img_id.cuda()
+        # img_att = img_att.cuda()
+
+        #create latent id
+        img_id_downsample = F.interpolate(img_id, scale_factor=0.5)
+        latend_id = model.netArc(img_id_downsample)
+        latend_id = F.normalize(latend_id, p=2, dim=1)
+
+
+        # The specific person to be swapped
+        specific_person_whole = cv2.imread(pic_specific)
+        specific_person_align_crop, _ = app.get(specific_person_whole,crop_size)
+        specific_person_align_crop_pil = Image.fromarray(cv2.cvtColor(specific_person_align_crop[0],cv2.COLOR_BGR2RGB)) 
+        specific_person = transformer_Arcface(specific_person_align_crop_pil)
+        specific_person = specific_person.view(-1, specific_person.shape[0], specific_person.shape[1], specific_person.shape[2])
+        specific_person = specific_person.cuda()
+        specific_person_downsample = F.interpolate(specific_person, scale_factor=0.5)
+        specific_person_id_nonorm = model.netArc(specific_person_downsample)
+
+        video_swap(opt.video_path, latend_id,specific_person_id_nonorm, opt.id_thres, \
+            model, app, opt.output_path,temp_results_dir=opt.temp_path,no_simswaplogo=opt.no_simswaplogo,use_mask=opt.use_mask)
 

+ 104 - 89
test_wholeimage_swap_multispecific.py

@@ -15,6 +15,7 @@ from util.add_watermark import watermark_image
 import torch.nn as nn
 from util.norm import SpecificNorm
 import glob
+from parsing_model.model import BiSeNet
 
 def lcm(a, b): return abs(a * b) / fractions.gcd(a, b) if a and b else 0
 
@@ -53,93 +54,107 @@ if __name__ == '__main__':
     app = Face_detect_crop(name='antelope', root='./insightface_func/models')
     app.prepare(ctx_id= 0, det_thresh=0.6, det_size=(640,640))
 
-    # The specific person to be swapped(source)
-
-    source_specific_id_nonorm_list = []
-    source_path = os.path.join(multisepcific_dir,'SRC_*')
-    source_specific_images_path = sorted(glob.glob(source_path))
-
-    for source_specific_image_path in source_specific_images_path:
-        specific_person_whole = cv2.imread(source_specific_image_path)
-        specific_person_align_crop, _ = app.get(specific_person_whole,crop_size)
-        specific_person_align_crop_pil = Image.fromarray(cv2.cvtColor(specific_person_align_crop[0],cv2.COLOR_BGR2RGB)) 
-        specific_person = transformer_Arcface(specific_person_align_crop_pil)
-        specific_person = specific_person.view(-1, specific_person.shape[0], specific_person.shape[1], specific_person.shape[2])
-        # convert numpy to tensor
-        specific_person = specific_person.cuda()
-        #create latent id
-        specific_person_downsample = F.interpolate(specific_person, scale_factor=0.5)
-        specific_person_id_nonorm = model.netArc(specific_person_downsample)
-        source_specific_id_nonorm_list.append(specific_person_id_nonorm.clone())
-
-
-    # The person who provides id information (list)
-    target_id_norm_list = []
-    target_path = os.path.join(multisepcific_dir,'DST_*')
-    target_images_path = sorted(glob.glob(target_path))
-
-    for target_image_path in target_images_path:
-        img_a_whole = cv2.imread(target_image_path)
-        img_a_align_crop, _ = app.get(img_a_whole,crop_size)
-        img_a_align_crop_pil = Image.fromarray(cv2.cvtColor(img_a_align_crop[0],cv2.COLOR_BGR2RGB)) 
-        img_a = transformer_Arcface(img_a_align_crop_pil)
-        img_id = img_a.view(-1, img_a.shape[0], img_a.shape[1], img_a.shape[2])
-        # convert numpy to tensor
-        img_id = img_id.cuda()
-        #create latent id
-        img_id_downsample = F.interpolate(img_id, scale_factor=0.5)
-        latend_id = model.netArc(img_id_downsample)
-        latend_id = F.normalize(latend_id, p=2, dim=1)
-        target_id_norm_list.append(latend_id.clone())
-
-    assert len(target_id_norm_list) == len(source_specific_id_nonorm_list), "The number of images in source and target directory must be same !!!"
-
-    ############## Forward Pass ######################
-
-    pic_b = opt.pic_b_path
-    img_b_whole = cv2.imread(pic_b)
-
-    img_b_align_crop_list, b_mat_list = app.get(img_b_whole,crop_size)
-    # detect_results = None
-    swap_result_list = []
-
-    id_compare_values = [] 
-    b_align_crop_tenor_list = []
-    for b_align_crop in img_b_align_crop_list:
-
-        b_align_crop_tenor = _totensor(cv2.cvtColor(b_align_crop,cv2.COLOR_BGR2RGB))[None,...].cuda()
-
-        b_align_crop_tenor_arcnorm = spNorm(b_align_crop_tenor)
-        b_align_crop_tenor_arcnorm_downsample = F.interpolate(b_align_crop_tenor_arcnorm, scale_factor=0.5)
-        b_align_crop_id_nonorm = model.netArc(b_align_crop_tenor_arcnorm_downsample)
-
-        id_compare_values.append([])
-        for source_specific_id_nonorm_tmp in source_specific_id_nonorm_list:
-            id_compare_values[-1].append(mse(b_align_crop_id_nonorm,source_specific_id_nonorm_tmp).detach().cpu().numpy())
-        b_align_crop_tenor_list.append(b_align_crop_tenor)
-
-    id_compare_values_array = np.array(id_compare_values).transpose(1,0)
-    min_indexs = np.argmin(id_compare_values_array,axis=0)
-    min_value = np.min(id_compare_values_array,axis=0)
-
-    swap_result_list = [] 
-    swap_result_matrix_list = []
-
-    for tmp_index, min_index in enumerate(min_indexs):
-        if min_value[tmp_index] < opt.id_thres:
-            swap_result = model(None, b_align_crop_tenor_list[tmp_index], target_id_norm_list[min_index], None, True)[0]
-            swap_result_list.append(swap_result)
-            swap_result_matrix_list.append(b_mat_list[tmp_index])
+    with torch.no_grad():
+        # The specific person to be swapped(source)
+
+        source_specific_id_nonorm_list = []
+        source_path = os.path.join(multisepcific_dir,'SRC_*')
+        source_specific_images_path = sorted(glob.glob(source_path))
+
+        for source_specific_image_path in source_specific_images_path:
+            specific_person_whole = cv2.imread(source_specific_image_path)
+            specific_person_align_crop, _ = app.get(specific_person_whole,crop_size)
+            specific_person_align_crop_pil = Image.fromarray(cv2.cvtColor(specific_person_align_crop[0],cv2.COLOR_BGR2RGB)) 
+            specific_person = transformer_Arcface(specific_person_align_crop_pil)
+            specific_person = specific_person.view(-1, specific_person.shape[0], specific_person.shape[1], specific_person.shape[2])
+            # convert numpy to tensor
+            specific_person = specific_person.cuda()
+            #create latent id
+            specific_person_downsample = F.interpolate(specific_person, scale_factor=0.5)
+            specific_person_id_nonorm = model.netArc(specific_person_downsample)
+            source_specific_id_nonorm_list.append(specific_person_id_nonorm.clone())
+
+
+        # The person who provides id information (list)
+        target_id_norm_list = []
+        target_path = os.path.join(multisepcific_dir,'DST_*')
+        target_images_path = sorted(glob.glob(target_path))
+
+        for target_image_path in target_images_path:
+            img_a_whole = cv2.imread(target_image_path)
+            img_a_align_crop, _ = app.get(img_a_whole,crop_size)
+            img_a_align_crop_pil = Image.fromarray(cv2.cvtColor(img_a_align_crop[0],cv2.COLOR_BGR2RGB)) 
+            img_a = transformer_Arcface(img_a_align_crop_pil)
+            img_id = img_a.view(-1, img_a.shape[0], img_a.shape[1], img_a.shape[2])
+            # convert numpy to tensor
+            img_id = img_id.cuda()
+            #create latent id
+            img_id_downsample = F.interpolate(img_id, scale_factor=0.5)
+            latend_id = model.netArc(img_id_downsample)
+            latend_id = F.normalize(latend_id, p=2, dim=1)
+            target_id_norm_list.append(latend_id.clone())
+
+        assert len(target_id_norm_list) == len(source_specific_id_nonorm_list), "The number of images in source and target directory must be same !!!"
+
+        ############## Forward Pass ######################
+
+        pic_b = opt.pic_b_path
+        img_b_whole = cv2.imread(pic_b)
+
+        img_b_align_crop_list, b_mat_list = app.get(img_b_whole,crop_size)
+        # detect_results = None
+        swap_result_list = []
+
+        id_compare_values = [] 
+        b_align_crop_tenor_list = []
+        for b_align_crop in img_b_align_crop_list:
+
+            b_align_crop_tenor = _totensor(cv2.cvtColor(b_align_crop,cv2.COLOR_BGR2RGB))[None,...].cuda()
+
+            b_align_crop_tenor_arcnorm = spNorm(b_align_crop_tenor)
+            b_align_crop_tenor_arcnorm_downsample = F.interpolate(b_align_crop_tenor_arcnorm, scale_factor=0.5)
+            b_align_crop_id_nonorm = model.netArc(b_align_crop_tenor_arcnorm_downsample)
+
+            id_compare_values.append([])
+            for source_specific_id_nonorm_tmp in source_specific_id_nonorm_list:
+                id_compare_values[-1].append(mse(b_align_crop_id_nonorm,source_specific_id_nonorm_tmp).detach().cpu().numpy())
+            b_align_crop_tenor_list.append(b_align_crop_tenor)
+
+        id_compare_values_array = np.array(id_compare_values).transpose(1,0)
+        min_indexs = np.argmin(id_compare_values_array,axis=0)
+        min_value = np.min(id_compare_values_array,axis=0)
+
+        swap_result_list = [] 
+        swap_result_matrix_list = []
+        swap_result_ori_pic_list = []
+
+        for tmp_index, min_index in enumerate(min_indexs):
+            if min_value[tmp_index] < opt.id_thres:
+                swap_result = model(None, b_align_crop_tenor_list[tmp_index], target_id_norm_list[min_index], None, True)[0]
+                swap_result_list.append(swap_result)
+                swap_result_matrix_list.append(b_mat_list[tmp_index])
+                swap_result_ori_pic_list.append(b_align_crop_tenor_list[tmp_index])
+            else:
+                pass
+
+        if len(swap_result_list) !=0:
+
+            if opt.use_mask:
+                n_classes = 19
+                net = BiSeNet(n_classes=n_classes)
+                net.cuda()
+                save_pth = os.path.join('./parsing_model/checkpoint', '79999_iter.pth')
+                net.load_state_dict(torch.load(save_pth))
+                net.eval()
+            else:
+                net =None
+        
+            reverse2wholeimage(swap_result_ori_pic_list, swap_result_list, swap_result_matrix_list, crop_size, img_b_whole, logoclass,\
+                os.path.join(opt.output_path, 'result_whole_swap_multispecific.jpg'), opt.no_simswaplogo,pasring_model =net,use_mask=opt.use_mask, norm = spNorm)
+
+            print(' ')
+
+            print('************ Done ! ************')
+        
         else:
-            pass
-
-    if len(swap_result_list) !=0:
-    
-        reverse2wholeimage(swap_result_list, swap_result_matrix_list, crop_size, img_b_whole, logoclass, os.path.join(opt.output_path, 'result_whole_swap_multispecific.jpg'), opt.no_simswaplogo)
-
-        print(' ')
-
-        print('************ Done ! ************')
-    
-    else:
-        print('The people you specified are not found on the picture: {}'.format(pic_b))
+            print('The people you specified are not found on the picture: {}'.format(pic_b))
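
For context, the sketch below shows how a 19-class BiSeNet parsing map is typically turned into a blending mask. This is a hypothetical illustration: the actual mask handling lives in util/reverse2original.py, which this diff does not show, and the 512x512 input size, the face class indices, and the helper name face_mask all follow the face-parsing.PyTorch convention rather than this commit's code.

    # Hypothetical sketch; not code from this commit (see note above).
    import torch
    import torch.nn.functional as F

    FACE_CLASSES = list(range(1, 14))   # skin, brows, eyes, nose, lips, ... (assumed indices)

    def face_mask(net, crop_tensor, norm):
        # crop_tensor: [1, 3, H, W] in [0, 1]; norm is the SpecificNorm instance
        x = F.interpolate(norm(crop_tensor), size=(512, 512),
                          mode='bilinear', align_corners=False)
        parsing = net(x)[0].argmax(1, keepdim=True)        # [1, 1, 512, 512]
        mask = torch.zeros_like(parsing, dtype=torch.float32)
        for c in FACE_CLASSES:
            mask[parsing == c] = 1.0                       # keep face-region pixels
        # back to crop resolution; blend as: mask * swapped + (1 - mask) * original
        return F.interpolate(mask, size=crop_tensor.shape[2:],
                             mode='bilinear', align_corners=False)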

+ 42 - 26
test_wholeimage_swapmulti.py

@@ -12,6 +12,8 @@ from insightface_func.face_detect_crop_multi import Face_detect_crop
 from util.reverse2original import reverse2wholeimage
 import os
 from util.add_watermark import watermark_image
+from util.norm import SpecificNorm
+from parsing_model.model import BiSeNet
 
 def lcm(a, b): return abs(a * b) / fractions.gcd(a, b) if a and b else 0
 
@@ -35,46 +37,60 @@ if __name__ == '__main__':
     logoclass = watermark_image('./simswaplogo/simswaplogo.png')
     model = create_model(opt)
     model.eval()
-
+    spNorm =SpecificNorm()
 
     app = Face_detect_crop(name='antelope', root='./insightface_func/models')
     app.prepare(ctx_id= 0, det_thresh=0.6, det_size=(640,640))
 
-    pic_a = opt.pic_a_path
+    with torch.no_grad():
+        pic_a = opt.pic_a_path
+
+        img_a_whole = cv2.imread(pic_a)
+        img_a_align_crop, _ = app.get(img_a_whole,crop_size)
+        img_a_align_crop_pil = Image.fromarray(cv2.cvtColor(img_a_align_crop[0],cv2.COLOR_BGR2RGB)) 
+        img_a = transformer_Arcface(img_a_align_crop_pil)
+        img_id = img_a.view(-1, img_a.shape[0], img_a.shape[1], img_a.shape[2])
 
-    img_a_whole = cv2.imread(pic_a)
-    img_a_align_crop, _ = app.get(img_a_whole,crop_size)
-    img_a_align_crop_pil = Image.fromarray(cv2.cvtColor(img_a_align_crop[0],cv2.COLOR_BGR2RGB)) 
-    img_a = transformer_Arcface(img_a_align_crop_pil)
-    img_id = img_a.view(-1, img_a.shape[0], img_a.shape[1], img_a.shape[2])
+        # convert numpy to tensor
+        img_id = img_id.cuda()
 
-    # convert numpy to tensor
-    img_id = img_id.cuda()
+        #create latent id
+        img_id_downsample = F.interpolate(img_id, scale_factor=0.5)
+        latend_id = model.netArc(img_id_downsample)
+        latend_id = F.normalize(latend_id, p=2, dim=1)
 
-    #create latent id
-    img_id_downsample = F.interpolate(img_id, scale_factor=0.5)
-    latend_id = model.netArc(img_id_downsample)
-    latend_id = F.normalize(latend_id, p=2, dim=1)
 
+        ############## Forward Pass ######################
 
-    ############## Forward Pass ######################
+        pic_b = opt.pic_b_path
+        img_b_whole = cv2.imread(pic_b)
 
-    pic_b = opt.pic_b_path
-    img_b_whole = cv2.imread(pic_b)
+        img_b_align_crop_list, b_mat_list = app.get(img_b_whole,crop_size)
+        # detect_results = None
+        swap_result_list = []
+        b_align_crop_tenor_list = []
 
-    img_b_align_crop_list, b_mat_list = app.get(img_b_whole,crop_size)
-    # detect_results = None
-    swap_result_list = []
+        for b_align_crop in img_b_align_crop_list:
 
-    for b_align_crop in img_b_align_crop_list:
+            b_align_crop_tenor = _totensor(cv2.cvtColor(b_align_crop,cv2.COLOR_BGR2RGB))[None,...].cuda()
 
-        b_align_crop_tenor = _totensor(cv2.cvtColor(b_align_crop,cv2.COLOR_BGR2RGB))[None,...].cuda()
+            swap_result = model(None, b_align_crop_tenor, latend_id, None, True)[0]
+            swap_result_list.append(swap_result)
+            b_align_crop_tenor_list.append(b_align_crop_tenor)
 
-        swap_result = model(None, b_align_crop_tenor, latend_id, None, True)[0]
-        swap_result_list.append(swap_result)
 
+        if opt.use_mask:
+            n_classes = 19
+            net = BiSeNet(n_classes=n_classes)
+            net.cuda()
+            save_pth = os.path.join('./parsing_model/checkpoint', '79999_iter.pth')
+            net.load_state_dict(torch.load(save_pth))
+            net.eval()
+        else:
+            net =None
 
-    reverse2wholeimage(swap_result_list, b_mat_list, crop_size, img_b_whole, logoclass, os.path.join(opt.output_path, 'result_whole_swapmulti.jpg'),opt.no_simswaplogo)
-    print(' ')
+        reverse2wholeimage(b_align_crop_tenor_list,swap_result_list, b_mat_list, crop_size, img_b_whole, logoclass, \
+            os.path.join(opt.output_path, 'result_whole_swapmulti.jpg'),opt.no_simswaplogo,pasring_model =net,use_mask=opt.use_mask, norm = spNorm)
+        print(' ')
 
-    print('************ Done ! ************')
+        print('************ Done ! ************')
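
SpecificNorm, newly imported here from util.norm and passed along as norm=spNorm, is defined outside this diff. The sketch below shows the assumed behavior: a fixed channel-wise normalization applied before the parsing/ArcFace networks. The ImageNet statistics are an assumption about what those checkpoints expect.

    # Assumed behavior of util/norm.SpecificNorm (module not shown in this diff).
    import torch
    import torch.nn as nn

    class SpecificNormSketch(nn.Module):
        def __init__(self):
            super().__init__()
            # ImageNet mean/std -- an assumption, see note above
            self.register_buffer('mean', torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
            self.register_buffer('std', torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))

        def forward(self, x):
            # x: [N, 3, H, W] in [0, 1]
            return (x - self.mean) / self.std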

+ 43 - 26
test_wholeimage_swapsingle.py

@@ -12,6 +12,8 @@ from insightface_func.face_detect_crop_single import Face_detect_crop
 from util.reverse2original import reverse2wholeimage
 import os
 from util.add_watermark import watermark_image
+from util.norm import SpecificNorm
+from parsing_model.model import BiSeNet
 
 def lcm(a, b): return abs(a * b) / fractions.gcd(a, b) if a and b else 0
 
@@ -35,45 +37,60 @@ if __name__ == '__main__':
     model = create_model(opt)
     model.eval()
 
-
+    spNorm =SpecificNorm()
     app = Face_detect_crop(name='antelope', root='./insightface_func/models')
     app.prepare(ctx_id= 0, det_thresh=0.6, det_size=(640,640))
 
-    pic_a = opt.pic_a_path
+    with torch.no_grad():
+        pic_a = opt.pic_a_path
+
+        img_a_whole = cv2.imread(pic_a)
+        img_a_align_crop, _ = app.get(img_a_whole,crop_size)
+        img_a_align_crop_pil = Image.fromarray(cv2.cvtColor(img_a_align_crop[0],cv2.COLOR_BGR2RGB)) 
+        img_a = transformer_Arcface(img_a_align_crop_pil)
+        img_id = img_a.view(-1, img_a.shape[0], img_a.shape[1], img_a.shape[2])
+
+        # convert numpy to tensor
+        img_id = img_id.cuda()
 
-    img_a_whole = cv2.imread(pic_a)
-    img_a_align_crop, _ = app.get(img_a_whole,crop_size)
-    img_a_align_crop_pil = Image.fromarray(cv2.cvtColor(img_a_align_crop[0],cv2.COLOR_BGR2RGB)) 
-    img_a = transformer_Arcface(img_a_align_crop_pil)
-    img_id = img_a.view(-1, img_a.shape[0], img_a.shape[1], img_a.shape[2])
+        #create latent id
+        img_id_downsample = F.interpolate(img_id, scale_factor=0.5)
+        latend_id = model.netArc(img_id_downsample)
+        latend_id = F.normalize(latend_id, p=2, dim=1)
 
-    # convert numpy to tensor
-    img_id = img_id.cuda()
 
-    #create latent id
-    img_id_downsample = F.interpolate(img_id, scale_factor=0.5)
-    latend_id = model.netArc(img_id_downsample)
-    latend_id = F.normalize(latend_id, p=2, dim=1)
+        ############## Forward Pass ######################
 
+        pic_b = opt.pic_b_path
+        img_b_whole = cv2.imread(pic_b)
 
-    ############## Forward Pass ######################
+        img_b_align_crop_list, b_mat_list = app.get(img_b_whole,crop_size)
+        # detect_results = None
+        swap_result_list = []
 
-    pic_b = opt.pic_b_path
-    img_b_whole = cv2.imread(pic_b)
+        b_align_crop_tenor_list = []
 
-    img_b_align_crop_list, b_mat_list = app.get(img_b_whole,crop_size)
-    # detect_results = None
-    swap_result_list = []
+        for b_align_crop in img_b_align_crop_list:
 
-    for b_align_crop in img_b_align_crop_list:
+            b_align_crop_tenor = _totensor(cv2.cvtColor(b_align_crop,cv2.COLOR_BGR2RGB))[None,...].cuda()
 
-        b_align_crop_tenor = _totensor(cv2.cvtColor(b_align_crop,cv2.COLOR_BGR2RGB))[None,...].cuda()
+            swap_result = model(None, b_align_crop_tenor, latend_id, None, True)[0]
+            swap_result_list.append(swap_result)
+            b_align_crop_tenor_list.append(b_align_crop_tenor)
 
-        swap_result = model(None, b_align_crop_tenor, latend_id, None, True)[0]
-        swap_result_list.append(swap_result)
+        if opt.use_mask:
+            n_classes = 19
+            net = BiSeNet(n_classes=n_classes)
+            net.cuda()
+            save_pth = os.path.join('./parsing_model/checkpoint', '79999_iter.pth')
+            net.load_state_dict(torch.load(save_pth))
+            net.eval()
+        else:
+            net =None
 
-    reverse2wholeimage(swap_result_list, b_mat_list, crop_size, img_b_whole, logoclass, os.path.join(opt.output_path, 'result_whole_swapsingle.jpg'), opt.no_simswaplogo)
+        reverse2wholeimage(b_align_crop_tenor_list, swap_result_list, b_mat_list, crop_size, img_b_whole, logoclass, \
+            os.path.join(opt.output_path, 'result_whole_swapsingle.jpg'), opt.no_simswaplogo,pasring_model =net,use_mask=opt.use_mask, norm = spNorm)
 
-    print(' ')
+        print(' ')
 
-    print('************ Done ! ************')
+        print('************ Done ! ************')

+ 13 - 1
test_wholeimage_swapspecific.py

@@ -14,6 +14,7 @@ import os
 from util.add_watermark import watermark_image
 import torch.nn as nn
 from util.norm import SpecificNorm
+from parsing_model.model import BiSeNet
 
 def lcm(a, b): return abs(a * b) / fractions.gcd(a, b) if a and b else 0
 
@@ -110,11 +111,22 @@ if __name__ == '__main__':
     min_index = np.argmin(id_compare_values_array)
     min_value = id_compare_values_array[min_index]
 
+    if opt.use_mask:
+        n_classes = 19
+        net = BiSeNet(n_classes=n_classes)
+        net.cuda()
+        save_pth = os.path.join('./parsing_model/checkpoint', '79999_iter.pth')
+        net.load_state_dict(torch.load(save_pth))
+        net.eval()
+    else:
+        net =None
+
     if min_value < opt.id_thres:
 
         swap_result = model(None, b_align_crop_tenor_list[min_index], latend_id, None, True)[0]
 
-        reverse2wholeimage([swap_result], [b_mat_list[min_index]], crop_size, img_b_whole, logoclass, os.path.join(opt.output_path, 'result_whole_swapspecific.jpg'), opt.no_simswaplogo)
+        reverse2wholeimage([b_align_crop_tenor_list[min_index]], [swap_result], [b_mat_list[min_index]], crop_size, img_b_whole, logoclass, \
+            os.path.join(opt.output_path, 'result_whole_swapspecific.jpg'), opt.no_simswaplogo,pasring_model =net,use_mask=opt.use_mask, norm = spNorm)
 
         print(' ')