import os
import glob
import shutil

import cv2
import torch
import numpy as np
from tqdm import tqdm
from moviepy.editor import AudioFileClip, VideoFileClip
from moviepy.video.io.ImageSequenceClip import ImageSequenceClip

from util.reverse2original import reverse2wholeimage
from util.add_watermark import watermark_image
from util.norm import SpecificNorm
from parsing_model.model import BiSeNet


def _totensor(array):
    """Convert an HWC uint8 RGB image to a CHW float tensor scaled to [0, 1]."""
    tensor = torch.from_numpy(array)
    img = tensor.transpose(0, 1).transpose(0, 2).contiguous()
    return img.float().div(255)


def video_swap(video_path, id_vetor, swap_model, detect_model, save_path,
               temp_results_dir='./temp_results', crop_size=224,
               no_simswaplogo=False, use_mask=False):
    """Swap the identity given by `id_vetor` onto every detected face in a video.

    Processed frames are written to `temp_results_dir` and then re-encoded
    (with the original audio track, if any) to `save_path`.
    """
    # Check whether the source video has an audio track.
    video_forcheck = VideoFileClip(video_path)
    no_audio = video_forcheck.audio is None
    del video_forcheck

    if not no_audio:
        video_audio_clip = AudioFileClip(video_path)

    video = cv2.VideoCapture(video_path)
    logoclass = watermark_image('./simswaplogo/simswaplogo.png')
    frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = video.get(cv2.CAP_PROP_FPS)

    # Start from a clean temporary directory.
    if os.path.exists(temp_results_dir):
        shutil.rmtree(temp_results_dir)
    os.makedirs(temp_results_dir)

    spNorm = SpecificNorm()

    # Optional face-parsing network used to build the blending mask.
    if use_mask:
        n_classes = 19
        net = BiSeNet(n_classes=n_classes)
        net.cuda()
        save_pth = os.path.join('./parsing_model/checkpoint', '79999_iter.pth')
        net.load_state_dict(torch.load(save_pth))
        net.eval()
    else:
        net = None

    for frame_index in tqdm(range(frame_count)):
        ret, frame = video.read()
        if not ret:
            break

        detect_results = detect_model.get(frame, crop_size)

        if detect_results is not None:
            # detect_results holds the aligned face crops and their affine matrices.
            frame_align_crop_list = detect_results[0]
            frame_mat_list = detect_results[1]
            swap_result_list = []
            frame_align_crop_tenor_list = []

            for frame_align_crop in frame_align_crop_list:
                # OpenCV delivers BGR; the generator expects RGB tensors.
                frame_align_crop_tenor = _totensor(
                    cv2.cvtColor(frame_align_crop, cv2.COLOR_BGR2RGB))[None, ...].cuda()

                swap_result = swap_model(None, frame_align_crop_tenor, id_vetor, None, True)[0]

                swap_result_list.append(swap_result)
                frame_align_crop_tenor_list.append(frame_align_crop_tenor)

            # Paste the swapped crops back into the full frame and save it to disk.
            reverse2wholeimage(frame_align_crop_tenor_list, swap_result_list, frame_mat_list,
                               crop_size, frame, logoclass,
                               os.path.join(temp_results_dir, 'frame_{:0>7d}.jpg'.format(frame_index)),
                               no_simswaplogo, pasring_model=net, use_mask=use_mask, norm=spNorm)
        else:
            # No face detected: write the original frame (optionally watermarked).
            frame = frame.astype(np.uint8)
            if not no_simswaplogo:
                frame = logoclass.apply_frames(frame)
            cv2.imwrite(os.path.join(temp_results_dir, 'frame_{:0>7d}.jpg'.format(frame_index)), frame)

    video.release()

    # Reassemble the processed frames into a video, restoring the audio track.
    path = os.path.join(temp_results_dir, '*.jpg')
    image_filenames = sorted(glob.glob(path))

    clips = ImageSequenceClip(image_filenames, fps=fps)

    if not no_audio:
        clips = clips.set_audio(video_audio_clip)

    clips.write_videofile(save_path, audio_codec='aac')
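

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module). The objects
# below are assumptions: in SimSwap the generator and face detector are built
# by the project's test scripts, and the identity embedding comes from the
# source face image. This only shows the expected call pattern of video_swap.
#
#   swap_model   = ...  # generator callable as swap_model(None, x, id, None, True)
#   detect_model = ...  # detector exposing detect_model.get(frame, crop_size)
#   latend_id    = ...  # torch tensor with the source face's identity embedding
#
#   video_swap('./input/source_video.mp4', latend_id, swap_model, detect_model,
#              './output/result.mp4', temp_results_dir='./temp_results',
#              crop_size=224, no_simswaplogo=False, use_mask=True)
# ---------------------------------------------------------------------------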