chenxuanhong 4 years ago
Parent
Current commit
01a8d6d0a6

+ 16 - 1
.gitignore

@@ -20,6 +20,8 @@ parts/
 sdist/
 var/
 wheels/
+pip-wheel-metadata/
+share/python-wheels/
 *.egg-info/
 .installed.cfg
 *.egg
@@ -45,6 +47,7 @@ htmlcov/
 nosetests.xml
 coverage.xml
 *.cover
+*.py,cover
 .hypothesis/
 .pytest_cache/
 
@@ -56,6 +59,7 @@ coverage.xml
 *.log
 local_settings.py
 db.sqlite3
+db.sqlite3-journal
 
 # Flask stuff:
 instance/
@@ -80,8 +84,19 @@ ipython_config.py
 # pyenv
 .python-version
 
-# celery beat schedule file
+# pipenv
+#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+#   However, in case of collaboration, if having platform-specific dependencies or dependencies
+#   having no cross-platform support, pipenv may install dependencies that don't work, or not
+#   install all needed dependencies.
+#Pipfile.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
 celerybeat-schedule
+celerybeat.pid
 
 # SageMath parsed files
 *.sage.py

+ 16 - 2
README.md

@@ -1,2 +1,16 @@
-# SimSwap
- A face swapping framework
+# SimSwap: An Efficient Framework For High Fidelity Face Swapping
+## Proceedings of the 28th ACM International Conference on Multimedia
+## The official PyTorch implementation
+[[Conference paper]](https://dl.acm.org/doi/10.1145/3394171.3413630)
+
+![Results1](/doc/img/results1.PNG)
+![Results2](/doc/img/results2.PNG)
+
+Requires Python 3.5 and PyTorch 1.3.0.
+
+
+Use this command to test face swapping between two images:
+
+```
+python test_one_image.py --isTrain false --name people --Arc_path models/BEST_checkpoint.tar --pic_a_path crop_224/mars.jpg --pic_b_path crop_224/ds.jpg --output_path output/
+```
+
+`--name` refers to the checkpoint name.

+ 94 - 0
data/CelebA_class.py

@@ -0,0 +1,94 @@
+import torch
+from torch.utils.data import Dataset
+import os
+import numpy as np
+import random
+from torchvision import transforms
+from PIL import Image
+
+class FaceDataSet(Dataset):
+    def __init__(self, dataset_path, batch_size):
+        super(FaceDataSet, self).__init__()
+
+        '''picture_dir_list = []
+        for i in range(self.people_num):
+            picture_dir_list.append('/data/home/renwangchen/vgg_align_224/'+self.people_list[i])
+
+        self.people_pic_list = []
+        for i in range(self.people_num):
+            pic_list = os.listdir(picture_dir_list[i])
+            person_pic_list = []
+            for j in range(len(pic_list)):
+                pic_dir = os.path.join(picture_dir_list[i], pic_list[j])
+                person_pic_list.append(pic_dir)
+            self.people_pic_list.append(person_pic_list)'''
+
+        pic_dir = '/data/home/renwangchen/CelebA_224/'
+        latent_dir = '/data/home/renwangchen/CelebA_latent/'
+
+        tmp_list = os.listdir(pic_dir)
+        self.pic_list = []
+        self.latent_list = []
+        for i in range(len(tmp_list)):
+            self.pic_list.append(pic_dir + tmp_list[i])
+            self.latent_list.append(latent_dir + tmp_list[i][:-3] + 'npy')
+
+        self.pic_list = self.pic_list[:29984]
+        '''for i in range(29984):
+            print(self.pic_list[i])'''
+        self.latent_list = self.latent_list[:29984]
+
+        self.people_num = len(self.pic_list)
+
+        self.type = 1
+        self.bs = batch_size
+        self.count = 0
+
+        self.transformer = transforms.Compose([
+            transforms.ToTensor(),
+            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
+        ])
+        
+    def __getitem__(self, index):
+        p1 = random.randint(0, self.people_num - 1)
+        p2 = p1
+
+        if self.type == 0:
+            # same identity: keep p2 == p1 (identity-reconstruction pair)
+            pass
+        else:
+            # different identities: resample until p2 != p1
+            while p2 == p1:
+                p2 = random.randint(0, self.people_num - 1)
+
+        pic_id_dir = self.pic_list[p1]
+        pic_att_dir = self.pic_list[p2]
+        latent_id_dir = self.latent_list[p1]
+        latent_att_dir = self.latent_list[p2]
+
+        img_id = Image.open(pic_id_dir).convert('RGB')
+        img_id = self.transformer(img_id)
+        latent_id = np.load(latent_id_dir)
+        latent_id = latent_id / np.linalg.norm(latent_id)
+        latent_id = torch.from_numpy(latent_id)
+
+        img_att = Image.open(pic_att_dir).convert('RGB')
+        img_att = self.transformer(img_att)
+        latent_att = np.load(latent_att_dir)
+        latent_att = latent_att / np.linalg.norm(latent_att)
+        latent_att = torch.from_numpy(latent_att)
+        
+        self.count += 1
+        data_type = self.type
+        if self.count == self.bs:
+            self.type = 1 - self.type
+            self.count = 0
+        
+        return img_id, img_att, latent_id, latent_att, data_type
+        
+    def __len__(self):
+        return len(self.pic_list)
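
A note on consumption: `__getitem__` toggles `self.type`/`self.count` so that every `batch_size` samples the loader alternates between identity-reconstruction pairs (p2 == p1) and cross-identity pairs. A hypothetical usage sketch (the path and batch size are assumptions; `dataset_path` is ignored because the directories above are hardcoded):

```python
from torch.utils.data import DataLoader
from data.CelebA_class import FaceDataSet

batch_size = 16
dataset = FaceDataSet('/unused/path', batch_size)
# num_workers=0 is deliberate: the same/cross-identity toggle relies on
# __getitem__ mutating self.type and self.count in a single process.
loader = DataLoader(dataset, batch_size=batch_size, shuffle=True,
                    num_workers=0, drop_last=True)
img_id, img_att, latent_id, latent_att, data_type = next(iter(loader))
# expected shapes, assuming 224x224 crops and 512-d latents:
# img_id: (16, 3, 224, 224), latent_id: (16, 512)
```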

+ 76 - 0
data/aligned_dataset.py

@@ -0,0 +1,76 @@
+import os.path
+from data.base_dataset import BaseDataset, get_params, get_transform, normalize
+from data.image_folder import make_dataset
+from PIL import Image
+
+class AlignedDataset(BaseDataset):
+    def initialize(self, opt):
+        self.opt = opt
+        self.root = opt.dataroot    
+
+        ### input A (label maps)
+        dir_A = '_A' if self.opt.label_nc == 0 else '_label'
+        self.dir_A = os.path.join(opt.dataroot, opt.phase + dir_A)
+        self.A_paths = sorted(make_dataset(self.dir_A))
+
+        ### input B (real images)
+        if opt.isTrain or opt.use_encoded_image:
+            dir_B = '_B' if self.opt.label_nc == 0 else '_img'
+            self.dir_B = os.path.join(opt.dataroot, opt.phase + dir_B)  
+            self.B_paths = sorted(make_dataset(self.dir_B))
+
+        ### instance maps
+        if not opt.no_instance:
+            self.dir_inst = os.path.join(opt.dataroot, opt.phase + '_inst')
+            self.inst_paths = sorted(make_dataset(self.dir_inst))
+
+        ### load precomputed instance-wise encoded features
+        if opt.load_features:                              
+            self.dir_feat = os.path.join(opt.dataroot, opt.phase + '_feat')
+            print('----------- loading features from %s ----------' % self.dir_feat)
+            self.feat_paths = sorted(make_dataset(self.dir_feat))
+
+        self.dataset_size = len(self.A_paths) 
+      
+    def __getitem__(self, index):        
+        ### input A (label maps)
+        A_path = self.A_paths[index]              
+        A = Image.open(A_path)        
+        params = get_params(self.opt, A.size)
+        if self.opt.label_nc == 0:
+            transform_A = get_transform(self.opt, params)
+            A_tensor = transform_A(A.convert('RGB'))
+        else:
+            transform_A = get_transform(self.opt, params, method=Image.NEAREST, normalize=False)
+            A_tensor = transform_A(A) * 255.0
+
+        B_tensor = inst_tensor = feat_tensor = 0
+        ### input B (real images)
+        if self.opt.isTrain or self.opt.use_encoded_image:
+            B_path = self.B_paths[index]   
+            B = Image.open(B_path).convert('RGB')
+            transform_B = get_transform(self.opt, params)      
+            B_tensor = transform_B(B)
+
+        ### if using instance maps        
+        if not self.opt.no_instance:
+            inst_path = self.inst_paths[index]
+            inst = Image.open(inst_path)
+            inst_tensor = transform_A(inst)
+
+            if self.opt.load_features:
+                feat_path = self.feat_paths[index]            
+                feat = Image.open(feat_path).convert('RGB')
+                norm = normalize()
+                feat_tensor = norm(transform_A(feat))                            
+
+        input_dict = {'label': A_tensor, 'inst': inst_tensor, 'image': B_tensor, 
+                      'feat': feat_tensor, 'path': A_path}
+
+        return input_dict
+
+    def __len__(self):
+        return len(self.A_paths) // self.opt.batchSize * self.opt.batchSize
+
+    def name(self):
+        return 'AlignedDataset'
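
A minimal sketch of driving this dataset directly; the option names mirror the pix2pixHD-style options the class reads, and the concrete values are assumptions:

```python
from types import SimpleNamespace
from data.aligned_dataset import AlignedDataset

opt = SimpleNamespace(dataroot='./datasets/faces', phase='train', label_nc=0,
                      isTrain=True, use_encoded_image=False, no_instance=True,
                      load_features=False, resize_or_crop='resize_and_crop',
                      loadSize=286, fineSize=256, no_flip=False, batchSize=1)

ds = AlignedDataset()
ds.initialize(opt)  # expects ./datasets/faces/train_A and ./datasets/faces/train_B
sample = ds[0]      # dict with 'label', 'inst', 'image', 'feat', 'path'
```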

+ 90 - 0
data/base_dataset.py

@@ -0,0 +1,90 @@
+import torch.utils.data as data
+from PIL import Image
+import torchvision.transforms as transforms
+import numpy as np
+import random
+
+class BaseDataset(data.Dataset):
+    def __init__(self):
+        super(BaseDataset, self).__init__()
+
+    def name(self):
+        return 'BaseDataset'
+
+    def initialize(self, opt):
+        pass
+
+def get_params(opt, size):
+    w, h = size
+    new_h = h
+    new_w = w
+    if opt.resize_or_crop == 'resize_and_crop':
+        new_h = new_w = opt.loadSize            
+    elif opt.resize_or_crop == 'scale_width_and_crop':
+        new_w = opt.loadSize
+        new_h = opt.loadSize * h // w
+
+    x = random.randint(0, np.maximum(0, new_w - opt.fineSize))
+    y = random.randint(0, np.maximum(0, new_h - opt.fineSize))
+    
+    flip = random.random() > 0.5
+    return {'crop_pos': (x, y), 'flip': flip}
+
+def get_transform(opt, params, method=Image.BICUBIC, normalize=True):
+    transform_list = []
+    if 'resize' in opt.resize_or_crop:
+        osize = [opt.loadSize, opt.loadSize]
+        transform_list.append(transforms.Resize(osize, method))  # transforms.Scale was renamed Resize
+    elif 'scale_width' in opt.resize_or_crop:
+        transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.loadSize, method)))
+        
+    if 'crop' in opt.resize_or_crop:
+        transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.fineSize)))
+
+    if opt.resize_or_crop == 'none':
+        base = float(2 ** opt.n_downsample_global)
+        if opt.netG == 'local':
+            base *= (2 ** opt.n_local_enhancers)
+        transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base, method)))
+
+    if opt.isTrain and not opt.no_flip:
+        transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip'])))
+
+    transform_list += [transforms.ToTensor()]
+
+    if normalize:
+        transform_list += [transforms.Normalize((0.5, 0.5, 0.5),
+                                                (0.5, 0.5, 0.5))]
+    return transforms.Compose(transform_list)
+
+def normalize():    
+    return transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
+
+def __make_power_2(img, base, method=Image.BICUBIC):
+    ow, oh = img.size        
+    h = int(round(oh / base) * base)
+    w = int(round(ow / base) * base)
+    if (h == oh) and (w == ow):
+        return img
+    return img.resize((w, h), method)
+
+def __scale_width(img, target_width, method=Image.BICUBIC):
+    ow, oh = img.size
+    if (ow == target_width):
+        return img    
+    w = target_width
+    h = int(target_width * oh / ow)    
+    return img.resize((w, h), method)
+
+def __crop(img, pos, size):
+    ow, oh = img.size
+    x1, y1 = pos
+    tw = th = size
+    if (ow > tw or oh > th):        
+        return img.crop((x1, y1, x1 + tw, y1 + th))
+    return img
+
+def __flip(img, flip):
+    if flip:
+        return img.transpose(Image.FLIP_LEFT_RIGHT)
+    return img
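
The point of splitting `get_params` from `get_transform`: sample the crop position and flip decision once, then reuse the same `params` for both images of a pair so their augmentation stays aligned. A short sketch (file names and option values are assumptions):

```python
from types import SimpleNamespace
from PIL import Image
from data.base_dataset import get_params, get_transform

opt = SimpleNamespace(resize_or_crop='resize_and_crop', loadSize=286,
                      fineSize=256, isTrain=True, no_flip=False)
A = Image.open('A.jpg').convert('RGB')
B = Image.open('B.jpg').convert('RGB')

params = get_params(opt, A.size)   # one random crop/flip decision...
a = get_transform(opt, params)(A)  # ...applied identically to A
b = get_transform(opt, params)(B)  # ...and to B
```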

+ 7 - 0
data/data_loader.py

@@ -0,0 +1,7 @@
+
+def CreateDataLoader(opt):
+    from data.custom_dataset_data_loader import CustomDatasetDataLoader
+    data_loader = CustomDatasetDataLoader()
+    print(data_loader.name())
+    data_loader.initialize(opt)
+    return data_loader
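
`CustomDatasetDataLoader` itself is not shown in this excerpt; assuming it keeps the pix2pixHD interface, usage would look roughly like:

```python
from data.data_loader import CreateDataLoader

data_loader = CreateDataLoader(opt)  # opt as produced by the option parser
dataset = data_loader.load_data()    # assumed method, mirroring pix2pixHD
for i, data in enumerate(dataset):
    pass  # data is the dict built by AlignedDataset.__getitem__
```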

Binary
doc/img/results1.PNG


Binary
doc/img/results2.PNG


+ 4 - 0
models/__init__.py

@@ -0,0 +1,4 @@
+from .models import ArcMarginModel
+from .models import ResNet
+from .models import IRBlock
+from .models import SEBlock

+ 91 - 0
models/base_model.py

@@ -0,0 +1,91 @@
+import os
+import torch
+import sys
+
+class BaseModel(torch.nn.Module):
+    def name(self):
+        return 'BaseModel'
+
+    def initialize(self, opt):
+        self.opt = opt
+        self.gpu_ids = opt.gpu_ids
+        self.isTrain = opt.isTrain
+        self.Tensor = torch.cuda.FloatTensor if self.gpu_ids else torch.Tensor
+        self.save_dir = os.path.join(opt.checkpoints_dir, opt.name)
+
+    def set_input(self, input):
+        self.input = input
+
+    def forward(self):
+        pass
+
+    # used in test time, no backprop
+    def test(self):
+        pass
+
+    def get_image_paths(self):
+        pass
+
+    def optimize_parameters(self):
+        pass
+
+    def get_current_visuals(self):
+        return self.input
+
+    def get_current_errors(self):
+        return {}
+
+    def save(self, label):
+        pass
+
+    # helper saving function that can be used by subclasses
+    def save_network(self, network, network_label, epoch_label, gpu_ids):
+        save_filename = '%s_net_%s.pth' % (epoch_label, network_label)
+        save_path = os.path.join(self.save_dir, save_filename)
+        torch.save(network.cpu().state_dict(), save_path)
+        if len(gpu_ids) and torch.cuda.is_available():
+            network.cuda()
+
+    # helper loading function that can be used by subclasses
+    def load_network(self, network, network_label, epoch_label, save_dir=''):        
+        save_filename = '%s_net_%s.pth' % (epoch_label, network_label)
+        if not save_dir:
+            save_dir = self.save_dir
+        save_path = os.path.join(save_dir, save_filename)        
+        if not os.path.isfile(save_path):
+            print('%s does not exist yet!' % save_path)
+            if network_label == 'G':
+                raise FileNotFoundError('Generator must exist!')
+        else:
+            #network.load_state_dict(torch.load(save_path))
+            try:
+                network.load_state_dict(torch.load(save_path))
+            except Exception:
+                pretrained_dict = torch.load(save_path)                
+                model_dict = network.state_dict()
+                try:
+                    pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}                    
+                    network.load_state_dict(pretrained_dict)
+                    if self.opt.verbose:
+                        print('Pretrained network %s has excess layers; only loading layers that are used' % network_label)
+                except Exception:
+                    print('Pretrained network %s has fewer layers; the following are not initialized:' % network_label)
+                    for k, v in pretrained_dict.items():                      
+                        if v.size() == model_dict[k].size():
+                            model_dict[k] = v
+
+                    if sys.version_info >= (3,0):
+                        not_initialized = set()
+                    else:
+                        from sets import Set
+                        not_initialized = Set()                    
+
+                    for k, v in model_dict.items():
+                        if k not in pretrained_dict or v.size() != pretrained_dict[k].size():
+                            not_initialized.add(k.split('.')[0])
+                    
+                    print(sorted(not_initialized))
+                    network.load_state_dict(model_dict)                  
+
+    def update_learning_rate(self):
+        pass
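
`save_network` and `load_network` agree on the `'<epoch>_net_<label>.pth'` naming under `opt.checkpoints_dir/opt.name`. A self-contained sketch of the convention (the toy subclass, option values, and network are assumptions):

```python
import os
from types import SimpleNamespace

import torch
from models.base_model import BaseModel

class ToyModel(BaseModel):
    pass  # inherits save_network / load_network

opt = SimpleNamespace(gpu_ids=[], isTrain=True,
                      checkpoints_dir='./checkpoints', name='people')
m = ToyModel()
m.initialize(opt)
os.makedirs(m.save_dir, exist_ok=True)  # save_network does not create the directory

net = torch.nn.Linear(4, 4)
m.save_network(net, 'G', 'latest', opt.gpu_ids)  # -> ./checkpoints/people/latest_net_G.pth
m.load_network(net, 'G', 'latest')               # reads the same file back
```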

+ 28 - 0
models/config.py

@@ -0,0 +1,28 @@
+import os
+
+import torch
+
+device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # sets device for model and PyTorch tensors
+
+# Model parameters
+image_w = 112
+image_h = 112
+channel = 3
+emb_size = 512
+
+# Training parameters
+num_workers = 1  # for data-loading; right now, only 1 works with h5py
+grad_clip = 5.  # clip gradients at an absolute value of 5
+print_freq = 100  # print training/validation stats every __ batches
+checkpoint = None  # path to checkpoint, None if none
+
+# Data parameters
+num_classes = 93431
+num_samples = 5179510
+DATA_DIR = 'data'
+# faces_ms1m_folder = 'data/faces_ms1m_112x112'
+faces_ms1m_folder = 'data/ms1m-retinaface-t1'
+path_imgidx = os.path.join(faces_ms1m_folder, 'train.idx')
+path_imgrec = os.path.join(faces_ms1m_folder, 'train.rec')
+IMG_DIR = 'data/images'
+pickle_file = 'data/faces_ms1m_112x112.pickle'
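
`path_imgidx`/`path_imgrec` point at MXNet RecordIO files (`train.idx`/`train.rec`). Assuming that format (mxnet is never imported in this commit, so this is an inference from the file names), reading one record would look like:

```python
import mxnet as mx  # assumption: not a dependency declared anywhere in this commit
from models.config import path_imgidx, path_imgrec

record = mx.recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r')
header, img_bytes = mx.recordio.unpack(record.read_idx(1))  # first sample
print(header.label)  # identity label stored with the packed image
```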

+ 242 - 0
models/fs_model.py

@@ -0,0 +1,242 @@
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import os
+from torch.autograd import Variable
+from util.image_pool import ImagePool
+from .base_model import BaseModel
+from . import networks
+from .fs_networks import Generator_Adain_Upsample, Discriminator
+
+class SpecificNorm(nn.Module):
+    def __init__(self, epsilon=1e-8):
+        """
+            @notice: avoid in-place ops.
+            https://discuss.pytorch.org/t/encounter-the-runtimeerror-one-of-the-variables-needed-for-gradient-computation-has-been-modified-by-an-inplace-operation/836/3
+        """
+        super(SpecificNorm, self).__init__()
+        self.mean = np.array([0.485, 0.456, 0.406])
+        self.mean = torch.from_numpy(self.mean).float().cuda()
+        self.mean = self.mean.view([1, 3, 1, 1])
+
+        self.std = np.array([0.229, 0.224, 0.225])
+        self.std = torch.from_numpy(self.std).float().cuda()
+        self.std = self.std.view([1, 3, 1, 1])
+
+    def forward(self, x):
+        mean = self.mean.expand([1, 3, x.shape[2], x.shape[3]])
+        std = self.std.expand([1, 3, x.shape[2], x.shape[3]])
+
+        x = (x - mean) / std
+
+        return x
+
+class fsModel(BaseModel):
+    def name(self):
+        return 'fsModel'
+
+    def init_loss_filter(self, use_gan_feat_loss, use_vgg_loss):
+        flags = (True, use_gan_feat_loss, use_vgg_loss, True, True, True, True, True)
+
+        def loss_filter(g_gan, g_gan_feat, g_vgg, g_id, g_rec, g_mask, d_real, d_fake):
+            return [l for (l, f) in zip((g_gan, g_gan_feat, g_vgg, g_id, g_rec, g_mask, d_real, d_fake), flags) if f]
+
+        return loss_filter
+
+    def initialize(self, opt):
+        BaseModel.initialize(self, opt)
+        if opt.resize_or_crop != 'none' or not opt.isTrain:  # when training at full res this causes OOM
+            torch.backends.cudnn.benchmark = True
+        self.isTrain = opt.isTrain
+
+        device = torch.device("cuda:0")
+
+        # Generator network
+        self.netG = Generator_Adain_Upsample(input_nc=3, output_nc=3, latent_size=512, n_blocks=9, deep=False)
+        self.netG.to(device)
+
+        # Id network
+        netArc_checkpoint = opt.Arc_path
+        netArc_checkpoint = torch.load(netArc_checkpoint)
+        self.netArc = netArc_checkpoint['model'].module
+        self.netArc = self.netArc.to(device)
+        self.netArc.eval()
+
+        if not self.isTrain:
+            pretrained_path = ''  # falls back to self.save_dir inside load_network
+            self.load_network(self.netG, 'G', opt.which_epoch, pretrained_path)
+            return
+
+        # Discriminator network
+        if opt.gan_mode == 'original':
+            use_sigmoid = True
+        else:
+            use_sigmoid = False
+        self.netD1 = Discriminator(input_nc=3, use_sigmoid=use_sigmoid)
+        self.netD2 = Discriminator(input_nc=3, use_sigmoid=use_sigmoid)
+        self.netD1.to(device)
+        self.netD2.to(device)
+
+        self.spNorm = SpecificNorm()
+        self.downsample = nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False)
+
+        # load networks
+        if opt.continue_train or opt.load_pretrain:
+            pretrained_path = '' if not self.isTrain else opt.load_pretrain
+            # print (pretrained_path)
+            self.load_network(self.netG, 'G', opt.which_epoch, pretrained_path)
+            self.load_network(self.netD1, 'D1', opt.which_epoch, pretrained_path)
+            self.load_network(self.netD2, 'D2', opt.which_epoch, pretrained_path)
+
+        if self.isTrain:
+            # define loss functions
+            self.loss_filter = self.init_loss_filter(not opt.no_ganFeat_loss, not opt.no_vgg_loss)
+
+            self.criterionGAN = networks.GANLoss(opt.gan_mode, tensor=self.Tensor, opt=self.opt)
+            self.criterionFeat = nn.L1Loss()
+            self.criterionRec = nn.L1Loss()
+
+            # Names so we can breakout loss
+            self.loss_names = self.loss_filter('G_GAN', 'G_GAN_Feat', 'G_VGG', 'G_ID', 'G_Rec', 'D_GP',
+                                               'D_real', 'D_fake')
+
+            # initialize optimizers
+
+            # optimizer G
+            params = list(self.netG.parameters())
+            self.optimizer_G = torch.optim.Adam(params, lr=opt.lr, betas=(opt.beta1, 0.999))
+
+            # optimizer D
+            params = list(self.netD1.parameters()) + list(self.netD2.parameters())
+            self.optimizer_D = torch.optim.Adam(params, lr=opt.lr, betas=(opt.beta1, 0.999))
+
+            # track the current learning rate so update_learning_rate() below can decay it
+            self.old_lr = opt.lr
+
+    def _gradient_penalty_D(self, netD, img_att, img_fake):
+        # interpolate sample
+        bs = img_fake.shape[0]
+        alpha = torch.rand(bs, 1, 1, 1).expand_as(img_fake).cuda()
+        interpolated = Variable(alpha * img_att + (1 - alpha) * img_fake, requires_grad=True)
+        pred_interpolated = netD.forward(interpolated)
+        pred_interpolated = pred_interpolated[-1]
+
+        # compute gradients
+        grad = torch.autograd.grad(outputs=pred_interpolated,
+                                   inputs=interpolated,
+                                   grad_outputs=torch.ones(pred_interpolated.size()).cuda(),
+                                   retain_graph=True,
+                                   create_graph=True,
+                                   only_inputs=True)[0]
+
+        # penalize gradients
+        grad = grad.view(grad.size(0), -1)
+        grad_l2norm = torch.sqrt(torch.sum(grad ** 2, dim=1))
+        loss_d_gp = torch.mean((grad_l2norm - 1) ** 2)
+
+        return loss_d_gp
+
+    def cosin_metric(self, x1, x2):
+        #return np.dot(x1, x2) / (np.linalg.norm(x1) * np.linalg.norm(x2))
+        return torch.sum(x1 * x2, dim=1) / (torch.norm(x1, dim=1) * torch.norm(x2, dim=1))
+
+    def forward(self, img_id, img_att, latent_id, latent_att, for_G=False):
+        loss_D_fake, loss_D_real, loss_D_GP = 0, 0, 0
+        loss_G_GAN, loss_G_GAN_Feat, loss_G_VGG, loss_G_ID, loss_G_Rec = 0, 0, 0, 0, 0
+
+        img_fake = self.netG.forward(img_att, latent_id)
+        if not self.isTrain:
+            return img_fake
+        img_fake_downsample = self.downsample(img_fake)
+        img_att_downsample = self.downsample(img_att)
+
+        # D_Fake
+        fea1_fake = self.netD1.forward(img_fake.detach())
+        fea2_fake = self.netD2.forward(img_fake_downsample.detach())
+        pred_fake = [fea1_fake, fea2_fake]
+        loss_D_fake = self.criterionGAN(pred_fake, False, for_discriminator=True)
+
+
+        # D_Real
+        fea1_real = self.netD1.forward(img_att)
+        fea2_real = self.netD2.forward(img_att_downsample)
+        pred_real = [fea1_real, fea2_real]
+        fea_real = [fea1_real, fea2_real]
+        loss_D_real = self.criterionGAN(pred_real, True, for_discriminator=True)
+        #print('=====================D_Real========================')
+
+        # D_GP: _gradient_penalty_D above is defined but not applied in this pass
+        loss_D_GP = 0
+
+        # G_GAN
+        fea1_fake = self.netD1.forward(img_fake)
+        fea2_fake = self.netD2.forward(img_fake_downsample)
+        #pred_fake = [fea1_fake[-1], fea2_fake[-1]]
+        pred_fake = [fea1_fake, fea2_fake]
+        fea_fake = [fea1_fake, fea2_fake]
+        loss_G_GAN = self.criterionGAN(pred_fake, True, for_discriminator=False)
+
+        # GAN feature matching loss
+        n_layers_D = 4
+        num_D = 2
+        if not self.opt.no_ganFeat_loss:
+            feat_weights = 4.0 / (n_layers_D + 1)
+            D_weights = 1.0 / num_D
+            for i in range(num_D):
+                for j in range(0, len(fea_fake[i]) - 1):
+                    loss_G_GAN_Feat += D_weights * feat_weights * \
+                                       self.criterionFeat(fea_fake[i][j],
+                                                          fea_real[i][j].detach()) * self.opt.lambda_feat
+
+
+        #G_ID
+        img_fake_down = F.interpolate(img_fake, scale_factor=0.5)
+        img_fake_down = self.spNorm(img_fake_down)
+        latent_fake = self.netArc(img_fake_down)
+        loss_G_ID = (1 - self.cosin_metric(latent_fake, latent_id))
+        #print('=====================G_ID========================')
+        #print(loss_G_ID)
+
+        #G_Rec
+        loss_G_Rec = self.criterionRec(img_fake, img_att) * self.opt.lambda_rec
+
+        # return the filtered losses together with the generated image
+        return [self.loss_filter(loss_G_GAN, loss_G_GAN_Feat, loss_G_VGG, loss_G_ID, loss_G_Rec, loss_D_GP, loss_D_real, loss_D_fake),
+                img_fake]
+
+
+    def save(self, which_epoch):
+        self.save_network(self.netG, 'G', which_epoch, self.gpu_ids)
+        self.save_network(self.netD1, 'D1', which_epoch, self.gpu_ids)
+        self.save_network(self.netD2, 'D2', which_epoch, self.gpu_ids)
+        '''if self.gen_features:
+            self.save_network(self.netE, 'E', which_epoch, self.gpu_ids)'''
+
+    def update_fixed_params(self):
+        # after fixing the global generator for a number of iterations, also start finetuning it
+        params = list(self.netG.parameters())
+        if getattr(self, 'gen_features', False):  # pix2pixHD leftover; fsModel never sets gen_features/netE
+            params += list(self.netE.parameters())
+        self.optimizer_G = torch.optim.Adam(params, lr=self.opt.lr, betas=(self.opt.beta1, 0.999))
+        if self.opt.verbose:
+            print('------------ Now also finetuning global generator -----------')
+
+    def update_learning_rate(self):
+        lrd = self.opt.lr / self.opt.niter_decay
+        lr = self.old_lr - lrd
+        for param_group in self.optimizer_D.param_groups:
+            param_group['lr'] = lr
+        for param_group in self.optimizer_G.param_groups:
+            param_group['lr'] = lr
+        if self.opt.verbose:
+            print('update learning rate: %f -> %f' % (self.old_lr, lr))
+        self.old_lr = lr
+
+
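
No training script ships in this commit, but the forward contract above implies a loop shaped roughly like this sketch (`model` is an initialized `fsModel`; the batch tensors come from the CelebA loader and the loss weighting is an assumption):

```python
import torch

losses, img_fake = model(img_id, img_att, latent_id, latent_att)
losses = [l.mean() if torch.is_tensor(l) else l for l in losses]  # DataParallel-safe
loss_dict = dict(zip(model.loss_names, losses))

loss_G = (loss_dict['G_GAN'] + loss_dict.get('G_GAN_Feat', 0)
          + loss_dict['G_ID'] + loss_dict['G_Rec'])
loss_D = 0.5 * (loss_dict['D_real'] + loss_dict['D_fake'])

model.optimizer_G.zero_grad()
loss_G.backward()
model.optimizer_G.step()

model.optimizer_D.zero_grad()
loss_D.backward()
model.optimizer_D.step()
```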

+ 215 - 0
models/fs_networks.py

@@ -0,0 +1,215 @@
+"""
+Copyright (C) 2019 NVIDIA Corporation.  All rights reserved.
+Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
+"""
+
+import torch
+import torch.nn as nn
+
+
+class InstanceNorm(nn.Module):
+    def __init__(self, epsilon=1e-8):
+        """
+            @notice: avoid in-place ops.
+            https://discuss.pytorch.org/t/encounter-the-runtimeerror-one-of-the-variables-needed-for-gradient-computation-has-been-modified-by-an-inplace-operation/836/3
+        """
+        super(InstanceNorm, self).__init__()
+        self.epsilon = epsilon
+
+    def forward(self, x):
+        x   = x - torch.mean(x, (2, 3), True)
+        tmp = torch.mul(x, x) # or x ** 2
+        tmp = torch.rsqrt(torch.mean(tmp, (2, 3), True) + self.epsilon)
+        return x * tmp
+
+class ApplyStyle(nn.Module):
+    """
+        @ref: https://github.com/lernapparat/lernapparat/blob/master/style_gan/pytorch_style_gan.ipynb
+    """
+    def __init__(self, latent_size, channels):
+        super(ApplyStyle, self).__init__()
+        self.linear = nn.Linear(latent_size, channels * 2)
+
+    def forward(self, x, latent):
+        style = self.linear(latent)  # style => [batch_size, n_channels*2]
+        shape = [-1, 2, x.size(1), 1, 1]
+        style = style.view(shape)    # [batch_size, 2, n_channels, ...]
+        x = x * (style[:, 0] + 1.) + style[:, 1]
+        return x
+
+class ResnetBlock_Adain(nn.Module):
+    def __init__(self, dim, latent_size, padding_type, activation=nn.ReLU(True)):
+        super(ResnetBlock_Adain, self).__init__()
+
+        p = 0
+        conv1 = []
+        if padding_type == 'reflect':
+            conv1 += [nn.ReflectionPad2d(1)]
+        elif padding_type == 'replicate':
+            conv1 += [nn.ReplicationPad2d(1)]
+        elif padding_type == 'zero':
+            p = 1
+        else:
+            raise NotImplementedError('padding [%s] is not implemented' % padding_type)
+        conv1 += [nn.Conv2d(dim, dim, kernel_size=3, padding = p), InstanceNorm()]
+        self.conv1 = nn.Sequential(*conv1)
+        self.style1 = ApplyStyle(latent_size, dim)
+        self.act1 = activation
+
+        p = 0
+        conv2 = []
+        if padding_type == 'reflect':
+            conv2 += [nn.ReflectionPad2d(1)]
+        elif padding_type == 'replicate':
+            conv2 += [nn.ReplicationPad2d(1)]
+        elif padding_type == 'zero':
+            p = 1
+        else:
+            raise NotImplementedError('padding [%s] is not implemented' % padding_type)
+        conv2 += [nn.Conv2d(dim, dim, kernel_size=3, padding=p), InstanceNorm()]
+        self.conv2 = nn.Sequential(*conv2)
+        self.style2 = ApplyStyle(latent_size, dim)
+
+
+    def forward(self, x, dlatents_in_slice):
+        y = self.conv1(x)
+        y = self.style1(y, dlatents_in_slice)
+        y = self.act1(y)
+        y = self.conv2(y)
+        y = self.style2(y, dlatents_in_slice)
+        out = x + y
+        return out
+
+
+
+class Generator_Adain_Upsample(nn.Module):
+    def __init__(self, input_nc, output_nc, latent_size, n_blocks=6, deep=False,
+                 norm_layer=nn.BatchNorm2d,
+                 padding_type='reflect'):
+        assert (n_blocks >= 0)
+        super(Generator_Adain_Upsample, self).__init__()
+        activation = nn.ReLU(True)
+        self.deep = deep
+
+        self.first_layer = nn.Sequential(nn.ReflectionPad2d(3), nn.Conv2d(input_nc, 64, kernel_size=7, padding=0),
+                                         norm_layer(64), activation)
+        ### downsample
+        self.down1 = nn.Sequential(nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1),
+                                   norm_layer(128), activation)
+        self.down2 = nn.Sequential(nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1),
+                                   norm_layer(256), activation)
+        self.down3 = nn.Sequential(nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1),
+                                   norm_layer(512), activation)
+        if self.deep:
+            self.down4 = nn.Sequential(nn.Conv2d(512, 512, kernel_size=3, stride=2, padding=1),
+                                       norm_layer(512), activation)
+
+        ### resnet blocks
+        BN = []
+        for i in range(n_blocks):
+            BN += [
+                ResnetBlock_Adain(512, latent_size=latent_size, padding_type=padding_type, activation=activation)]
+        self.BottleNeck = nn.Sequential(*BN)
+
+        if self.deep:
+            self.up4 = nn.Sequential(
+                nn.Upsample(scale_factor=2, mode='bilinear'),
+                nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
+                nn.BatchNorm2d(512), activation
+            )
+        self.up3 = nn.Sequential(
+            nn.Upsample(scale_factor=2, mode='bilinear'),
+            nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1),
+            nn.BatchNorm2d(256), activation
+        )
+        self.up2 = nn.Sequential(
+            nn.Upsample(scale_factor=2, mode='bilinear'),
+            nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1),
+            nn.BatchNorm2d(128), activation
+        )
+        self.up1 = nn.Sequential(
+            nn.Upsample(scale_factor=2, mode='bilinear'),
+            nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=1),
+            nn.BatchNorm2d(64), activation
+        )
+        self.last_layer = nn.Sequential(nn.ReflectionPad2d(3), nn.Conv2d(64, output_nc, kernel_size=7, padding=0),
+                                        nn.Tanh())
+
+    def forward(self, input, dlatents):
+        x = input  # 3*224*224
+
+        skip1 = self.first_layer(x)
+        skip2 = self.down1(skip1)
+        skip3 = self.down2(skip2)
+        if self.deep:
+            skip4 = self.down3(skip3)
+            x = self.down4(skip4)
+        else:
+            x = self.down3(skip3)
+
+        for i in range(len(self.BottleNeck)):
+            x = self.BottleNeck[i](x, dlatents)
+
+        if self.deep:
+            x = self.up4(x)
+        x = self.up3(x)
+        x = self.up2(x)
+        x = self.up1(x)
+        x = self.last_layer(x)
+        x = (x + 1) / 2
+
+        return x
+
+class Discriminator(nn.Module):
+    def __init__(self, input_nc, norm_layer=nn.BatchNorm2d, use_sigmoid=False):
+        super(Discriminator, self).__init__()
+
+        kw = 4
+        padw = 1
+        self.down1 = nn.Sequential(
+            nn.Conv2d(input_nc, 64, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)
+        )
+        self.down2 = nn.Sequential(
+            nn.Conv2d(64, 128, kernel_size=kw, stride=2, padding=padw),
+            norm_layer(128), nn.LeakyReLU(0.2, True)
+        )
+        self.down3 = nn.Sequential(
+            nn.Conv2d(128, 256, kernel_size=kw, stride=2, padding=padw),
+            norm_layer(256), nn.LeakyReLU(0.2, True)
+        )
+        self.down4 = nn.Sequential(
+            nn.Conv2d(256, 512, kernel_size=kw, stride=2, padding=padw),
+            norm_layer(512), nn.LeakyReLU(0.2, True)
+        )
+        self.conv1 = nn.Sequential(
+            nn.Conv2d(512, 512, kernel_size=kw, stride=1, padding=padw),
+            norm_layer(512),
+            nn.LeakyReLU(0.2, True)
+        )
+
+        if use_sigmoid:
+            self.conv2 = nn.Sequential(
+                nn.Conv2d(512, 1, kernel_size=kw, stride=1, padding=padw), nn.Sigmoid()
+            )
+        else:
+            self.conv2 = nn.Sequential(
+                nn.Conv2d(512, 1, kernel_size=kw, stride=1, padding=padw)
+            )
+
+    def forward(self, input):
+        out = []
+        x = self.down1(input)
+        out.append(x)
+        x = self.down2(x)
+        out.append(x)
+        x = self.down3(x)
+        out.append(x)
+        x = self.down4(x)
+        out.append(x)
+        x = self.conv1(x)
+        out.append(x)
+        x = self.conv2(x)
+        out.append(x)
+        
+        return out
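
A quick shape check for the two networks above (batch and spatial sizes are assumptions consistent with the 224-crop, 512-d latent setup used elsewhere in this commit):

```python
import torch
from models.fs_networks import Generator_Adain_Upsample, Discriminator

G = Generator_Adain_Upsample(input_nc=3, output_nc=3, latent_size=512, n_blocks=9)
D = Discriminator(input_nc=3)

img = torch.randn(2, 3, 224, 224)
latent = torch.randn(2, 512)
fake = G(img, latent)  # (2, 3, 224, 224); the final (x + 1) / 2 maps tanh output into [0, 1]
feats = D(fake)        # list of 6 feature maps; feats[-1] is the patch prediction
print(fake.shape, feats[-1].shape)  # torch.Size([2, 3, 224, 224]) torch.Size([2, 1, 12, 12])
```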

+ 181 - 0
models/models.py

@@ -0,0 +1,181 @@
+import math
+import torch
+import torch.nn.functional as F
+from torch import nn
+from torch.nn import Parameter
+from .config import device, num_classes
+
+
+def conv3x3(in_planes, out_planes, stride=1):
+    """3x3 convolution with padding (used by IRBlock below but otherwise undefined in this file)."""
+    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
+
+
+def create_model(opt):
+    if opt.model == 'pix2pixHD':
+        #from .pix2pixHD_model import Pix2PixHDModel, InferenceModel
+        from .fs_model import fsModel
+        model = fsModel()
+    else:
+        from .ui_model import UIModel
+        model = UIModel()
+
+    model.initialize(opt)
+    if opt.verbose:
+        print("model [%s] was created" % (model.name()))
+
+    if opt.isTrain and len(opt.gpu_ids) and not opt.fp16:
+        model = torch.nn.DataParallel(model, device_ids=opt.gpu_ids)
+
+    return model
+
+
+
+class SEBlock(nn.Module):
+    def __init__(self, channel, reduction=16):
+        super(SEBlock, self).__init__()
+        self.avg_pool = nn.AdaptiveAvgPool2d(1)
+        self.fc = nn.Sequential(
+            nn.Linear(channel, channel // reduction),
+            nn.PReLU(),
+            nn.Linear(channel // reduction, channel),
+            nn.Sigmoid()
+        )
+
+    def forward(self, x):
+        b, c, _, _ = x.size()
+        y = self.avg_pool(x).view(b, c)
+        y = self.fc(y).view(b, c, 1, 1)
+        return x * y
+
+
+class IRBlock(nn.Module):
+    expansion = 1
+
+    def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True):
+        super(IRBlock, self).__init__()
+        self.bn0 = nn.BatchNorm2d(inplanes)
+        self.conv1 = conv3x3(inplanes, inplanes)
+        self.bn1 = nn.BatchNorm2d(inplanes)
+        self.prelu = nn.PReLU()
+        self.conv2 = conv3x3(inplanes, planes, stride)
+        self.bn2 = nn.BatchNorm2d(planes)
+        self.downsample = downsample
+        self.stride = stride
+        self.use_se = use_se
+        if self.use_se:
+            self.se = SEBlock(planes)
+
+    def forward(self, x):
+        residual = x
+        out = self.bn0(x)
+        out = self.conv1(out)
+        out = self.bn1(out)
+        out = self.prelu(out)
+
+        out = self.conv2(out)
+        out = self.bn2(out)
+        if self.use_se:
+            out = self.se(out)
+
+        if self.downsample is not None:
+            residual = self.downsample(x)
+
+        out += residual
+        out = self.prelu(out)
+
+        return out
+
+
+class ResNet(nn.Module):
+
+    def __init__(self, block, layers, use_se=True):
+        self.inplanes = 64
+        self.use_se = use_se
+        super(ResNet, self).__init__()
+        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, bias=False)
+        self.bn1 = nn.BatchNorm2d(64)
+        self.prelu = nn.PReLU()
+        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
+        self.layer1 = self._make_layer(block, 64, layers[0])
+        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
+        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
+        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
+        self.bn2 = nn.BatchNorm2d(512)
+        self.dropout = nn.Dropout()
+        self.fc = nn.Linear(512 * 7 * 7, 512)
+        self.bn3 = nn.BatchNorm1d(512)
+
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                nn.init.xavier_normal_(m.weight)
+            elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm1d):
+                nn.init.constant_(m.weight, 1)
+                nn.init.constant_(m.bias, 0)
+            elif isinstance(m, nn.Linear):
+                nn.init.xavier_normal_(m.weight)
+                nn.init.constant_(m.bias, 0)
+
+    def _make_layer(self, block, planes, blocks, stride=1):
+        downsample = None
+        if stride != 1 or self.inplanes != planes * block.expansion:
+            downsample = nn.Sequential(
+                nn.Conv2d(self.inplanes, planes * block.expansion,
+                          kernel_size=1, stride=stride, bias=False),
+                nn.BatchNorm2d(planes * block.expansion),
+            )
+
+        layers = []
+        layers.append(block(self.inplanes, planes, stride, downsample, use_se=self.use_se))
+        self.inplanes = planes
+        for i in range(1, blocks):
+            layers.append(block(self.inplanes, planes, use_se=self.use_se))
+
+        return nn.Sequential(*layers)
+
+    def forward(self, x):
+        x = self.conv1(x)
+        x = self.bn1(x)
+        x = self.prelu(x)
+        x = self.maxpool(x)
+
+        x = self.layer1(x)
+        x = self.layer2(x)
+        x = self.layer3(x)
+        x = self.layer4(x)
+
+        x = self.bn2(x)
+        x = self.dropout(x)
+        x = x.view(x.size(0), -1)
+        x = self.fc(x)
+        x = self.bn3(x)
+
+        return x
+
+
+class ArcMarginModel(nn.Module):
+    def __init__(self, args):
+        super(ArcMarginModel, self).__init__()
+
+        self.weight = Parameter(torch.FloatTensor(num_classes, args.emb_size))
+        nn.init.xavier_uniform_(self.weight)
+
+        self.easy_margin = args.easy_margin
+        self.m = args.margin_m
+        self.s = args.margin_s
+
+        self.cos_m = math.cos(self.m)
+        self.sin_m = math.sin(self.m)
+        self.th = math.cos(math.pi - self.m)
+        self.mm = math.sin(math.pi - self.m) * self.m
+
+    def forward(self, input, label):
+        x = F.normalize(input)
+        W = F.normalize(self.weight)
+        cosine = F.linear(x, W)
+        sine = torch.sqrt(1.0 - torch.pow(cosine, 2))
+        phi = cosine * self.cos_m - sine * self.sin_m  # cos(theta + m)
+        if self.easy_margin:
+            phi = torch.where(cosine > 0, phi, cosine)
+        else:
+            phi = torch.where(cosine > self.th, phi, cosine - self.mm)
+        one_hot = torch.zeros(cosine.size(), device=device)
+        one_hot.scatter_(1, label.view(-1, 1).long(), 1)
+        output = (one_hot * phi) + ((1.0 - one_hot) * cosine)
+        output *= self.s
+        return output
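
For orientation, a sketch wiring the embedder to the margin head; the layer counts, margin, and scale are assumptions, while `num_classes` and the 112x112 input size come from `models/config.py`. The head implements the standard ArcFace margin: the target-class logit becomes s * cos(theta + m).

```python
import torch
from types import SimpleNamespace
from models.config import device
from models.models import ResNet, IRBlock, ArcMarginModel

backbone = ResNet(IRBlock, [2, 2, 2, 2]).to(device)  # assumed IR-18-style depth
args = SimpleNamespace(emb_size=512, easy_margin=False, margin_m=0.5, margin_s=64.0)
head = ArcMarginModel(args).to(device)

imgs = torch.randn(4, 3, 112, 112, device=device)
labels = torch.randint(0, 93431, (4,), device=device)
logits = head(backbone(imgs), labels)  # (4, 93431) class logits for a softmax loss
```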

+ 845 - 0
models/networks.py

@@ -0,0 +1,845 @@
+import torch
+import torch.nn as nn
+import functools
+from torch.autograd import Variable
+import numpy as np
+from torchvision import transforms
+import torch.nn.functional as F
+
+###############################################################################
+# Functions
+###############################################################################
+def weights_init(m):
+    classname = m.__class__.__name__
+    if classname.find('Conv') != -1:
+        m.weight.data.normal_(0.0, 0.02)
+    elif classname.find('BatchNorm2d') != -1:
+        m.weight.data.normal_(1.0, 0.02)
+        m.bias.data.fill_(0)
+
+def get_norm_layer(norm_type='instance'):
+    if norm_type == 'batch':
+        norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
+    elif norm_type == 'instance':
+        norm_layer = functools.partial(nn.InstanceNorm2d, affine=False)
+    else:
+        raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
+    return norm_layer
+
+def define_G(input_nc, output_nc, ngf, netG, n_downsample_global=3, n_blocks_global=9, n_local_enhancers=1, 
+             n_blocks_local=3, norm='instance', gpu_ids=[]):    
+    norm_layer = get_norm_layer(norm_type=norm)     
+    if netG == 'global':    
+        netG = GlobalGenerator(input_nc, output_nc, ngf, n_downsample_global, n_blocks_global, norm_layer)       
+    elif netG == 'local':        
+        netG = LocalEnhancer(input_nc, output_nc, ngf, n_downsample_global, n_blocks_global, 
+                                  n_local_enhancers, n_blocks_local, norm_layer)
+    elif netG == 'encoder':
+        netG = Encoder(input_nc, output_nc, ngf, n_downsample_global, norm_layer)
+    else:
+        raise NotImplementedError('generator not implemented!')
+    print(netG)
+    if len(gpu_ids) > 0:
+        assert(torch.cuda.is_available())   
+        netG.cuda(gpu_ids[0])
+    netG.apply(weights_init)
+    return netG
+
+def define_G_Adain(input_nc, output_nc, latent_size, ngf, netG, n_downsample_global=2, n_blocks_global=4, norm='instance', gpu_ids=[]):
+    norm_layer = get_norm_layer(norm_type=norm)
+    netG = Generator_Adain(input_nc, output_nc, latent_size, ngf, n_downsample_global, n_blocks_global, norm_layer)
+    print(netG)
+    if len(gpu_ids) > 0:
+        assert(torch.cuda.is_available())
+        netG.cuda(gpu_ids[0])
+    netG.apply(weights_init)
+    return netG
+
+def define_G_Adain_Mask(input_nc, output_nc, latent_size, ngf, netG, n_downsample_global=2, n_blocks_global=4, norm='instance', gpu_ids=[]):
+    norm_layer = get_norm_layer(norm_type=norm)
+    netG = Generator_Adain_Mask(input_nc, output_nc, latent_size, ngf, n_downsample_global, n_blocks_global, norm_layer)
+    print(netG)
+    if len(gpu_ids) > 0:
+        assert(torch.cuda.is_available())
+        netG.cuda(gpu_ids[0])
+    netG.apply(weights_init)
+    return netG
+
+def define_G_Adain_Upsample(input_nc, output_nc, latent_size, ngf, netG, n_downsample_global=2, n_blocks_global=4, norm='instance', gpu_ids=[]):
+    norm_layer = get_norm_layer(norm_type=norm)
+    netG = Generator_Adain_Upsample(input_nc, output_nc, latent_size, ngf, n_downsample_global, n_blocks_global, norm_layer)
+    print(netG)
+    if len(gpu_ids) > 0:
+        assert(torch.cuda.is_available())
+        netG.cuda(gpu_ids[0])
+    netG.apply(weights_init)
+    return netG
+
+def define_G_Adain_2(input_nc, output_nc, latent_size, ngf, netG, n_downsample_global=2, n_blocks_global=4, norm='instance', gpu_ids=[]):
+    norm_layer = get_norm_layer(norm_type=norm)
+    netG = Generator_Adain_2(input_nc, output_nc, latent_size, ngf, n_downsample_global, n_blocks_global, norm_layer)
+    print(netG)
+    if len(gpu_ids) > 0:
+        assert(torch.cuda.is_available())
+        netG.cuda(gpu_ids[0])
+    netG.apply(weights_init)
+    return netG
+
+def define_D(input_nc, ndf, n_layers_D, norm='instance', use_sigmoid=False, num_D=1, getIntermFeat=False, gpu_ids=[]):        
+    norm_layer = get_norm_layer(norm_type=norm)   
+    netD = MultiscaleDiscriminator(input_nc, ndf, n_layers_D, norm_layer, use_sigmoid, num_D, getIntermFeat)   
+    print(netD)
+    if len(gpu_ids) > 0:
+        assert(torch.cuda.is_available())
+        netD.cuda(gpu_ids[0])
+    netD.apply(weights_init)
+    return netD
+
+def print_network(net):
+    if isinstance(net, list):
+        net = net[0]
+    num_params = 0
+    for param in net.parameters():
+        num_params += param.numel()
+    print(net)
+    print('Total number of parameters: %d' % num_params)
+
+##############################################################################
+# Losses
+##############################################################################
+class GANLoss(nn.Module):
+    def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0,
+                 tensor=torch.FloatTensor, opt=None):
+        super(GANLoss, self).__init__()
+        self.real_label = target_real_label
+        self.fake_label = target_fake_label
+        self.real_label_tensor = None
+        self.fake_label_tensor = None
+        self.zero_tensor = None
+        self.Tensor = tensor
+        self.gan_mode = gan_mode
+        self.opt = opt
+        if gan_mode == 'ls':
+            pass
+        elif gan_mode == 'original':
+            pass
+        elif gan_mode == 'w':
+            pass
+        elif gan_mode == 'hinge':
+            pass
+        else:
+            raise ValueError('Unexpected gan_mode {}'.format(gan_mode))
+
+    def get_target_tensor(self, input, target_is_real):
+        if target_is_real:
+            if self.real_label_tensor is None:
+                self.real_label_tensor = self.Tensor(1).fill_(self.real_label)
+                self.real_label_tensor.requires_grad_(False)
+            return self.real_label_tensor.expand_as(input)
+        else:
+            if self.fake_label_tensor is None:
+                self.fake_label_tensor = self.Tensor(1).fill_(self.fake_label)
+                self.fake_label_tensor.requires_grad_(False)
+            return self.fake_label_tensor.expand_as(input)
+
+    def get_zero_tensor(self, input):
+        if self.zero_tensor is None:
+            self.zero_tensor = self.Tensor(1).fill_(0)
+            self.zero_tensor.requires_grad_(False)
+        return self.zero_tensor.expand_as(input)
+
+    def loss(self, input, target_is_real, for_discriminator=True):
+        if self.gan_mode == 'original':  # cross entropy loss
+            target_tensor = self.get_target_tensor(input, target_is_real)
+            loss = F.binary_cross_entropy_with_logits(input, target_tensor)
+            return loss
+        elif self.gan_mode == 'ls':
+            target_tensor = self.get_target_tensor(input, target_is_real)
+            return F.mse_loss(input, target_tensor)
+        elif self.gan_mode == 'hinge':
+            if for_discriminator:
+                if target_is_real:
+                    minval = torch.min(input - 1, self.get_zero_tensor(input))
+                    loss = -torch.mean(minval)
+                else:
+                    minval = torch.min(-input - 1, self.get_zero_tensor(input))
+                    loss = -torch.mean(minval)
+            else:
+                assert target_is_real, "The generator's hinge loss must be aiming for real"
+                loss = -torch.mean(input)
+            return loss
+        else:
+            # wgan
+            if target_is_real:
+                return -input.mean()
+            else:
+                return input.mean()
+
+    def __call__(self, input, target_is_real, for_discriminator=True):
+        # computing loss is a bit complicated because |input| may not be
+        # a tensor, but list of tensors in case of multiscale discriminator
+        if isinstance(input, list):
+            loss = 0
+            for pred_i in input:
+                if isinstance(pred_i, list):
+                    pred_i = pred_i[-1]
+                loss_tensor = self.loss(pred_i, target_is_real, for_discriminator)
+                bs = 1 if len(loss_tensor.size()) == 0 else loss_tensor.size(0)
+                new_loss = torch.mean(loss_tensor.view(bs, -1), dim=1)
+                loss += new_loss
+            return loss / len(input)
+        else:
+            return self.loss(input, target_is_real, for_discriminator)
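
`__call__` accepts either a raw prediction tensor or the nested lists a multiscale discriminator returns; when an element is itself a list of intermediate features, only its last entry (the prediction map) is scored, and the result is averaged over scales. A toy sketch (all shapes are assumptions):

```python
import torch
from models.networks import GANLoss

crit = GANLoss('hinge', tensor=torch.FloatTensor)

pred = torch.randn(2, 1, 12, 12)                 # single-scale prediction
loss = crit(pred, target_is_real=True, for_discriminator=True)

multi = [[torch.randn(2, 64, 96, 96), torch.randn(2, 1, 12, 12)],
         [torch.randn(2, 64, 48, 48), torch.randn(2, 1, 6, 6)]]
loss = crit(multi, target_is_real=True, for_discriminator=True)  # mean over the two scales
```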
+
+class VGGLoss(nn.Module):
+    def __init__(self, gpu_ids):
+        super(VGGLoss, self).__init__()        
+        self.vgg = Vgg19().cuda()
+        self.criterion = nn.L1Loss()
+        self.weights = [1.0/32, 1.0/16, 1.0/8, 1.0/4, 1.0]        
+
+    def forward(self, x, y):              
+        x_vgg, y_vgg = self.vgg(x), self.vgg(y)
+        loss = 0
+        for i in range(len(x_vgg)):
+            loss += self.weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach())        
+        return loss
+
+##############################################################################
+# Generator
+##############################################################################
+class LocalEnhancer(nn.Module):
+    def __init__(self, input_nc, output_nc, ngf=32, n_downsample_global=3, n_blocks_global=9, 
+                 n_local_enhancers=1, n_blocks_local=3, norm_layer=nn.BatchNorm2d, padding_type='reflect'):        
+        super(LocalEnhancer, self).__init__()
+        self.n_local_enhancers = n_local_enhancers
+        
+        ###### global generator model #####           
+        ngf_global = ngf * (2**n_local_enhancers)
+        model_global = GlobalGenerator(input_nc, output_nc, ngf_global, n_downsample_global, n_blocks_global, norm_layer).model        
+        model_global = [model_global[i] for i in range(len(model_global)-3)] # get rid of final convolution layers        
+        self.model = nn.Sequential(*model_global)                
+
+        ###### local enhancer layers #####
+        for n in range(1, n_local_enhancers+1):
+            ### downsample            
+            ngf_global = ngf * (2**(n_local_enhancers-n))
+            model_downsample = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf_global, kernel_size=7, padding=0), 
+                                norm_layer(ngf_global), nn.ReLU(True),
+                                nn.Conv2d(ngf_global, ngf_global * 2, kernel_size=3, stride=2, padding=1), 
+                                norm_layer(ngf_global * 2), nn.ReLU(True)]
+            ### residual blocks
+            model_upsample = []
+            for i in range(n_blocks_local):
+                model_upsample += [ResnetBlock(ngf_global * 2, padding_type=padding_type, norm_layer=norm_layer)]
+
+            ### upsample
+            model_upsample += [nn.ConvTranspose2d(ngf_global * 2, ngf_global, kernel_size=3, stride=2, padding=1, output_padding=1), 
+                               norm_layer(ngf_global), nn.ReLU(True)]      
+
+            ### final convolution
+            if n == n_local_enhancers:                
+                model_upsample += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0), nn.Tanh()]                       
+            
+            setattr(self, 'model'+str(n)+'_1', nn.Sequential(*model_downsample))
+            setattr(self, 'model'+str(n)+'_2', nn.Sequential(*model_upsample))                  
+        
+        self.downsample = nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False)
+
+    def forward(self, input): 
+        ### create input pyramid
+        input_downsampled = [input]
+        for i in range(self.n_local_enhancers):
+            input_downsampled.append(self.downsample(input_downsampled[-1]))
+
+        ### output at coarsest level
+        output_prev = self.model(input_downsampled[-1])        
+        ### build up one layer at a time
+        for n_local_enhancers in range(1, self.n_local_enhancers+1):
+            model_downsample = getattr(self, 'model'+str(n_local_enhancers)+'_1')
+            model_upsample = getattr(self, 'model'+str(n_local_enhancers)+'_2')            
+            input_i = input_downsampled[self.n_local_enhancers-n_local_enhancers]            
+            output_prev = model_upsample(model_downsample(input_i) + output_prev)
+        return output_prev
+
+class GlobalGenerator(nn.Module):
+    def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=nn.BatchNorm2d, 
+                 padding_type='reflect'):
+        assert(n_blocks >= 0)
+        super(GlobalGenerator, self).__init__()        
+        activation = nn.ReLU(True)        
+
+        model = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0), norm_layer(ngf), activation]
+        ### downsample
+        for i in range(n_downsampling):
+            mult = 2**i
+            model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1),
+                      norm_layer(ngf * mult * 2), activation]
+
+        ### resnet blocks
+        mult = 2**n_downsampling
+        for i in range(n_blocks):
+            model += [ResnetBlock(ngf * mult, padding_type=padding_type, activation=activation, norm_layer=norm_layer)]
+        
+        ### upsample         
+        for i in range(n_downsampling):
+            mult = 2**(n_downsampling - i)
+            model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, output_padding=1),
+                       norm_layer(int(ngf * mult / 2)), activation]
+        model += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0), nn.Tanh()]        
+        self.model = nn.Sequential(*model)
+            
+    def forward(self, input):
+        return self.model(input)             
+        
+# Define a resnet block
+class ResnetBlock(nn.Module):
+    def __init__(self, dim, padding_type, norm_layer, activation=nn.ReLU(True), use_dropout=False):
+        super(ResnetBlock, self).__init__()
+        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, activation, use_dropout)
+
+    def build_conv_block(self, dim, padding_type, norm_layer, activation, use_dropout):
+        conv_block = []
+        p = 0
+        if padding_type == 'reflect':
+            conv_block += [nn.ReflectionPad2d(1)]
+        elif padding_type == 'replicate':
+            conv_block += [nn.ReplicationPad2d(1)]
+        elif padding_type == 'zero':
+            p = 1
+        else:
+            raise NotImplementedError('padding [%s] is not implemented' % padding_type)
+
+        conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p),
+                       norm_layer(dim),
+                       activation]
+        if use_dropout:
+            conv_block += [nn.Dropout(0.5)]
+
+        p = 0
+        if padding_type == 'reflect':
+            conv_block += [nn.ReflectionPad2d(1)]
+        elif padding_type == 'replicate':
+            conv_block += [nn.ReplicationPad2d(1)]
+        elif padding_type == 'zero':
+            p = 1
+        else:
+            raise NotImplementedError('padding [%s] is not implemented' % padding_type)
+        conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p),
+                       norm_layer(dim)]
+
+        return nn.Sequential(*conv_block)
+
+    def forward(self, x):
+        out = x + self.conv_block(x)
+        return out
+
+class InstanceNorm(nn.Module):
+    def __init__(self, epsilon=1e-8):
+        """
+            @notice: avoid in-place ops.
+            https://discuss.pytorch.org/t/encounter-the-runtimeerror-one-of-the-variables-needed-for-gradient-computation-has-been-modified-by-an-inplace-operation/836/3
+        """
+        super(InstanceNorm, self).__init__()
+        self.epsilon = epsilon
+
+    def forward(self, x):
+        x   = x - torch.mean(x, (2, 3), True)
+        tmp = torch.mul(x, x) # or x ** 2
+        tmp = torch.rsqrt(torch.mean(tmp, (2, 3), True) + self.epsilon)
+        return x * tmp
+
+class SpecificNorm(nn.Module):
+    def __init__(self, epsilon=1e-8):
+        """
+            @notice: avoid in-place ops.
+            https://discuss.pytorch.org/t/encounter-the-runtimeerror-one-of-the-variables-needed-for-gradient-computation-has-been-modified-by-an-inplace-operation/836/3
+        """
+        super(SpecificNorm, self).__init__()
+        self.mean = np.array([0.485, 0.456, 0.406])
+        self.mean = torch.from_numpy(self.mean).float().cuda()
+        self.mean = self.mean.view([1, 3, 1, 1])
+
+        self.std = np.array([0.229, 0.224, 0.225])
+        self.std = torch.from_numpy(self.std).float().cuda()
+        self.std = self.std.view([1, 3, 1, 1])
+
+    def forward(self, x):
+        mean = self.mean.expand([1, 3, x.shape[2], x.shape[3]])
+        std = self.std.expand([1, 3, x.shape[2], x.shape[3]])
+
+        x = (x - mean) / std
+
+        return x
+
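+# SpecificNorm re-applies the standard ImageNet mean/std normalization, so the
+# generator's [0, 1] output can be consumed by networks that expect normalized
+# input. The buffers are created with .cuda(), so this module assumes a GPU.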
+class ApplyStyle(nn.Module):
+    """
+        @ref: https://github.com/lernapparat/lernapparat/blob/master/style_gan/pytorch_style_gan.ipynb
+    """
+    def __init__(self, latent_size, channels):
+        super(ApplyStyle, self).__init__()
+        self.linear = nn.Linear(latent_size, channels * 2)
+
+    def forward(self, x, latent):
+        style = self.linear(latent)  # style => [batch_size, n_channels*2]
+        shape = [-1, 2, x.size(1), 1, 1]
+        style = style.view(shape)    # [batch_size, 2, n_channels, ...]
+        x = x * (style[:, 0] + 1.) + style[:, 1]
+        return x
+
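+# ApplyStyle is AdaIN-style modulation: a linear layer maps the identity latent
+# to per-channel (scale, bias) and transforms the features as x * (1 + scale) + bias.
+# The '+ 1.' keeps the scale centered on identity, so an all-zero style output
+# leaves the features unchanged (the usual StyleGAN parameterization).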
+class ResnetBlock_Adain(nn.Module):
+    def __init__(self, dim, latent_size, padding_type, activation=nn.ReLU(True)):
+        super(ResnetBlock_Adain, self).__init__()
+
+        p = 0
+        conv1 = []
+        if padding_type == 'reflect':
+            conv1 += [nn.ReflectionPad2d(1)]
+        elif padding_type == 'replicate':
+            conv1 += [nn.ReplicationPad2d(1)]
+        elif padding_type == 'zero':
+            p = 1
+        else:
+            raise NotImplementedError('padding [%s] is not implemented' % padding_type)
+        conv1 += [nn.Conv2d(dim, dim, kernel_size=3, padding=p), InstanceNorm()]
+        self.conv1 = nn.Sequential(*conv1)
+        self.style1 = ApplyStyle(latent_size, dim)
+        self.act1 = activation
+
+        p = 0
+        conv2 = []
+        if padding_type == 'reflect':
+            conv2 += [nn.ReflectionPad2d(1)]
+        elif padding_type == 'replicate':
+            conv2 += [nn.ReplicationPad2d(1)]
+        elif padding_type == 'zero':
+            p = 1
+        else:
+            raise NotImplementedError('padding [%s] is not implemented' % padding_type)
+        conv2 += [nn.Conv2d(dim, dim, kernel_size=3, padding=p), InstanceNorm()]
+        self.conv2 = nn.Sequential(*conv2)
+        self.style2 = ApplyStyle(latent_size, dim)
+
+
+    def forward(self, x, dlatents_in_slice):
+        y = self.conv1(x)
+        y = self.style1(y, dlatents_in_slice)
+        y = self.act1(y)
+        y = self.conv2(y)
+        y = self.style2(y, dlatents_in_slice)
+        out = x + y
+        return out
+
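+# A minimal shape check for ResnetBlock_Adain (hypothetical sizes; dim and
+# latent_size are free parameters here):
+#   blk = ResnetBlock_Adain(dim=256, latent_size=512, padding_type='reflect')
+#   y = blk(torch.randn(2, 256, 28, 28), torch.randn(2, 512))  # -> (2, 256, 28, 28)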
+class UpBlock_Adain(nn.Module):
+    def __init__(self, dim_in, dim_out, latent_size, padding_type, activation=nn.ReLU(True)):
+        super(UpBlock_Adain, self).__init__()
+
+        p = 0
+        conv1 = [nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)]  # explicit align_corners matches the default and silences the PyTorch warning
+        if padding_type == 'reflect':
+            conv1 += [nn.ReflectionPad2d(1)]
+        elif padding_type == 'replicate':
+            conv1 += [nn.ReplicationPad2d(1)]
+        elif padding_type == 'zero':
+            p = 1
+        else:
+            raise NotImplementedError('padding [%s] is not implemented' % padding_type)
+        conv1 += [nn.Conv2d(dim_in, dim_out, kernel_size=3, padding=p), InstanceNorm()]
+        self.conv1 = nn.Sequential(*conv1)
+        self.style1 = ApplyStyle(latent_size, dim_out)
+        self.act1 = activation
+
+
+    def forward(self, x, dlatents_in_slice):
+        y = self.conv1(x)
+        y = self.style1(y, dlatents_in_slice)
+        y = self.act1(y)
+        return y
+
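+# UpBlock_Adain doubles the spatial resolution (bilinear upsample + 3x3 conv) and
+# applies the same latent-conditioned modulation as the bottleneck blocks, so the
+# identity embedding also steers the decoder stages.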
+class Encoder(nn.Module):
+    def __init__(self, input_nc, output_nc, ngf=32, n_downsampling=4, norm_layer=nn.BatchNorm2d):
+        super(Encoder, self).__init__()        
+        self.output_nc = output_nc        
+
+        model = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0), 
+                 norm_layer(ngf), nn.ReLU(True)]             
+        ### downsample
+        for i in range(n_downsampling):
+            mult = 2**i
+            model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1),
+                      norm_layer(ngf * mult * 2), nn.ReLU(True)]
+
+        ### upsample         
+        for i in range(n_downsampling):
+            mult = 2**(n_downsampling - i)
+            model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, output_padding=1),
+                       norm_layer(int(ngf * mult / 2)), nn.ReLU(True)]        
+
+        model += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0), nn.Tanh()]
+        self.model = nn.Sequential(*model) 
+
+    def forward(self, input, inst):
+        outputs = self.model(input)
+
+        # instance-wise average pooling
+        outputs_mean = outputs.clone()
+        inst_list = np.unique(inst.cpu().numpy().astype(int))        
+        for i in inst_list:
+            for b in range(input.size()[0]):
+                indices = (inst[b:b+1] == int(i)).nonzero() # n x 4            
+                for j in range(self.output_nc):
+                    output_ins = outputs[indices[:,0] + b, indices[:,1] + j, indices[:,2], indices[:,3]]                    
+                    mean_feat = torch.mean(output_ins).expand_as(output_ins)                                        
+                    outputs_mean[indices[:,0] + b, indices[:,1] + j, indices[:,2], indices[:,3]] = mean_feat                       
+        return outputs_mean
+
+
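+# In Encoder.forward, the instance-wise average pooling replaces every pixel of
+# each instance with that instance's mean feature, per output channel, yielding
+# one feature vector per object (pix2pixHD's feature-encoding scheme).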
+class Generator_Adain(nn.Module):
+    def __init__(self, input_nc, output_nc, latent_size, ngf=64, n_downsampling=2, n_blocks=4, norm_layer=nn.BatchNorm2d,
+                 padding_type='reflect'):
+        assert (n_blocks >= 0)
+        super(Generator_Adain, self).__init__()
+        activation = nn.ReLU(True)
+
+        Enc = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0), norm_layer(ngf), activation]
+        ### downsample
+        for i in range(n_downsampling):
+            mult = 2 ** i
+            Enc += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1),
+                      norm_layer(ngf * mult * 2), activation]
+        self.Encoder = nn.Sequential(*Enc)
+
+        ### resnet blocks
+        BN = []
+        mult = 2 ** n_downsampling
+        for i in range(n_blocks):
+            BN += [ResnetBlock_Adain(ngf*mult, latent_size=latent_size, padding_type=padding_type, activation=activation)]
+        self.BottleNeck = nn.Sequential(*BN)
+        '''self.ResBlockAdain1 = ResnetBlock_Adain(ngf * mult, latent_size=latent_size, padding_type=padding_type,
+                                                activation=activation)
+        self.ResBlockAdain2 = ResnetBlock_Adain(ngf * mult, latent_size=latent_size, padding_type=padding_type,
+                                                activation=activation)
+        self.ResBlockAdain3 = ResnetBlock_Adain(ngf * mult, latent_size=latent_size, padding_type=padding_type,
+                                                activation=activation)
+        self.ResBlockAdain4 = ResnetBlock_Adain(ngf * mult, latent_size=latent_size, padding_type=padding_type,
+                                                activation=activation)'''
+
+        ### upsample
+        Dec = []
+        for i in range(n_downsampling):
+            mult = 2 ** (n_downsampling - i)
+            Dec += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1,
+                                         output_padding=1),
+                      norm_layer(int(ngf * mult / 2)), activation]
+        Dec += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0), nn.Tanh()]
+
+        self.Decoder = nn.Sequential(*Dec)
+        #self.model = nn.Sequential(*model)
+        self.spNorm = SpecificNorm()
+
+    def forward(self, input, dlatents):
+        x = input
+        x = self.Encoder(x)
+
+
+        for i in range(len(self.BottleNeck)):
+            x = self.BottleNeck[i](x, dlatents)
+        '''x = self.ResBlockAdain1(x, dlatents)
+        x = self.ResBlockAdain2(x, dlatents)
+        x = self.ResBlockAdain3(x, dlatents)
+        x = self.ResBlockAdain4(x, dlatents)'''
+
+        x = self.Decoder(x)
+
+        x = (x + 1) / 2
+        x = self.spNorm(x)
+
+        return x
+
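+# Usage sketch for Generator_Adain (hypothetical sizes; CUDA assumed because
+# SpecificNorm allocates its buffers with .cuda()):
+#   G = Generator_Adain(input_nc=3, output_nc=3, latent_size=512).cuda()
+#   img = torch.randn(1, 3, 224, 224).cuda()   # source frame
+#   ident = torch.randn(1, 512).cuda()         # identity embedding
+#   out = G(img, ident)                        # (1, 3, 224, 224), ImageNet-normalized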
+class Generator_Adain_Mask(nn.Module):
+    def __init__(self, input_nc, output_nc, latent_size, ngf=64, n_downsampling=2, n_blocks=4, norm_layer=nn.BatchNorm2d,
+                 padding_type='reflect'):
+        assert (n_blocks >= 0)
+        super(Generator_Adain_Mask, self).__init__()
+        activation = nn.ReLU(True)
+
+        Enc = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0), norm_layer(ngf), activation]
+        ### downsample
+        for i in range(n_downsampling):
+            mult = 2 ** i
+            Enc += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1),
+                      norm_layer(ngf * mult * 2), activation]
+        self.Encoder = nn.Sequential(*Enc)
+
+        ### resnet blocks
+        BN = []
+        mult = 2 ** n_downsampling
+        for i in range(n_blocks):
+            BN += [ResnetBlock_Adain(ngf*mult, latent_size=latent_size, padding_type=padding_type, activation=activation)]
+        self.BottleNeck = nn.Sequential(*BN)
+
+        ### upsample
+        Dec = []
+        for i in range(n_downsampling):
+            mult = 2 ** (n_downsampling - i)
+            Dec += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1,
+                                         output_padding=1),
+                      norm_layer(int(ngf * mult / 2)), activation]
+        Fake_out = [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0), nn.Tanh()]
+        Mask_out = [nn.ReflectionPad2d(3), nn.Conv2d(ngf, 1, kernel_size=7, padding=0), nn.Sigmoid()]
+
+        self.Decoder = nn.Sequential(*Dec)
+        #self.model = nn.Sequential(*model)
+        self.spNorm = SpecificNorm()
+        self.Fake_out = nn.Sequential(*Fake_out)
+        self.Mask_out = nn.Sequential(*Mask_out)
+
+    def forward(self, input, dlatents):
+        x = input
+        x = self.Encoder(x)
+
+
+        for i in range(len(self.BottleNeck)):
+            x = self.BottleNeck[i](x, dlatents)
+
+        x = self.Decoder(x)
+
+        fake_out = self.Fake_out(x)
+        mask_out = self.Mask_out(x)
+
+        fake_out = (fake_out + 1) / 2
+        fake_out = self.spNorm(fake_out)
+
+        generated = fake_out * mask_out + input * (1-mask_out)
+
+        return generated, mask_out
+
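+# The mask head predicts a soft alpha map in [0, 1] (sigmoid), and the output is
+# the alpha blend fake_out * mask + input * (1 - mask), nudging the network to
+# edit only the face region while passing the background through untouched.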
+class Generator_Adain_Upsample(nn.Module):
+    def __init__(self, input_nc, output_nc, latent_size, ngf=64, n_downsampling=2, n_blocks=4, norm_layer=nn.BatchNorm2d,
+                 padding_type='reflect'):
+        assert (n_blocks >= 0)
+        super(Generator_Adain_Upsample, self).__init__()
+        activation = nn.ReLU(True)
+
+        Enc = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0), norm_layer(ngf), activation]
+        ### downsample
+        for i in range(n_downsampling):
+            mult = 2 ** i
+            Enc += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1),
+                      norm_layer(ngf * mult * 2), activation]
+        self.Encoder = nn.Sequential(*Enc)
+
+        ### resnet blocks
+        BN = []
+        mult = 2 ** n_downsampling
+        for i in range(n_blocks):
+            BN += [ResnetBlock_Adain(ngf*mult, latent_size=latent_size, padding_type=padding_type, activation=activation)]
+        self.BottleNeck = nn.Sequential(*BN)
+
+        ### upsample
+        Dec = []
+        for i in range(n_downsampling):
+            mult = 2 ** (n_downsampling - i)
+            '''Dec += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1,
+                                         output_padding=1),
+                      norm_layer(int(ngf * mult / 2)), activation]'''
+            Dec += [nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False),
+                    nn.Conv2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=1, padding=1),
+                    norm_layer(int(ngf * mult / 2)), activation]
+        Dec += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0), nn.Tanh()]
+
+        self.Decoder = nn.Sequential(*Dec)
+        self.spNorm = SpecificNorm()
+
+    def forward(self, input, dlatents):
+        x = input
+        x = self.Encoder(x)
+
+
+        for i in range(len(self.BottleNeck)):
+            x = self.BottleNeck[i](x, dlatents)
+
+        x = self.Decoder(x)
+
+        x = (x + 1) / 2
+        x = self.spNorm(x)
+
+        return x
+
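+# Generator_Adain_Upsample swaps the transposed convolutions for upsample + conv
+# decoding, a common change that reduces checkerboard artifacts; otherwise the
+# architecture matches Generator_Adain.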
+class Generator_Adain_2(nn.Module):
+    def __init__(self, input_nc, output_nc, latent_size, ngf=64, n_downsampling=2, n_blocks=4, norm_layer=nn.BatchNorm2d,
+                 padding_type='reflect'):
+        assert (n_blocks >= 0)
+        super(Generator_Adain_2, self).__init__()
+        activation = nn.ReLU(True)
+
+        Enc = [nn.ReflectionPad2d(3), nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0), norm_layer(ngf), activation]
+        ### downsample
+        for i in range(n_downsampling):
+            mult = 2 ** i
+            Enc += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1),
+                      norm_layer(ngf * mult * 2), activation]
+        self.Encoder = nn.Sequential(*Enc)
+
+        ### resnet blocks
+        BN = []
+        mult = 2 ** n_downsampling
+        for i in range(n_blocks):
+            BN += [ResnetBlock_Adain(ngf*mult, latent_size=latent_size, padding_type=padding_type, activation=activation)]
+        self.BottleNeck = nn.Sequential(*BN)
+
+        ### upsample
+        Dec = []
+        for i in range(n_downsampling):
+            mult = 2 ** (n_downsampling - i)
+            Dec += [UpBlock_Adain(dim_in=ngf * mult, dim_out=int(ngf * mult / 2), latent_size=latent_size, padding_type=padding_type)]
+        layer_out = [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0), nn.Tanh()]
+
+        self.Decoder = nn.Sequential(*Dec)
+        #self.model = nn.Sequential(*model)
+        self.spNorm = SpecificNorm()
+        self.layer_out = nn.Sequential(*layer_out)
+
+    def forward(self, input, dlatents):
+        x = input
+        x = self.Encoder(x)
+
+
+        for i in range(len(self.BottleNeck)):
+            x = self.BottleNeck[i](x, dlatents)
+
+        for i in range(len(self.Decoder)):
+            x = self.Decoder[i](x, dlatents)
+
+        x = self.layer_out(x)
+
+        x = (x + 1) / 2
+        x = self.spNorm(x)
+
+        return x
+
+class MultiscaleDiscriminator(nn.Module):
+    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, 
+                 use_sigmoid=False, num_D=3, getIntermFeat=False):
+        super(MultiscaleDiscriminator, self).__init__()
+        self.num_D = num_D
+        self.n_layers = n_layers
+        self.getIntermFeat = getIntermFeat
+     
+        for i in range(num_D):
+            netD = NLayerDiscriminator(input_nc, ndf, n_layers, norm_layer, use_sigmoid, getIntermFeat)
+            if getIntermFeat:                                
+                for j in range(n_layers+2):
+                    setattr(self, 'scale'+str(i)+'_layer'+str(j), getattr(netD, 'model'+str(j)))                                   
+            else:
+                setattr(self, 'layer'+str(i), netD.model)
+
+        self.downsample = nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False)
+
+    def singleD_forward(self, model, input):
+        if self.getIntermFeat:
+            result = [input]
+            for i in range(len(model)):
+                result.append(model[i](result[-1]))
+            return result[1:]
+        else:
+            return [model(input)]
+
+    def forward(self, input):        
+        num_D = self.num_D
+        result = []
+        input_downsampled = input
+        for i in range(num_D):
+            if self.getIntermFeat:
+                model = [getattr(self, 'scale'+str(num_D-1-i)+'_layer'+str(j)) for j in range(self.n_layers+2)]
+            else:
+                model = getattr(self, 'layer'+str(num_D-1-i))
+            result.append(self.singleD_forward(model, input_downsampled))
+            if i != (num_D-1):
+                input_downsampled = self.downsample(input_downsampled)
+        return result
+        
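+# MultiscaleDiscriminator runs num_D copies of the PatchGAN below on progressively
+# average-pooled inputs (full, 1/2 and 1/4 resolution for num_D=3); when
+# getIntermFeat is set, each stage is registered separately so intermediate
+# features can be returned for the GAN feature-matching loss.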
+# Defines the PatchGAN discriminator with the specified arguments.
+class NLayerDiscriminator(nn.Module):
+    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, getIntermFeat=False):
+        super(NLayerDiscriminator, self).__init__()
+        self.getIntermFeat = getIntermFeat
+        self.n_layers = n_layers
+
+        kw = 4
+        padw = 1
+        sequence = [[nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]]
+
+        nf = ndf
+        for n in range(1, n_layers):
+            nf_prev = nf
+            nf = min(nf * 2, 512)
+            sequence += [[
+                nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=2, padding=padw),
+                norm_layer(nf), nn.LeakyReLU(0.2, True)
+            ]]
+
+        nf_prev = nf
+        nf = min(nf * 2, 512)
+        sequence += [[
+            nn.Conv2d(nf_prev, nf, kernel_size=kw, stride=1, padding=padw),
+            norm_layer(nf),
+            nn.LeakyReLU(0.2, True)
+        ]]
+
+        if use_sigmoid:
+            sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw), nn.Sigmoid()]]
+        else:
+            sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]]
+
+        if getIntermFeat:
+            for n in range(len(sequence)):
+                setattr(self, 'model'+str(n), nn.Sequential(*sequence[n]))
+        else:
+            sequence_stream = []
+            for n in range(len(sequence)):
+                sequence_stream += sequence[n]
+            self.model = nn.Sequential(*sequence_stream)
+
+    def forward(self, input):
+        if self.getIntermFeat:
+            res = [input]
+            for n in range(self.n_layers+2):
+                model = getattr(self, 'model'+str(n))
+                res.append(model(res[-1]))
+            return res[1:]
+        else:
+            return self.model(input)        
+
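+# With kw=4 and n_layers=3 (three stride-2 convs plus two stride-1 convs) the
+# PatchGAN above scores overlapping receptive fields of roughly 70x70 input
+# pixels; getIntermFeat exposes the stages as model0..model{n_layers+1}.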
+from torchvision import models
+class Vgg19(torch.nn.Module):
+    def __init__(self, requires_grad=False):
+        super(Vgg19, self).__init__()
+        vgg_pretrained_features = models.vgg19(pretrained=True).features
+        self.slice1 = torch.nn.Sequential()
+        self.slice2 = torch.nn.Sequential()
+        self.slice3 = torch.nn.Sequential()
+        self.slice4 = torch.nn.Sequential()
+        self.slice5 = torch.nn.Sequential()
+        for x in range(2):
+            self.slice1.add_module(str(x), vgg_pretrained_features[x])
+        for x in range(2, 7):
+            self.slice2.add_module(str(x), vgg_pretrained_features[x])
+        for x in range(7, 12):
+            self.slice3.add_module(str(x), vgg_pretrained_features[x])
+        for x in range(12, 21):
+            self.slice4.add_module(str(x), vgg_pretrained_features[x])
+        for x in range(21, 30):
+            self.slice5.add_module(str(x), vgg_pretrained_features[x])
+        if not requires_grad:
+            for param in self.parameters():
+                param.requires_grad = False
+
+    def forward(self, X):
+        h_relu1 = self.slice1(X)
+        h_relu2 = self.slice2(h_relu1)        
+        h_relu3 = self.slice3(h_relu2)        
+        h_relu4 = self.slice4(h_relu3)        
+        h_relu5 = self.slice5(h_relu4)                
+        out = [h_relu1, h_relu2, h_relu3, h_relu4, h_relu5]
+        return out
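+
+# The five slices end at relu1_1, relu2_1, relu3_1, relu4_1 and relu5_1 of
+# torchvision's VGG-19; pix2pixHD's VGG loss typically weights them
+# 1/32, 1/16, 1/8, 1/4 and 1.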

+ 304 - 0
models/pix2pixHD_model.py

@@ -0,0 +1,304 @@
+import numpy as np
+import torch
+import os
+from torch.autograd import Variable
+from util.image_pool import ImagePool
+from .base_model import BaseModel
+from . import networks
+
+class Pix2PixHDModel(BaseModel):
+    def name(self):
+        return 'Pix2PixHDModel'
+    
+    def init_loss_filter(self, use_gan_feat_loss, use_vgg_loss):
+        flags = (True, use_gan_feat_loss, use_vgg_loss, True, True)
+        def loss_filter(g_gan, g_gan_feat, g_vgg, d_real, d_fake):
+            return [l for (l,f) in zip((g_gan,g_gan_feat,g_vgg,d_real,d_fake),flags) if f]
+        return loss_filter
+    
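+    # loss_filter keeps only the enabled loss slots, e.g. with all losses on it
+    # returns [g_gan, g_gan_feat, g_vgg, d_real, d_fake]; with --no_vgg_loss the
+    # g_vgg entry is dropped, keeping self.loss_names aligned with the values.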
+    def initialize(self, opt):
+        BaseModel.initialize(self, opt)
+        if opt.resize_or_crop != 'none' or not opt.isTrain: # cudnn benchmark helps speed, but at full training resolution it can cause OOM
+            torch.backends.cudnn.benchmark = True
+        self.isTrain = opt.isTrain
+        self.use_features = opt.instance_feat or opt.label_feat
+        self.gen_features = self.use_features and not self.opt.load_features
+        input_nc = opt.label_nc if opt.label_nc != 0 else opt.input_nc
+
+        ##### define networks        
+        # Generator network
+        netG_input_nc = input_nc        
+        if not opt.no_instance:
+            netG_input_nc += 1
+        if self.use_features:
+            netG_input_nc += opt.feat_num                  
+        self.netG = networks.define_G(netG_input_nc, opt.output_nc, opt.ngf, opt.netG, 
+                                      opt.n_downsample_global, opt.n_blocks_global, opt.n_local_enhancers, 
+                                      opt.n_blocks_local, opt.norm, gpu_ids=self.gpu_ids)        
+
+        # Discriminator network
+        if self.isTrain:
+            use_sigmoid = opt.no_lsgan
+            netD_input_nc = input_nc + opt.output_nc
+            if not opt.no_instance:
+                netD_input_nc += 1
+            self.netD = networks.define_D(netD_input_nc, opt.ndf, opt.n_layers_D, opt.norm, use_sigmoid, 
+                                          opt.num_D, not opt.no_ganFeat_loss, gpu_ids=self.gpu_ids)
+
+        ### Encoder network
+        if self.gen_features:          
+            self.netE = networks.define_G(opt.output_nc, opt.feat_num, opt.nef, 'encoder', 
+                                          opt.n_downsample_E, norm=opt.norm, gpu_ids=self.gpu_ids)  
+        if self.opt.verbose:
+                print('---------- Networks initialized -------------')
+
+        # load networks
+        if not self.isTrain or opt.continue_train or opt.load_pretrain:
+            pretrained_path = '' if not self.isTrain else opt.load_pretrain
+            self.load_network(self.netG, 'G', opt.which_epoch, pretrained_path)            
+            if self.isTrain:
+                self.load_network(self.netD, 'D', opt.which_epoch, pretrained_path)  
+            if self.gen_features:
+                self.load_network(self.netE, 'E', opt.which_epoch, pretrained_path)              
+
+        # set loss functions and optimizers
+        if self.isTrain:
+            if opt.pool_size > 0 and (len(self.gpu_ids)) > 1:
+                raise NotImplementedError("Fake Pool Not Implemented for MultiGPU")
+            self.fake_pool = ImagePool(opt.pool_size)
+            self.old_lr = opt.lr
+
+            # define loss functions
+            self.loss_filter = self.init_loss_filter(not opt.no_ganFeat_loss, not opt.no_vgg_loss)
+            
+            self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan, tensor=self.Tensor)   
+            self.criterionFeat = torch.nn.L1Loss()
+            if not opt.no_vgg_loss:             
+                self.criterionVGG = networks.VGGLoss(self.gpu_ids)
+                
+        
+            # Names so we can break out each loss term
+            self.loss_names = self.loss_filter('G_GAN','G_GAN_Feat','G_VGG','D_real', 'D_fake')
+
+            # initialize optimizers
+            # optimizer G
+            if opt.niter_fix_global > 0:                
+                import sys
+                if sys.version_info >= (3,0):
+                    finetune_list = set()
+                else:
+                    from sets import Set
+                    finetune_list = Set()
+
+                params_dict = dict(self.netG.named_parameters())
+                params = []
+                for key, value in params_dict.items():       
+                    if key.startswith('model' + str(opt.n_local_enhancers)):                    
+                        params += [value]
+                        finetune_list.add(key.split('.')[0])  
+                print('------------- Only training the local enhancer network (for %d epochs) ------------' % opt.niter_fix_global)
+                print('The layers that are finetuned are ', sorted(finetune_list))                         
+            else:
+                params = list(self.netG.parameters())
+            if self.gen_features:              
+                params += list(self.netE.parameters())         
+            self.optimizer_G = torch.optim.Adam(params, lr=opt.lr, betas=(opt.beta1, 0.999))                            
+
+            # optimizer D                        
+            params = list(self.netD.parameters())    
+            self.optimizer_D = torch.optim.Adam(params, lr=opt.lr, betas=(opt.beta1, 0.999))
+
+    def encode_input(self, label_map, inst_map=None, real_image=None, feat_map=None, infer=False):             
+        if self.opt.label_nc == 0:
+            input_label = label_map.data.cuda()
+        else:
+            # create one-hot vector for label map 
+            size = label_map.size()
+            oneHot_size = (size[0], self.opt.label_nc, size[2], size[3])
+            input_label = torch.cuda.FloatTensor(torch.Size(oneHot_size)).zero_()
+            input_label = input_label.scatter_(1, label_map.data.long().cuda(), 1.0)
+            if self.opt.data_type == 16:
+                input_label = input_label.half()
+
+        # get edges from instance map
+        if not self.opt.no_instance:
+            inst_map = inst_map.data.cuda()
+            edge_map = self.get_edges(inst_map)
+            input_label = torch.cat((input_label, edge_map), dim=1)         
+        input_label = Variable(input_label, volatile=infer)
+
+        # real images for training
+        if real_image is not None:
+            real_image = Variable(real_image.data.cuda())
+
+        # instance map for feature encoding
+        if self.use_features:
+            # get precomputed feature maps
+            if self.opt.load_features:
+                feat_map = Variable(feat_map.data.cuda())
+            if self.opt.label_feat:
+                inst_map = label_map.cuda()
+
+        return input_label, inst_map, real_image, feat_map
+
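+    # For label_nc > 0, encode_input scatters the label map into a one-hot volume
+    # of shape (N, label_nc, H, W); with label_nc == 0 (the default here) the raw
+    # tensor is used directly. The volatile flag is a no-op (with a warning) on
+    # PyTorch >= 0.4.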
+    def discriminate(self, input_label, test_image, use_pool=False):
+        input_concat = torch.cat((input_label, test_image.detach()), dim=1)
+        if use_pool:            
+            fake_query = self.fake_pool.query(input_concat)
+            return self.netD.forward(fake_query)
+        else:
+            return self.netD.forward(input_concat)
+
+    def forward(self, label, inst, image, feat, infer=False):
+        # Encode Inputs
+        input_label, inst_map, real_image, feat_map = self.encode_input(label, inst, image, feat)  
+
+        # Fake Generation
+        if self.use_features:
+            if not self.opt.load_features:
+                feat_map = self.netE.forward(real_image, inst_map)                     
+            input_concat = torch.cat((input_label, feat_map), dim=1)                        
+        else:
+            input_concat = input_label
+        fake_image = self.netG.forward(input_concat)
+
+        # Fake Detection and Loss
+        pred_fake_pool = self.discriminate(input_label, fake_image, use_pool=True)
+        loss_D_fake = self.criterionGAN(pred_fake_pool, False)        
+
+        # Real Detection and Loss        
+        pred_real = self.discriminate(input_label, real_image)
+        loss_D_real = self.criterionGAN(pred_real, True)
+
+        # GAN loss (Fake Passability Loss)        
+        pred_fake = self.netD.forward(torch.cat((input_label, fake_image), dim=1))        
+        loss_G_GAN = self.criterionGAN(pred_fake, True)               
+        
+        # GAN feature matching loss
+        loss_G_GAN_Feat = 0
+        if not self.opt.no_ganFeat_loss:
+            feat_weights = 4.0 / (self.opt.n_layers_D + 1)
+            D_weights = 1.0 / self.opt.num_D
+            for i in range(self.opt.num_D):
+                for j in range(len(pred_fake[i])-1):
+                    loss_G_GAN_Feat += D_weights * feat_weights * \
+                        self.criterionFeat(pred_fake[i][j], pred_real[i][j].detach()) * self.opt.lambda_feat
+                   
+        # VGG feature matching loss
+        loss_G_VGG = 0
+        if not self.opt.no_vgg_loss:
+            loss_G_VGG = self.criterionVGG(fake_image, real_image) * self.opt.lambda_feat
+        
+        # Only return the fake image if necessary, to save bandwidth
+        return [ self.loss_filter( loss_G_GAN, loss_G_GAN_Feat, loss_G_VGG, loss_D_real, loss_D_fake ), None if not infer else fake_image ]
+
+    def inference(self, label, inst, image=None):
+        # Encode Inputs        
+        image = Variable(image) if image is not None else None
+        input_label, inst_map, real_image, _ = self.encode_input(Variable(label), Variable(inst), image, infer=True)
+
+        # Fake Generation
+        if self.use_features:
+            if self.opt.use_encoded_image:
+                # encode the real image to get feature map
+                feat_map = self.netE.forward(real_image, inst_map)
+            else:
+                # sample clusters from precomputed features             
+                feat_map = self.sample_features(inst_map)
+            input_concat = torch.cat((input_label, feat_map), dim=1)                        
+        else:
+            input_concat = input_label        
+           
+        # use no_grad whenever it is available (PyTorch >= 0.4), not only on 0.4.x,
+        # so inference on 1.x does not build an autograd graph
+        if hasattr(torch, 'no_grad'):
+            with torch.no_grad():
+                fake_image = self.netG.forward(input_concat)
+        else:
+            fake_image = self.netG.forward(input_concat)
+        return fake_image
+
+    def sample_features(self, inst): 
+        # read precomputed feature clusters 
+        cluster_path = os.path.join(self.opt.checkpoints_dir, self.opt.name, self.opt.cluster_path)        
+        features_clustered = np.load(cluster_path, encoding='latin1', allow_pickle=True).item()  # allow_pickle is required by NumPy >= 1.16.3 for pickled archives
+
+        # randomly sample from the feature clusters
+        inst_np = inst.cpu().numpy().astype(int)                                      
+        feat_map = self.Tensor(inst.size()[0], self.opt.feat_num, inst.size()[2], inst.size()[3])
+        for i in np.unique(inst_np):    
+            label = i if i < 1000 else i//1000
+            if label in features_clustered:
+                feat = features_clustered[label]
+                cluster_idx = np.random.randint(0, feat.shape[0]) 
+                                            
+                idx = (inst == int(i)).nonzero()
+                for k in range(self.opt.feat_num):                                    
+                    feat_map[idx[:,0], idx[:,1] + k, idx[:,2], idx[:,3]] = feat[cluster_idx, k]
+        if self.opt.data_type==16:
+            feat_map = feat_map.half()
+        return feat_map
+
+    def encode_features(self, image, inst):
+        image = Variable(image.cuda(), volatile=True)
+        feat_num = self.opt.feat_num
+        h, w = inst.size()[2], inst.size()[3]
+        block_num = 32
+        feat_map = self.netE.forward(image, inst.cuda())
+        inst_np = inst.cpu().numpy().astype(int)
+        feature = {}
+        for i in range(self.opt.label_nc):
+            feature[i] = np.zeros((0, feat_num+1))
+        for i in np.unique(inst_np):
+            label = i if i < 1000 else i//1000
+            idx = (inst == int(i)).nonzero()
+            num = idx.size()[0]
+            idx = idx[num//2,:]
+            val = np.zeros((1, feat_num+1))                        
+            for k in range(feat_num):
+                val[0, k] = feat_map[idx[0], idx[1] + k, idx[2], idx[3]].item()  # .data[0] on a 0-dim tensor errors on modern PyTorch
+            val[0, feat_num] = float(num) / (h * w // block_num)
+            feature[label] = np.append(feature[label], val, axis=0)
+        return feature
+
+    def get_edges(self, t):
+        edge = torch.cuda.ByteTensor(t.size()).zero_()
+        edge[:,:,:,1:] = edge[:,:,:,1:] | (t[:,:,:,1:] != t[:,:,:,:-1])
+        edge[:,:,:,:-1] = edge[:,:,:,:-1] | (t[:,:,:,1:] != t[:,:,:,:-1])
+        edge[:,:,1:,:] = edge[:,:,1:,:] | (t[:,:,1:,:] != t[:,:,:-1,:])
+        edge[:,:,:-1,:] = edge[:,:,:-1,:] | (t[:,:,1:,:] != t[:,:,:-1,:])
+        if self.opt.data_type==16:
+            return edge.half()
+        else:
+            return edge.float()
+
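+    # get_edges marks a pixel as boundary wherever its instance id differs from a
+    # horizontal or vertical neighbour, producing the one-channel edge map that is
+    # concatenated to the label input.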
+    def save(self, which_epoch):
+        self.save_network(self.netG, 'G', which_epoch, self.gpu_ids)
+        self.save_network(self.netD, 'D', which_epoch, self.gpu_ids)
+        if self.gen_features:
+            self.save_network(self.netE, 'E', which_epoch, self.gpu_ids)
+
+    def update_fixed_params(self):
+        # after fixing the global generator for a number of iterations, also start finetuning it
+        params = list(self.netG.parameters())
+        if self.gen_features:
+            params += list(self.netE.parameters())           
+        self.optimizer_G = torch.optim.Adam(params, lr=self.opt.lr, betas=(self.opt.beta1, 0.999))
+        if self.opt.verbose:
+            print('------------ Now also finetuning global generator -----------')
+
+    def update_learning_rate(self):
+        lrd = self.opt.lr / self.opt.niter_decay
+        lr = self.old_lr - lrd        
+        for param_group in self.optimizer_D.param_groups:
+            param_group['lr'] = lr
+        for param_group in self.optimizer_G.param_groups:
+            param_group['lr'] = lr
+        if self.opt.verbose:
+            print('update learning rate: %f -> %f' % (self.old_lr, lr))
+        self.old_lr = lr
+
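+# update_learning_rate above implements the pix2pixHD schedule: a linear decay of
+# the learning rate to zero over opt.niter_decay epochs, applied identically to
+# both optimizers each epoch.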
+class InferenceModel(Pix2PixHDModel):
+    def forward(self, inp):
+        label, inst = inp
+        return self.inference(label, inst)
+
+        

+ 347 - 0
models/ui_model.py

@@ -0,0 +1,347 @@
+import torch
+from torch.autograd import Variable
+from collections import OrderedDict
+import numpy as np
+import os
+from PIL import Image
+import util.util as util
+from .base_model import BaseModel
+from . import networks
+
+class UIModel(BaseModel):
+    def name(self):
+        return 'UIModel'
+
+    def initialize(self, opt):
+        assert(not opt.isTrain)
+        BaseModel.initialize(self, opt)
+        self.use_features = opt.instance_feat or opt.label_feat
+
+        netG_input_nc = opt.label_nc
+        if not opt.no_instance:
+            netG_input_nc += 1            
+        if self.use_features:   
+            netG_input_nc += opt.feat_num           
+
+        self.netG = networks.define_G(netG_input_nc, opt.output_nc, opt.ngf, opt.netG, 
+                                      opt.n_downsample_global, opt.n_blocks_global, opt.n_local_enhancers, 
+                                      opt.n_blocks_local, opt.norm, gpu_ids=self.gpu_ids)            
+        self.load_network(self.netG, 'G', opt.which_epoch)
+
+        print('---------- Networks initialized -------------')
+
+    def toTensor(self, img, normalize=False):
+        tensor = torch.from_numpy(np.array(img, np.int32, copy=False))
+        tensor = tensor.view(1, img.size[1], img.size[0], len(img.mode))    
+        tensor = tensor.transpose(1, 2).transpose(1, 3).contiguous()
+        if normalize:
+            return (tensor.float()/255.0 - 0.5) / 0.5        
+        return tensor.float()
+
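+    # toTensor converts a PIL image of size (W, H) into a (1, C, H, W) float
+    # tensor, optionally mapping [0, 255] to [-1, 1].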
+    def load_image(self, label_path, inst_path, feat_path):
+        opt = self.opt
+        # read label map
+        label_img = Image.open(label_path)    
+        if label_path.find('face') != -1:
+            label_img = label_img.convert('L')
+        ow, oh = label_img.size    
+        w = opt.loadSize
+        h = int(w * oh / ow)    
+        label_img = label_img.resize((w, h), Image.NEAREST)
+        label_map = self.toTensor(label_img)           
+        
+        # onehot vector input for label map
+        self.label_map = label_map.cuda()
+        oneHot_size = (1, opt.label_nc, h, w)
+        input_label = self.Tensor(torch.Size(oneHot_size)).zero_()
+        self.input_label = input_label.scatter_(1, label_map.long().cuda(), 1.0)
+
+        # read instance map
+        if not opt.no_instance:
+            inst_img = Image.open(inst_path)        
+            inst_img = inst_img.resize((w, h), Image.NEAREST)            
+            self.inst_map = self.toTensor(inst_img).cuda()
+            self.edge_map = self.get_edges(self.inst_map)          
+            self.net_input = Variable(torch.cat((self.input_label, self.edge_map), dim=1), volatile=True)
+        else:
+            self.net_input = Variable(self.input_label, volatile=True)  
+        
+        self.features_clustered = np.load(feat_path, allow_pickle=True).item()
+        self.object_map = self.inst_map if opt.instance_feat else self.label_map 
+                       
+        object_np = self.object_map.cpu().numpy().astype(int) 
+        self.feat_map = self.Tensor(1, opt.feat_num, h, w).zero_()                 
+        self.cluster_indices = np.zeros(self.opt.label_nc, np.uint8)
+        for i in np.unique(object_np):    
+            label = i if i < 1000 else i//1000
+            if label in self.features_clustered:
+                feat = self.features_clustered[label]
+                np.random.seed(i+1)
+                cluster_idx = np.random.randint(0, feat.shape[0])
+                self.cluster_indices[label] = cluster_idx
+                idx = (self.object_map == i).nonzero()                    
+                self.set_features(idx, feat, cluster_idx)
+
+        self.net_input_original = self.net_input.clone()        
+        self.label_map_original = self.label_map.clone()
+        self.feat_map_original = self.feat_map.clone()
+        if not opt.no_instance:
+            self.inst_map_original = self.inst_map.clone()        
+
+    def reset(self):
+        self.net_input = self.net_input_prev = self.net_input_original.clone()        
+        self.label_map = self.label_map_prev = self.label_map_original.clone()
+        self.feat_map = self.feat_map_prev = self.feat_map_original.clone()
+        if not self.opt.no_instance:
+            self.inst_map = self.inst_map_prev = self.inst_map_original.clone()
+        self.object_map = self.inst_map if self.opt.instance_feat else self.label_map 
+
+    def undo(self):        
+        self.net_input = self.net_input_prev
+        self.label_map = self.label_map_prev
+        self.feat_map = self.feat_map_prev
+        if not self.opt.no_instance:
+            self.inst_map = self.inst_map_prev
+        self.object_map = self.inst_map if self.opt.instance_feat else self.label_map 
+            
+    # get boundary map from instance map
+    def get_edges(self, t):
+        edge = torch.cuda.ByteTensor(t.size()).zero_()
+        edge[:,:,:,1:] = edge[:,:,:,1:] | (t[:,:,:,1:] != t[:,:,:,:-1])
+        edge[:,:,:,:-1] = edge[:,:,:,:-1] | (t[:,:,:,1:] != t[:,:,:,:-1])
+        edge[:,:,1:,:] = edge[:,:,1:,:] | (t[:,:,1:,:] != t[:,:,:-1,:])
+        edge[:,:,:-1,:] = edge[:,:,:-1,:] | (t[:,:,1:,:] != t[:,:,:-1,:])
+        return edge.float()
+
+    # change the label at the source position to the label at the target position
+    def change_labels(self, click_src, click_tgt): 
+        y_src, x_src = click_src[0], click_src[1]
+        y_tgt, x_tgt = click_tgt[0], click_tgt[1]
+        label_src = int(self.label_map[0, 0, y_src, x_src])
+        inst_src = self.inst_map[0, 0, y_src, x_src]
+        label_tgt = int(self.label_map[0, 0, y_tgt, x_tgt])
+        inst_tgt = self.inst_map[0, 0, y_tgt, x_tgt]
+
+        idx_src = (self.inst_map == inst_src).nonzero()         
+        # need to change 3 things: label map, instance map, and feature map
+        if idx_src.shape[0]:  # a (0, 4) shape is truthy, so test the row count instead
+            # backup current maps
+            self.backup_current_state() 
+
+            # change both the label map and the network input
+            self.label_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = label_tgt
+            self.net_input[idx_src[:,0], idx_src[:,1] + label_src, idx_src[:,2], idx_src[:,3]] = 0
+            self.net_input[idx_src[:,0], idx_src[:,1] + label_tgt, idx_src[:,2], idx_src[:,3]] = 1                                    
+            
+            # update the instance map (and the network input)
+            if inst_tgt > 1000:
+                # if different instances have different ids, give the new object a new id
+                tgt_indices = (self.inst_map > label_tgt * 1000) & (self.inst_map < (label_tgt+1) * 1000)
+                inst_tgt = self.inst_map[tgt_indices].max() + 1
+            self.inst_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = inst_tgt
+            self.net_input[:,-1,:,:] = self.get_edges(self.inst_map)
+
+            # also copy the source features to the target position      
+            idx_tgt = (self.inst_map == inst_tgt).nonzero()    
+            if idx_tgt.shape[0]:
+                self.copy_features(idx_src, idx_tgt[0,:])
+
+        self.fake_image = util.tensor2im(self.single_forward(self.net_input, self.feat_map))
+
+    # add strokes of target label in the image
+    def add_strokes(self, click_src, label_tgt, bw, save):
+        # get the region of the new strokes (bw is the brush width)        
+        size = self.net_input.size()
+        h, w = size[2], size[3]
+        idx_src = torch.LongTensor(bw**2, 4).fill_(0)
+        for i in range(bw):
+            idx_src[i*bw:(i+1)*bw, 2] = min(h-1, max(0, click_src[0]-bw//2 + i))
+            for j in range(bw):
+                idx_src[i*bw+j, 3] = min(w-1, max(0, click_src[1]-bw//2 + j))
+        idx_src = idx_src.cuda()
+        
+        # again, need to update 3 things
+        if idx_src.shape[0]:
+            # backup current maps
+            if save:
+                self.backup_current_state()
+
+            # update the label map (and the network input) in the stroke region            
+            self.label_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = label_tgt
+            for k in range(self.opt.label_nc):
+                self.net_input[idx_src[:,0], idx_src[:,1] + k, idx_src[:,2], idx_src[:,3]] = 0
+            self.net_input[idx_src[:,0], idx_src[:,1] + label_tgt, idx_src[:,2], idx_src[:,3]] = 1                 
+
+            # update the instance map (and the network input)
+            self.inst_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = label_tgt
+            self.net_input[:,-1,:,:] = self.get_edges(self.inst_map)
+            
+            # also update the features if available
+            if self.opt.instance_feat:                                            
+                feat = self.features_clustered[label_tgt]
+                #np.random.seed(label_tgt+1)   
+                #cluster_idx = np.random.randint(0, feat.shape[0])
+                cluster_idx = self.cluster_indices[label_tgt]
+                self.set_features(idx_src, feat, cluster_idx)                                                  
+        
+        self.fake_image = util.tensor2im(self.single_forward(self.net_input, self.feat_map))
+
+    # add an object to the clicked position with selected style
+    def add_objects(self, click_src, label_tgt, mask, style_id=0):
+        y, x = click_src[0], click_src[1]
+        mask = np.transpose(mask, (2, 0, 1))[np.newaxis,...]        
+        idx_src = torch.from_numpy(mask).cuda().nonzero()        
+        idx_src[:,2] += y
+        idx_src[:,3] += x
+
+        # backup current maps
+        self.backup_current_state()
+
+        # update label map
+        self.label_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = label_tgt        
+        for k in range(self.opt.label_nc):
+            self.net_input[idx_src[:,0], idx_src[:,1] + k, idx_src[:,2], idx_src[:,3]] = 0
+        self.net_input[idx_src[:,0], idx_src[:,1] + label_tgt, idx_src[:,2], idx_src[:,3]] = 1            
+
+        # update instance map
+        self.inst_map[idx_src[:,0], idx_src[:,1], idx_src[:,2], idx_src[:,3]] = label_tgt
+        self.net_input[:,-1,:,:] = self.get_edges(self.inst_map)
+                
+        # update feature map
+        self.set_features(idx_src, self.feat, style_id)                
+        
+        self.fake_image = util.tensor2im(self.single_forward(self.net_input, self.feat_map))
+
+    def single_forward(self, net_input, feat_map):
+        net_input = torch.cat((net_input, feat_map), dim=1)
+        fake_image = self.netG.forward(net_input)
+
+        if fake_image.size()[0] == 1:
+            return fake_image.data[0]        
+        return fake_image.data
+
+
+    # generate all outputs for different styles
+    def style_forward(self, click_pt, style_id=-1):           
+        if click_pt is None:            
+            self.fake_image = util.tensor2im(self.single_forward(self.net_input, self.feat_map))
+            self.crop = None
+            self.mask = None        
+        else:                       
+            instToChange = int(self.object_map[0, 0, click_pt[0], click_pt[1]])
+            self.instToChange = instToChange
+            label = instToChange if instToChange < 1000 else instToChange//1000        
+            self.feat = self.features_clustered[label]
+            self.fake_image = []
+            self.mask = self.object_map == instToChange
+            idx = self.mask.nonzero()
+            self.get_crop_region(idx)            
+            if idx.size(0):  # size() alone is always truthy for a 2-D result
+                if style_id == -1:
+                    (min_y, min_x, max_y, max_x) = self.crop
+                    ### original
+                    for cluster_idx in range(self.opt.multiple_output):
+                        self.set_features(idx, self.feat, cluster_idx)
+                        fake_image = self.single_forward(self.net_input, self.feat_map)
+                        fake_image = util.tensor2im(fake_image[:,min_y:max_y,min_x:max_x])
+                        self.fake_image.append(fake_image)    
+                    """### To speed up previewing different style results, either crop or downsample the label maps
+                    if instToChange > 1000:
+                        (min_y, min_x, max_y, max_x) = self.crop                                                
+                        ### crop                                                
+                        _, _, h, w = self.net_input.size()
+                        offset = 512
+                        y_start, x_start = max(0, min_y-offset), max(0, min_x-offset)
+                        y_end, x_end = min(h, (max_y + offset)), min(w, (max_x + offset))
+                        y_region = slice(y_start, y_start+(y_end-y_start)//16*16)
+                        x_region = slice(x_start, x_start+(x_end-x_start)//16*16)
+                        net_input = self.net_input[:,:,y_region,x_region]                    
+                        for cluster_idx in range(self.opt.multiple_output):  
+                            self.set_features(idx, self.feat, cluster_idx)
+                            fake_image = self.single_forward(net_input, self.feat_map[:,:,y_region,x_region])                            
+                            fake_image = util.tensor2im(fake_image[:,min_y-y_start:max_y-y_start,min_x-x_start:max_x-x_start])
+                            self.fake_image.append(fake_image)
+                    else:
+                        ### downsample
+                        (min_y, min_x, max_y, max_x) = [crop//2 for crop in self.crop]                    
+                        net_input = self.net_input[:,:,::2,::2]                    
+                        size = net_input.size()
+                        net_input_batch = net_input.expand(self.opt.multiple_output, size[1], size[2], size[3])             
+                        for cluster_idx in range(self.opt.multiple_output):  
+                            self.set_features(idx, self.feat, cluster_idx)
+                            feat_map = self.feat_map[:,:,::2,::2]
+                            if cluster_idx == 0:
+                                feat_map_batch = feat_map
+                            else:
+                                feat_map_batch = torch.cat((feat_map_batch, feat_map), dim=0)
+                        fake_image_batch = self.single_forward(net_input_batch, feat_map_batch)
+                        for i in range(self.opt.multiple_output):
+                            self.fake_image.append(util.tensor2im(fake_image_batch[i,:,min_y:max_y,min_x:max_x]))"""
+                                        
+                else:
+                    self.set_features(idx, self.feat, style_id)
+                    self.cluster_indices[label] = style_id
+                    self.fake_image = util.tensor2im(self.single_forward(self.net_input, self.feat_map))        
+
+    def backup_current_state(self):
+        self.net_input_prev = self.net_input.clone()
+        self.label_map_prev = self.label_map.clone() 
+        self.inst_map_prev = self.inst_map.clone() 
+        self.feat_map_prev = self.feat_map.clone() 
+
+    # crop the ROI and get the mask of the object
+    def get_crop_region(self, idx):
+        size = self.net_input.size()
+        h, w = size[2], size[3]
+        min_y, min_x = idx[:,2].min(), idx[:,3].min()
+        max_y, max_x = idx[:,2].max(), idx[:,3].max()             
+        crop_min = 128
+        if max_y - min_y < crop_min:
+            min_y = max(0, (max_y + min_y) // 2 - crop_min // 2)
+            max_y = min(h-1, min_y + crop_min)
+        if max_x - min_x < crop_min:
+            min_x = max(0, (max_x + min_x) // 2 - crop_min // 2)
+            max_x = min(w-1, min_x + crop_min)
+        self.crop = (min_y, min_x, max_y, max_x)           
+        self.mask = self.mask[:,:, min_y:max_y, min_x:max_x]
+
+    # update the feature map once a new object is added or the label is changed
+    def update_features(self, cluster_idx, mask=None, click_pt=None):        
+        self.feat_map_prev = self.feat_map.clone()
+        # adding a new object
+        if mask is not None:
+            y, x = click_pt[0], click_pt[1]
+            mask = np.transpose(mask, (2,0,1))[np.newaxis,...]        
+            idx = torch.from_numpy(mask).cuda().nonzero()        
+            idx[:,2] += y
+            idx[:,3] += x    
+        # changing the label of an existing object 
+        else:            
+            idx = (self.object_map == self.instToChange).nonzero()              
+
+        # update feature map
+        self.set_features(idx, self.feat, cluster_idx)        
+
+    # set the class features to the target feature
+    def set_features(self, idx, feat, cluster_idx):        
+        for k in range(self.opt.feat_num):
+            self.feat_map[idx[:,0], idx[:,1] + k, idx[:,2], idx[:,3]] = feat[cluster_idx, k] 
+
+    # copy the features at the target position to the source position
+    def copy_features(self, idx_src, idx_tgt):        
+        for k in range(self.opt.feat_num):
+            val = self.feat_map[idx_tgt[0], idx_tgt[1] + k, idx_tgt[2], idx_tgt[3]]
+            self.feat_map[idx_src[:,0], idx_src[:,1] + k, idx_src[:,2], idx_src[:,3]] = val 
+
+    def get_current_visuals(self, getLabel=False):                              
+        mask = self.mask     
+        if self.mask is not None:
+            mask = np.transpose(self.mask[0].cpu().float().numpy(), (1,2,0)).astype(np.uint8)        
+
+        dict_list = [('fake_image', self.fake_image), ('mask', mask)]
+
+        if getLabel: # only output label map if needed to save bandwidth
+            label = util.tensor2label(self.net_input.data[0], self.opt.label_nc)                    
+            dict_list += [('label', label)]
+
+        return OrderedDict(dict_list)

+ 104 - 0
options/base_options.py

@@ -0,0 +1,104 @@
+import argparse
+import os
+from util import util
+import torch
+
+class BaseOptions():
+    def __init__(self):
+        self.parser = argparse.ArgumentParser()
+        self.initialized = False
+
+    def initialize(self):
+        # experiment specifics
+        self.parser.add_argument('--name', type=str, default='people', help='name of the experiment. It decides where to store samples and models')
+        self.parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids, e.g. 0 or 0,1,2; use -1 for CPU')
+        self.parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
+        self.parser.add_argument('--model', type=str, default='pix2pixHD', help='which model to use')
+        self.parser.add_argument('--norm', type=str, default='batch', help='instance normalization or batch normalization')
+        self.parser.add_argument('--use_dropout', action='store_true', help='use dropout for the generator')
+        self.parser.add_argument('--data_type', default=32, type=int, choices=[8, 16, 32], help="Supported data type i.e. 8, 16, 32 bit")
+        self.parser.add_argument('--verbose', action='store_true', default=False, help='toggles verbose')
+        self.parser.add_argument('--fp16', action='store_true', default=False, help='train with AMP')
+        self.parser.add_argument('--local_rank', type=int, default=0, help='local rank for distributed training')
+        # argparse's type=bool treats any non-empty string (including 'false') as True,
+        # so parse the flag explicitly
+        self.parser.add_argument('--isTrain', type=lambda x: str(x).lower() in ('true', '1'), default=True, help='whether the model is in training mode')
+
+        # input/output sizes       
+        self.parser.add_argument('--batchSize', type=int, default=8, help='input batch size')
+        self.parser.add_argument('--loadSize', type=int, default=1024, help='scale images to this size')
+        self.parser.add_argument('--fineSize', type=int, default=512, help='then crop to this size')
+        self.parser.add_argument('--label_nc', type=int, default=0, help='# of input label channels')
+        self.parser.add_argument('--input_nc', type=int, default=3, help='# of input image channels')
+        self.parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels')
+
+        # for setting inputs
+        self.parser.add_argument('--dataroot', type=str, default='./datasets/cityscapes/') 
+        self.parser.add_argument('--resize_or_crop', type=str, default='scale_width', help='scaling and cropping of images at load time [resize_and_crop|crop|scale_width|scale_width_and_crop]')
+        self.parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')        
+        self.parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
+        self.parser.add_argument('--nThreads', default=2, type=int, help='# threads for loading data')                
+        self.parser.add_argument('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
+
+        # for displays
+        self.parser.add_argument('--display_winsize', type=int, default=512,  help='display window size')
+        self.parser.add_argument('--tf_log', action='store_true', help='if specified, use tensorboard logging. Requires tensorflow installed')
+
+        # for generator
+        self.parser.add_argument('--netG', type=str, default='global', help='selects model to use for netG')
+        self.parser.add_argument('--latent_size', type=int, default=512, help='latent size of Adain layer')
+        self.parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer')
+        self.parser.add_argument('--n_downsample_global', type=int, default=3, help='number of downsampling layers in netG')
+        self.parser.add_argument('--n_blocks_global', type=int, default=6, help='number of residual blocks in the global generator network')
+        self.parser.add_argument('--n_blocks_local', type=int, default=3, help='number of residual blocks in the local enhancer network')
+        self.parser.add_argument('--n_local_enhancers', type=int, default=1, help='number of local enhancers to use')        
+        self.parser.add_argument('--niter_fix_global', type=int, default=0, help='number of epochs that we only train the outmost local enhancer')        
+
+        # for instance-wise features
+        self.parser.add_argument('--no_instance', action='store_true', help='if specified, do *not* add instance map as input')        
+        self.parser.add_argument('--instance_feat', action='store_true', help='if specified, add encoded instance features as input')
+        self.parser.add_argument('--label_feat', action='store_true', help='if specified, add encoded label features as input')        
+        self.parser.add_argument('--feat_num', type=int, default=3, help='vector length for encoded features')        
+        self.parser.add_argument('--load_features', action='store_true', help='if specified, load precomputed feature maps')
+        self.parser.add_argument('--n_downsample_E', type=int, default=4, help='# of downsampling layers in encoder') 
+        self.parser.add_argument('--nef', type=int, default=16, help='# of encoder filters in the first conv layer')        
+        self.parser.add_argument('--n_clusters', type=int, default=10, help='number of clusters for features')
+        self.parser.add_argument('--image_size', type=int, default=224, help='input image size')
+        self.parser.add_argument('--norm_G', type=str, default='spectralspadesyncbatch3x3', help='instance normalization or batch normalization')
+        self.parser.add_argument('--semantic_nc', type=int, default=3, help='# of semantic label channels')
+        self.initialized = True
+
+    def parse(self, save=True):
+        if not self.initialized:
+            self.initialize()
+        self.opt = self.parser.parse_args()
+        self.opt.isTrain = self.isTrain   # train or test
+
+        str_ids = self.opt.gpu_ids.split(',')
+        self.opt.gpu_ids = []
+        for str_id in str_ids:
+            gpu_id = int(str_id)
+            if gpu_id >= 0:
+                self.opt.gpu_ids.append(gpu_id)
+        
+        # set gpu ids
+        if len(self.opt.gpu_ids) > 0:
+            torch.cuda.set_device(self.opt.gpu_ids[0])
+
+        args = vars(self.opt)
+
+        print('------------ Options -------------')
+        for k, v in sorted(args.items()):
+            print('%s: %s' % (str(k), str(v)))
+        print('-------------- End ----------------')
+
+        # save to the disk
+        if self.opt.isTrain:
+            expr_dir = os.path.join(self.opt.checkpoints_dir, self.opt.name)
+            util.mkdirs(expr_dir)
+            if save and not self.opt.continue_train:
+                file_name = os.path.join(expr_dir, 'opt.txt')
+                with open(file_name, 'wt') as opt_file:
+                    opt_file.write('------------ Options -------------\n')
+                    for k, v in sorted(args.items()):
+                        opt_file.write('%s: %s\n' % (str(k), str(v)))
+                    opt_file.write('-------------- End ----------------\n')
+        return self.opt

+ 22 - 0
options/test_options.py

@@ -0,0 +1,22 @@
+from .base_options import BaseOptions
+
+class TestOptions(BaseOptions):
+    def initialize(self):
+        BaseOptions.initialize(self)
+        self.parser.add_argument('--ntest', type=int, default=float("inf"), help='# of test examples.')
+        self.parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
+        self.parser.add_argument('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
+        self.parser.add_argument('--phase', type=str, default='test', help='train, val, test, etc')
+        self.parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
+        self.parser.add_argument('--how_many', type=int, default=50, help='how many test images to run')       
+        self.parser.add_argument('--cluster_path', type=str, default='features_clustered_010.npy', help='the path for clustered results of encoded features')
+        self.parser.add_argument('--use_encoded_image', action='store_true', help='if specified, encode the real image to get the feature map')
+        self.parser.add_argument("--export_onnx", type=str, help="export ONNX model to a given file")
+        self.parser.add_argument("--engine", type=str, help="run serialized TRT engine")
+        self.parser.add_argument("--onnx", type=str, help="run ONNX model via TRT")        
+        self.parser.add_argument("--Arc_path", type=str, default='models/BEST_checkpoint.tar', help="run ONNX model via TRT")
+        self.parser.add_argument("--pic_a_path", type=str, default='crop_224/gdg.jpg', help="people a")
+        self.parser.add_argument("--pic_b_path", type=str, default='crop_224/zrf.jpg', help="people b")
+        self.parser.add_argument("--output_path", type=str, default='output/', help="people b")
+
+        self.isTrain = False

+ 39 - 0
options/train_options.py

@@ -0,0 +1,39 @@
+from .base_options import BaseOptions
+
+class TrainOptions(BaseOptions):
+    def initialize(self):
+        BaseOptions.initialize(self)
+        # for displays
+        self.parser.add_argument('--display_freq', type=int, default=99, help='frequency of showing training results on screen')
+        self.parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')
+        self.parser.add_argument('--save_latest_freq', type=int, default=10000, help='frequency of saving the latest results')
+        self.parser.add_argument('--save_epoch_freq', type=int, default=10000, help='frequency of saving checkpoints at the end of epochs')
+        self.parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
+        self.parser.add_argument('--debug', action='store_true', help='only do one epoch and displays at each iteration')
+
+        # for training
+        self.parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
+        self.parser.add_argument('--load_pretrain', type=str, default='', help='load the pretrained model from the specified location')
+        self.parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
+        self.parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
+        self.parser.add_argument('--niter', type=int, default=10000, help='# of iter at starting learning rate')
+        self.parser.add_argument('--niter_decay', type=int, default=10000, help='# of iter to linearly decay learning rate to zero')
+        self.parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
+        self.parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
+
+        # for discriminators        
+        self.parser.add_argument('--num_D', type=int, default=2, help='number of discriminators to use')
+        self.parser.add_argument('--n_layers_D', type=int, default=4, help='only used if which_model_netD==n_layers')
+        self.parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer')    
+        self.parser.add_argument('--lambda_feat', type=float, default=10.0, help='weight for feature matching loss')
+        self.parser.add_argument('--lambda_id', type=float, default=20.0, help='weight for id loss')
+        self.parser.add_argument('--lambda_rec', type=float, default=10.0, help='weight for reconstruction loss')
+        self.parser.add_argument('--lambda_GP', type=float, default=10.0, help='weight for gradient penalty loss')
+        self.parser.add_argument('--no_ganFeat_loss', action='store_true', help='if specified, do *not* use discriminator feature matching loss')
+        self.parser.add_argument('--no_vgg_loss', action='store_true', help='if specified, do *not* use VGG feature matching loss')        
+        self.parser.add_argument('--gan_mode', type=str, default='hinge', help='(ls|original|hinge)')
+        self.parser.add_argument('--pool_size', type=int, default=0, help='the size of image buffer that stores previously generated images')
+        self.parser.add_argument('--times_G', type=int, default=1,
+                                 help='number of generator updates per discriminator update')
+
+        self.isTrain = True

+ 85 - 0
test_one_image.py

@@ -0,0 +1,85 @@
+
+import cv2
+import torch
+import math
+import numpy as np
+from PIL import Image
+import torch.nn.functional as F
+from torchvision import transforms
+from models.models import create_model
+from options.test_options import TestOptions
+
+
+def lcm(a, b): return abs(a * b) // math.gcd(a, b) if a and b else 0  # integer lcm (unused here; kept for parity with train.py)
+
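+# The identity image gets ImageNet normalization below because it is fed to
+# ArcFace; the attribute image is left in [0, 1] for the generator.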
+transformer = transforms.Compose([
+        transforms.ToTensor(),
+        #transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
+    ])
+
+transformer_Arcface = transforms.Compose([
+        transforms.ToTensor(),
+        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
+    ])
+
+detransformer = transforms.Compose([
+        transforms.Normalize([0, 0, 0], [1/0.229, 1/0.224, 1/0.225]),
+        transforms.Normalize([-0.485, -0.456, -0.406], [1, 1, 1])
+    ])
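+# detransformer undoes the ImageNet normalization above; it is unused in this
+# script but mirrors the helper defined in train.py.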
+
+opt = TestOptions().parse()
+
+start_epoch, epoch_iter = 1, 0
+
+torch.nn.Module.dump_patches = True
+model = create_model(opt)
+model.eval()
+
+
+pic_a = opt.pic_a_path
+img_a = Image.open(pic_a).convert('RGB')
+img_a = transformer_Arcface(img_a)
+img_id = img_a.view(-1, img_a.shape[0], img_a.shape[1], img_a.shape[2])
+
+pic_b = opt.pic_b_path
+
+img_b = Image.open(pic_b).convert('RGB')
+img_b = transformer(img_b)
+img_att = img_b.view(-1, img_b.shape[0], img_b.shape[1], img_b.shape[2])
+
+# move both tensors to the GPU
+img_id = img_id.cuda()
+img_att = img_att.cuda()
+
+# create the latent identity embedding; ArcFace was trained on 112x112 crops,
+# so the 224x224 identity image is downsampled by half first
+img_id_downsample = F.interpolate(img_id, scale_factor=0.5)
+latent_id = model.netArc(img_id_downsample)
+latent_id = latent_id.detach().to('cpu')
+latent_id = latent_id / np.linalg.norm(latent_id)  # L2-normalize the embedding
+latent_id = latent_id.to('cuda')
+
+
+############## Forward Pass ######################
+img_fake = model(img_id, img_att, latent_id, latent_id, True)
+
+
+for i in range(img_id.shape[0]):
+    if i == 0:
+        row1 = img_id[i]
+        row2 = img_att[i]
+        row3 = img_fake[i]
+    else:
+        row1 = torch.cat([row1, img_id[i]], dim=2)
+        row2 = torch.cat([row2, img_att[i]], dim=2)
+        row3 = torch.cat([row3, img_fake[i]], dim=2)
+
+#full = torch.cat([row1, row2, row3], dim=1).detach()  # identity / attribute / result grid
+full = row3.detach()  # keep only the swapped-result row
+full = full.permute(1, 2, 0)  # CHW -> HWC
+output = full.to('cpu')
+output = np.array(output)
+output = output[..., ::-1]  # RGB -> BGR for OpenCV
+
+output = output*255
+
+cv2.imwrite(opt.output_path + 'result.jpg', output)

+ 148 - 0
train.py

@@ -0,0 +1,148 @@
+import time
+import os
+import numpy as np
+import torch
+from torch.autograd import Variable
+from collections import OrderedDict
+from subprocess import call
+import math
+from options.train_options import TrainOptions
+from data.data_loader import CreateDataLoader
+from data.dataset_class import FaceDataSet
+from torch.utils.data import DataLoader
+from models.models import create_model
+import util.util as util
+from util.visualizer import Visualizer
+import cv2
+from torchvision import transforms
+
+def lcm(a, b): return abs(a * b) // math.gcd(a, b) if a and b else 0  # integer lcm, used to align print_freq with batchSize
+
+
+detransformer = transforms.Compose([
+        transforms.Normalize([0, 0, 0], [1/0.229, 1/0.224, 1/0.225]),
+        transforms.Normalize([-0.485, -0.456, -0.406], [1, 1, 1])
+    ])
+
+opt = TrainOptions().parse()
+iter_path = os.path.join(opt.checkpoints_dir, opt.name, 'iter.txt')
+
+if opt.continue_train:
+    try:
+        start_epoch, epoch_iter = np.loadtxt(iter_path , delimiter=',', dtype=int)
+    except (OSError, ValueError):
+        start_epoch, epoch_iter = 1, 0
+    print('Resuming from epoch %d at iteration %d' % (start_epoch, epoch_iter))        
+else:    
+    start_epoch, epoch_iter = 1, 0
+
+opt.print_freq = lcm(opt.print_freq, opt.batchSize)    
+if opt.debug:
+    opt.display_freq = 1
+    opt.print_freq = 1
+    opt.niter = 1
+    opt.niter_decay = 0
+    opt.max_dataset_size = 10
+
+
+dataset = FaceDataSet('people_list.txt', opt.batchSize)
+data_loader = DataLoader(dataset, batch_size = opt.batchSize, shuffle=True)
+dataset_size = len(data_loader)
+
+device = torch.device("cuda:0")
+
+
+model = create_model(opt)
+visualizer = Visualizer(opt)
+
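+# create_model is expected to return the networks wrapped in nn.DataParallel,
+# hence the .module indirection when grabbing the optimizers.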
+optimizer_G, optimizer_D = model.module.optimizer_G, model.module.optimizer_D
+
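+# 8608 appears to be a hard-coded number of iterations per epoch for the
+# authors' dataset; adjust it if the dataset size changes.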
+total_steps = (start_epoch-1) * 8608 + epoch_iter
+
+display_delta = total_steps % opt.display_freq
+print_delta = total_steps % opt.print_freq
+save_delta = total_steps % opt.save_latest_freq
+
+loss_avg = 0
+refresh_count = 0
+
+for epoch in range(start_epoch, opt.niter + opt.niter_decay + 1):
+    epoch_start_time = time.time()
+    if epoch != start_epoch:
+        epoch_iter = epoch_iter % dataset_size
+    for i, (img_id, img_att, latent_id, latent_att, data_type) in enumerate(data_loader):
+        if total_steps % opt.print_freq == print_delta:
+            iter_start_time = time.time()
+        total_steps += opt.batchSize
+        epoch_iter += opt.batchSize
+
+        # move the batch to the GPU
+        img_id = img_id.to(device)
+        img_att = img_att.to(device)
+        latent_id = latent_id.to(device)
+        latent_att = latent_att.to(device)
+
+
+        # whether to collect output images
+        save_fake = total_steps % opt.display_freq == display_delta
+
+        ############## Forward Pass ######################
+
+        losses, img_fake = model(img_id, img_att, latent_id, latent_att, for_G=True)
+
+        # update Generator weights
+        losses = [ torch.mean(x) if not isinstance(x, int) else x for x in losses ]
+        loss_dict = dict(zip(model.module.loss_names, losses))
+
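+        # Generator objective: adversarial loss + (optional) feature-matching
+        # loss + ArcFace identity loss; a reconstruction term is added when
+        # data_type is 0, which presumably flags same-identity pairs where
+        # pixel reconstruction is well defined.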
+        loss_G = loss_dict['G_GAN'] + loss_dict.get('G_GAN_Feat', 0) + loss_dict['G_ID'] * opt.lambda_id
+        if data_type[0] == 0:
+            loss_G += loss_dict['G_Rec']
+
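+        # retain_graph=True keeps the forward graph alive so the discriminator
+        # backward pass below can reuse it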
+        optimizer_G.zero_grad()
+        loss_G.backward(retain_graph=True)
+        optimizer_G.step()
+
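+        # Discriminator objective: mean of the real/fake terms plus the
+        # gradient penalty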
+        loss_D = (loss_dict['D_fake'] + loss_dict['D_real']) * 0.5 + loss_dict['D_GP']
+        optimizer_D.zero_grad()
+        loss_D.backward()
+        optimizer_D.step()
+
+        ############## Display results and errors ##########
+        ### print out errors
+        if total_steps % opt.print_freq == print_delta:
+            errors = {k: v.data.item() if not isinstance(v, int) else v for k, v in loss_dict.items()}
+            t = (time.time() - iter_start_time) / opt.print_freq
+            visualizer.print_current_errors(epoch, epoch_iter, errors, t)
+            visualizer.plot_current_errors(errors, total_steps)
+
+        ### display output images
+        if save_fake:
+            '''visuals = OrderedDict([('input_label', util.tensor2label(data['label'][0], opt.label_nc)),
+                                   ('synthesized_image', util.tensor2im(generated.data[0])),
+                                   ('real_image', util.tensor2im(data['image'][0]))])'''
+            for j in range(img_id.shape[0]):
+                if j == 0:
+                    row1 = img_id[j]
+                    row2 = img_att[j]
+                    row3 = img_fake[j]
+                else:
+                    row1 = torch.cat([row1, img_id[j]], dim=2)
+                    row2 = torch.cat([row2, img_att[j]], dim=2)
+                    row3 = torch.cat([row3, img_fake[j]], dim=2)
+            full = torch.cat([row1, row2, row3], dim=1).detach()
+            full = full.permute(1, 2, 0)
+            output = full.to('cpu')
+            output = np.array(output)*255
+            output = output[..., ::-1]
+            cv2.imwrite('samples/step_'+str(total_steps)+'.jpg', output)
+
+        ### save latest model
+        if total_steps % opt.save_latest_freq == save_delta:
+            print('saving the latest model (epoch %d, total_steps %d)' % (epoch, total_steps))
+            model.module.save('latest')            
+            np.savetxt(iter_path, (epoch, epoch_iter), delimiter=',', fmt='%d')
+       
+    # end of epoch
+    print('End of epoch %d / %d \t Time Taken: %d sec' %
+          (epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))

+ 63 - 0
util/html.py

@@ -0,0 +1,63 @@
+import dominate
+from dominate.tags import *
+import os
+
+
+class HTML:
+    def __init__(self, web_dir, title, refresh=0):
+        self.title = title
+        self.web_dir = web_dir
+        self.img_dir = os.path.join(self.web_dir, 'images')
+        if not os.path.exists(self.web_dir):
+            os.makedirs(self.web_dir)
+        if not os.path.exists(self.img_dir):
+            os.makedirs(self.img_dir)
+
+        self.doc = dominate.document(title=title)
+        if refresh > 0:
+            with self.doc.head:
+                meta(http_equiv="refresh", content=str(refresh))
+
+    def get_image_dir(self):
+        return self.img_dir
+
+    def add_header(self, text):
+        with self.doc:
+            h3(text)
+
+    def add_table(self, border=1):
+        self.t = table(border=border, style="table-layout: fixed;")
+        self.doc.add(self.t)
+
+    def add_images(self, ims, txts, links, width=512):
+        self.add_table()
+        with self.t:
+            with tr():
+                for im, txt, link in zip(ims, txts, links):
+                    with td(style="word-wrap: break-word;", halign="center", valign="top"):
+                        with p():
+                            with a(href=os.path.join('images', link)):
+                                img(style="width:%dpx" % (width), src=os.path.join('images', im))
+                            br()
+                            p(txt)
+
+    def save(self):
+        html_file = '%s/index.html' % self.web_dir
+        with open(html_file, 'wt') as f:
+            f.write(self.doc.render())
+
+
+if __name__ == '__main__':
+    html = HTML('web/', 'test_html')
+    html.add_header('hello world')
+
+    ims = []
+    txts = []
+    links = []
+    for n in range(4):
+        ims.append('image_%d.jpg' % n)
+        txts.append('text_%d' % n)
+        links.append('image_%d.jpg' % n)
+    html.add_images(ims, txts, links)
+    html.save()

+ 31 - 0
util/image_pool.py

@@ -0,0 +1,31 @@
+import random
+import torch
+from torch.autograd import Variable
+class ImagePool():
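+    """Buffer of previously generated images.
+
+    With probability 0.5, query() returns an older fake image from the pool in
+    place of the incoming one (the replay trick from pix2pix/CycleGAN), which
+    keeps the discriminator from overfitting to the latest generator outputs.
+    """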
+    def __init__(self, pool_size):
+        self.pool_size = pool_size
+        if self.pool_size > 0:
+            self.num_imgs = 0
+            self.images = []
+
+    def query(self, images):
+        if self.pool_size == 0:
+            return images
+        return_images = []
+        for image in images.data:
+            image = torch.unsqueeze(image, 0)
+            if self.num_imgs < self.pool_size:
+                self.num_imgs = self.num_imgs + 1
+                self.images.append(image)
+                return_images.append(image)
+            else:
+                p = random.uniform(0, 1)
+                if p > 0.5:
+                    random_id = random.randint(0, self.pool_size-1)
+                    tmp = self.images[random_id].clone()
+                    self.images[random_id] = image
+                    return_images.append(tmp)
+                else:
+                    return_images.append(image)
+        return_images = Variable(torch.cat(return_images, 0))
+        return return_images

+ 100 - 0
util/util.py

@@ -0,0 +1,100 @@
+from __future__ import print_function
+import torch
+import numpy as np
+from PIL import Image
+import os
+
+# Converts a Tensor into a Numpy array
+# |imtype|: the desired type of the converted numpy array
+def tensor2im(image_tensor, imtype=np.uint8, normalize=True):
+    if isinstance(image_tensor, list):
+        image_numpy = []
+        for i in range(len(image_tensor)):
+            image_numpy.append(tensor2im(image_tensor[i], imtype, normalize))
+        return image_numpy
+    image_numpy = image_tensor.cpu().float().numpy()
+    if normalize:
+        image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
+    else:
+        image_numpy = np.transpose(image_numpy, (1, 2, 0)) * 255.0      
+    image_numpy = np.clip(image_numpy, 0, 255)
+    if image_numpy.shape[2] == 1 or image_numpy.shape[2] > 3:        
+        image_numpy = image_numpy[:,:,0]
+    return image_numpy.astype(imtype)
+
+# Converts a one-hot tensor into a colorful label map
+def tensor2label(label_tensor, n_label, imtype=np.uint8):
+    if n_label == 0:
+        return tensor2im(label_tensor, imtype)
+    label_tensor = label_tensor.cpu().float()    
+    if label_tensor.size()[0] > 1:
+        label_tensor = label_tensor.max(0, keepdim=True)[1]
+    label_tensor = Colorize(n_label)(label_tensor)
+    label_numpy = np.transpose(label_tensor.numpy(), (1, 2, 0))
+    return label_numpy.astype(imtype)
+
+def save_image(image_numpy, image_path):
+    image_pil = Image.fromarray(image_numpy)
+    image_pil.save(image_path)
+
+def mkdirs(paths):
+    if isinstance(paths, list) and not isinstance(paths, str):
+        for path in paths:
+            mkdir(path)
+    else:
+        mkdir(paths)
+
+def mkdir(path):
+    if not os.path.exists(path):
+        os.makedirs(path)
+
+###############################################################################
+# Code from
+# https://github.com/ycszen/pytorch-seg/blob/master/transform.py
+# Modified so it complies with the Cityscapes label map colors
+###############################################################################
+def uint82bin(n, count=8):
+    """returns the binary of integer n, count refers to amount of bits"""
+    return ''.join([str((n >> y) & 1) for y in range(count-1, -1, -1)])
+
+def labelcolormap(N):
+    if N == 35: # cityscape
+        cmap = np.array([(  0,  0,  0), (  0,  0,  0), (  0,  0,  0), (  0,  0,  0), (  0,  0,  0), (111, 74,  0), ( 81,  0, 81),
+                     (128, 64,128), (244, 35,232), (250,170,160), (230,150,140), ( 70, 70, 70), (102,102,156), (190,153,153),
+                     (180,165,180), (150,100,100), (150,120, 90), (153,153,153), (153,153,153), (250,170, 30), (220,220,  0),
+                     (107,142, 35), (152,251,152), ( 70,130,180), (220, 20, 60), (255,  0,  0), (  0,  0,142), (  0,  0, 70),
+                     (  0, 60,100), (  0,  0, 90), (  0,  0,110), (  0, 80,100), (  0,  0,230), (119, 11, 32), (  0,  0,142)], 
+                     dtype=np.uint8)
+    else:
+        cmap = np.zeros((N, 3), dtype=np.uint8)
+        for i in range(N):
+            r, g, b = 0, 0, 0
+            label_id = i
+            for j in range(7):
+                str_id = uint82bin(label_id)
+                r = r ^ (np.uint8(str_id[-1]) << (7-j))
+                g = g ^ (np.uint8(str_id[-2]) << (7-j))
+                b = b ^ (np.uint8(str_id[-3]) << (7-j))
+                label_id = label_id >> 3
+            cmap[i, 0] = r
+            cmap[i, 1] = g
+            cmap[i, 2] = b
+    return cmap
+
+class Colorize(object):
+    def __init__(self, n=35):
+        self.cmap = labelcolormap(n)
+        self.cmap = torch.from_numpy(self.cmap[:n])
+
+    def __call__(self, gray_image):
+        size = gray_image.size()
+        color_image = torch.ByteTensor(3, size[1], size[2]).fill_(0)
+
+        for label in range(0, len(self.cmap)):
+            mask = (label == gray_image[0]).cpu()
+            color_image[0][mask] = self.cmap[label][0]
+            color_image[1][mask] = self.cmap[label][1]
+            color_image[2][mask] = self.cmap[label][2]
+
+        return color_image

+ 131 - 0
util/visualizer.py

@@ -0,0 +1,131 @@
+import numpy as np
+import os
+import ntpath
+import time
+from . import util
+from . import html
+import scipy.misc
+try:
+    from StringIO import StringIO  # Python 2.7
+except ImportError:
+    from io import BytesIO         # Python 3.x
+
+class Visualizer():
+    def __init__(self, opt):
+        # self.opt = opt
+        self.tf_log = opt.tf_log
+        self.use_html = opt.isTrain and not opt.no_html
+        self.win_size = opt.display_winsize
+        self.name = opt.name
+        if self.tf_log:
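+            # TensorBoard logging below uses the TensorFlow 1.x summary API
+            # (tf.summary.FileWriter / tf.Summary)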
+            import tensorflow as tf
+            self.tf = tf
+            self.log_dir = os.path.join(opt.checkpoints_dir, opt.name, 'logs')
+            self.writer = tf.summary.FileWriter(self.log_dir)
+
+        if self.use_html:
+            self.web_dir = os.path.join(opt.checkpoints_dir, opt.name, 'web')
+            self.img_dir = os.path.join(self.web_dir, 'images')
+            print('create web directory %s...' % self.web_dir)
+            util.mkdirs([self.web_dir, self.img_dir])
+        self.log_name = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
+        with open(self.log_name, "a") as log_file:
+            now = time.strftime("%c")
+            log_file.write('================ Training Loss (%s) ================\n' % now)
+
+    # |visuals|: dictionary of images to display or save
+    def display_current_results(self, visuals, epoch, step):
+        if self.tf_log: # show images in tensorboard output
+            img_summaries = []
+            for label, image_numpy in visuals.items():
+                # Write the image to a string
+                try:
+                    s = StringIO()
+                except:
+                    s = BytesIO()
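+                # note: scipy.misc.toimage was removed in SciPy 1.2, so tf_log
+                # requires an older SciPy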
+                scipy.misc.toimage(image_numpy).save(s, format="jpeg")
+                # Create an Image object
+                img_sum = self.tf.Summary.Image(encoded_image_string=s.getvalue(), height=image_numpy.shape[0], width=image_numpy.shape[1])
+                # Create a Summary value
+                img_summaries.append(self.tf.Summary.Value(tag=label, image=img_sum))
+
+            # Create and write Summary
+            summary = self.tf.Summary(value=img_summaries)
+            self.writer.add_summary(summary, step)
+
+        if self.use_html: # save images to a html file
+            for label, image_numpy in visuals.items():
+                if isinstance(image_numpy, list):
+                    for i in range(len(image_numpy)):
+                        img_path = os.path.join(self.img_dir, 'epoch%.3d_%s_%d.jpg' % (epoch, label, i))
+                        util.save_image(image_numpy[i], img_path)
+                else:
+                    img_path = os.path.join(self.img_dir, 'epoch%.3d_%s.jpg' % (epoch, label))
+                    util.save_image(image_numpy, img_path)
+
+            # update website
+            webpage = html.HTML(self.web_dir, 'Experiment name = %s' % self.name, refresh=30)
+            for n in range(epoch, 0, -1):
+                webpage.add_header('epoch [%d]' % n)
+                ims = []
+                txts = []
+                links = []
+
+                for label, image_numpy in visuals.items():
+                    if isinstance(image_numpy, list):
+                        for i in range(len(image_numpy)):
+                            img_path = 'epoch%.3d_%s_%d.jpg' % (n, label, i)
+                            ims.append(img_path)
+                            txts.append(label+str(i))
+                            links.append(img_path)
+                    else:
+                        img_path = 'epoch%.3d_%s.jpg' % (n, label)
+                        ims.append(img_path)
+                        txts.append(label)
+                        links.append(img_path)
+                if len(ims) < 10:
+                    webpage.add_images(ims, txts, links, width=self.win_size)
+                else:
+                    num = int(round(len(ims)/2.0))
+                    webpage.add_images(ims[:num], txts[:num], links[:num], width=self.win_size)
+                    webpage.add_images(ims[num:], txts[num:], links[num:], width=self.win_size)
+            webpage.save()
+
+    # errors: dictionary of error labels and values
+    def plot_current_errors(self, errors, step):
+        if self.tf_log:
+            for tag, value in errors.items():
+                summary = self.tf.Summary(value=[self.tf.Summary.Value(tag=tag, simple_value=value)])
+                self.writer.add_summary(summary, step)
+
+    # errors: same format as |errors| of plotCurrentErrors
+    def print_current_errors(self, epoch, i, errors, t):
+        message = '(epoch: %d, iters: %d, time: %.3f) ' % (epoch, i, t)
+        for k, v in errors.items():
+            if v != 0:
+                message += '%s: %.3f ' % (k, v)
+
+        print(message)
+        with open(self.log_name, "a") as log_file:
+            log_file.write('%s\n' % message)
+
+    # save image to the disk
+    def save_images(self, webpage, visuals, image_path):
+        image_dir = webpage.get_image_dir()
+        short_path = ntpath.basename(image_path[0])
+        name = os.path.splitext(short_path)[0]
+
+        webpage.add_header(name)
+        ims = []
+        txts = []
+        links = []
+
+        for label, image_numpy in visuals.items():
+            image_name = '%s_%s.jpg' % (name, label)
+            save_path = os.path.join(image_dir, image_name)
+            util.save_image(image_numpy, save_path)
+
+            ims.append(image_name)
+            txts.append(label)
+            links.append(image_name)
+        webpage.add_images(ims, txts, links, width=self.win_size)