train.py 5.3 KB

import time
import os
import numpy as np
import torch
from torch.autograd import Variable
from collections import OrderedDict
from subprocess import call
import math  # fractions.gcd was removed in Python 3.9; math.gcd is its replacement
from options.train_options import TrainOptions
from data.data_loader import CreateDataLoader
from data.dataset_class import FaceDataSet
from torch.utils.data import DataLoader
from models.models import create_model
import util.util as util
from util.visualizer import Visualizer
import cv2
from torchvision import transforms


def lcm(a, b): return abs(a * b) // math.gcd(a, b) if a and b else 0
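
# Inverse of a standard ImageNet mean/std normalization (undoes Normalize(mean, std) in two steps).
# Defined for convenience, e.g. for visual debugging; it is not referenced again in this script.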
detransformer = transforms.Compose([
    transforms.Normalize([0, 0, 0], [1 / 0.229, 1 / 0.224, 1 / 0.225]),
    transforms.Normalize([-0.485, -0.456, -0.406], [1, 1, 1])
])
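
# Parse the training options. With --continue_train, the epoch and iteration counters are
# restored from iter.txt in the checkpoint directory so that logging and checkpointing
# pick up where the previous run stopped.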
opt = TrainOptions().parse()
iter_path = os.path.join(opt.checkpoints_dir, opt.name, 'iter.txt')

if opt.continue_train:
    try:
        start_epoch, epoch_iter = np.loadtxt(iter_path, delimiter=',', dtype=int)
    except:
        start_epoch, epoch_iter = 1, 0
    print('Resuming from epoch %d at iteration %d' % (start_epoch, epoch_iter))
else:
    start_epoch, epoch_iter = 1, 0

opt.print_freq = lcm(opt.print_freq, opt.batchSize)

if opt.debug:
    opt.display_freq = 1
    opt.print_freq = 1
    opt.niter = 1
    opt.niter_decay = 0
    opt.max_dataset_size = 10
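
# Build the face dataset from people_list.txt and wrap it in a standard PyTorch DataLoader.
# Note that len(data_loader) counts batches, not images, so dataset_size is the number of
# batches per epoch.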
dataset = FaceDataSet('people_list.txt', opt.batchSize)
data_loader = DataLoader(dataset, batch_size=opt.batchSize, shuffle=True)
dataset_size = len(data_loader)

device = torch.device("cuda:0")

model = create_model(opt)
visualizer = Visualizer(opt)
optimizer_G, optimizer_D = model.module.optimizer_G, model.module.optimizer_D

# 8608 is a hard-coded number of steps per epoch (presumably matching the original training set)
total_steps = (start_epoch - 1) * 8608 + epoch_iter
display_delta = total_steps % opt.display_freq
print_delta = total_steps % opt.print_freq
save_delta = total_steps % opt.save_latest_freq

loss_avg = 0
refresh_count = 0
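
# Main training loop. total_steps and epoch_iter advance by batchSize each iteration; the
# *_delta offsets computed above keep the print/display/save cadence aligned after a resume.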
for epoch in range(start_epoch, opt.niter + opt.niter_decay + 1):
    epoch_start_time = time.time()
    if epoch != start_epoch:
        epoch_iter = epoch_iter % dataset_size

    for i, (img_id, img_att, latent_id, latent_att, data_type) in enumerate(data_loader):
        if total_steps % opt.print_freq == print_delta:
            iter_start_time = time.time()
        total_steps += opt.batchSize
        epoch_iter += opt.batchSize

        # move the batch tensors to the GPU
        img_id = img_id.to(device)
        img_att = img_att.to(device)
        latent_id = latent_id.to(device)
        latent_att = latent_att.to(device)

        # whether to collect output images
        save_fake = total_steps % opt.display_freq == display_delta
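
        # A single forward pass returns the generator and discriminator losses together with the
        # swapped image; G and D are then updated in turn, which is why the generator backward
        # below uses retain_graph=True.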
        ############## Forward Pass ######################
        losses, img_fake = model(img_id, img_att, latent_id, latent_att, for_G=True)

        # reduce each loss to a scalar (losses may come back per-GPU from DataParallel)
        losses = [torch.mean(x) if not isinstance(x, int) else x for x in losses]
        loss_dict = dict(zip(model.module.loss_names, losses))

        # update Generator weights
        loss_G = loss_dict['G_GAN'] + loss_dict.get('G_GAN_Feat', 0) + loss_dict['G_ID'] * opt.lambda_id
        if data_type[0] == 0:
            loss_G += loss_dict['G_Rec']
        optimizer_G.zero_grad()
        loss_G.backward(retain_graph=True)
        optimizer_G.step()

        # update Discriminator weights
        loss_D = (loss_dict['D_fake'] + loss_dict['D_real']) * 0.5 + loss_dict['D_GP']
        optimizer_D.zero_grad()
        loss_D.backward()
        optimizer_D.step()

        ############## Display results and errors ##########
        ### print out errors
        if total_steps % opt.print_freq == print_delta:
            errors = {k: v.data.item() if not isinstance(v, int) else v for k, v in loss_dict.items()}
            t = (time.time() - iter_start_time) / opt.print_freq
            visualizer.print_current_errors(epoch, epoch_iter, errors, t)
            visualizer.plot_current_errors(errors, total_steps)

        ### display output images
        if save_fake:
            '''visuals = OrderedDict([('input_label', util.tensor2label(data['label'][0], opt.label_nc)),
                                      ('synthesized_image', util.tensor2im(generated.data[0])),
                                      ('real_image', util.tensor2im(data['image'][0]))])'''
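            # Assemble a 3-row preview grid: identity inputs on top, attribute (target) images in
            # the middle, and the generated swaps on the bottom, one column per sample in the batch.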
            for b in range(img_id.shape[0]):  # 'b' rather than 'i', to avoid shadowing the enumerate index
                if b == 0:
                    row1 = img_id[b]
                    row2 = img_att[b]
                    row3 = img_fake[b]
                else:
                    row1 = torch.cat([row1, img_id[b]], dim=2)
                    row2 = torch.cat([row2, img_att[b]], dim=2)
                    row3 = torch.cat([row3, img_fake[b]], dim=2)
            full = torch.cat([row1, row2, row3], dim=1).detach()
            full = full.permute(1, 2, 0)
            output = full.to('cpu')
            output = np.array(output) * 255
            output = output[..., ::-1]  # RGB -> BGR for OpenCV
            cv2.imwrite('samples/step_' + str(total_steps) + '.jpg', output)  # assumes a samples/ directory exists

        ### save latest model
        if total_steps % opt.save_latest_freq == save_delta:
            print('saving the latest model (epoch %d, total_steps %d)' % (epoch, total_steps))
            model.module.save('latest')
            np.savetxt(iter_path, (epoch, epoch_iter), delimiter=',', fmt='%d')

    # end of epoch
    iter_end_time = time.time()
    print('End of epoch %d / %d \t Time Taken: %d sec' %
          (epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))