train_options.py

from .base_options import BaseOptions


class TrainOptions(BaseOptions):
    def initialize(self):
        BaseOptions.initialize(self)
        # for displays
        self.parser.add_argument('--display_freq', type=int, default=99, help='frequency of showing training results on screen')
        self.parser.add_argument('--print_freq', type=int, default=100, help='frequency of showing training results on console')
        self.parser.add_argument('--save_latest_freq', type=int, default=10000, help='frequency of saving the latest results')
        self.parser.add_argument('--save_epoch_freq', type=int, default=10000, help='frequency of saving checkpoints at the end of epochs')
        self.parser.add_argument('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
        self.parser.add_argument('--debug', action='store_true', help='only run one epoch and display at every iteration')
        # for training
        self.parser.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')
        self.parser.add_argument('--load_pretrain', type=str, default='', help='load the pretrained model from the specified location')
        self.parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
        self.parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
        self.parser.add_argument('--niter', type=int, default=10000, help='# of iter at starting learning rate')
        self.parser.add_argument('--niter_decay', type=int, default=10000, help='# of iter to linearly decay learning rate to zero')
        self.parser.add_argument('--beta1', type=float, default=0.5, help='momentum term of adam')
        self.parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
        # for discriminators
        self.parser.add_argument('--num_D', type=int, default=2, help='number of discriminators to use')
        self.parser.add_argument('--n_layers_D', type=int, default=4, help='only used if which_model_netD==n_layers')
        self.parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer')
        self.parser.add_argument('--lambda_feat', type=float, default=10.0, help='weight for feature matching loss')
        self.parser.add_argument('--lambda_id', type=float, default=20.0, help='weight for id loss')
        self.parser.add_argument('--lambda_rec', type=float, default=10.0, help='weight for reconstruction loss')
        self.parser.add_argument('--lambda_GP', type=float, default=10.0, help='weight for gradient penalty loss')
        self.parser.add_argument('--no_ganFeat_loss', action='store_true', help='if specified, do *not* use discriminator feature matching loss')
        self.parser.add_argument('--no_vgg_loss', action='store_true', help='if specified, do *not* use VGG feature matching loss')
        self.parser.add_argument('--gan_mode', type=str, default='hinge', help='(ls|original|hinge)')
        self.parser.add_argument('--pool_size', type=int, default=0, help='the size of image buffer that stores previously generated images')
        self.parser.add_argument('--times_G', type=int, default=1, help='number of times to train the generator before training the discriminator')
        self.isTrain = True
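
A minimal usage sketch (not part of train_options.py), assuming BaseOptions defines self.parser as an argparse.ArgumentParser and exposes a parse() helper that calls initialize() and returns the parsed options; the package path options.train_options is likewise an assumption inferred from the relative import of .base_options above.

# hypothetical usage of TrainOptions in a training script
from options.train_options import TrainOptions

# e.g. invoked as: python train.py --lr 0.0001 --gan_mode ls --num_D 3
opt = TrainOptions().parse()          # assumes parse() is provided by BaseOptions
print(opt.lr, opt.niter, opt.gan_mode)  # parsed values, or the defaults declared above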