base_dataset.py 2.8 KB

import torch.utils.data as data
from PIL import Image
import torchvision.transforms as transforms
import numpy as np
import random


class BaseDataset(data.Dataset):
    def __init__(self):
        super(BaseDataset, self).__init__()

    def name(self):
        return 'BaseDataset'

    def initialize(self, opt):
        pass


def get_params(opt, size):
    """Pick the resize target plus a random crop position and flip flag,
    so paired inputs (e.g. label map and image) can share the same augmentation."""
    w, h = size
    new_h = h
    new_w = w
    if opt.resize_or_crop == 'resize_and_crop':
        new_h = new_w = opt.loadSize
    elif opt.resize_or_crop == 'scale_width_and_crop':
        new_w = opt.loadSize
        new_h = opt.loadSize * h // w

    x = random.randint(0, np.maximum(0, new_w - opt.fineSize))
    y = random.randint(0, np.maximum(0, new_h - opt.fineSize))

    flip = random.random() > 0.5
    return {'crop_pos': (x, y), 'flip': flip}


def get_transform(opt, params, method=Image.BICUBIC, normalize=True):
    transform_list = []
    if 'resize' in opt.resize_or_crop:
        osize = [opt.loadSize, opt.loadSize]
        # transforms.Scale is deprecated; Resize is the equivalent replacement
        transform_list.append(transforms.Resize(osize, method))
    elif 'scale_width' in opt.resize_or_crop:
        transform_list.append(transforms.Lambda(lambda img: __scale_width(img, opt.loadSize, method)))

    if 'crop' in opt.resize_or_crop:
        transform_list.append(transforms.Lambda(lambda img: __crop(img, params['crop_pos'], opt.fineSize)))

    if opt.resize_or_crop == 'none':
        # keep both sides divisible by the generator's total downsampling factor
        base = float(2 ** opt.n_downsample_global)
        if opt.netG == 'local':
            base *= (2 ** opt.n_local_enhancers)
        transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base, method)))

    if opt.isTrain and not opt.no_flip:
        transform_list.append(transforms.Lambda(lambda img: __flip(img, params['flip'])))

    transform_list += [transforms.ToTensor()]

    if normalize:
        # map [0, 1] tensors to [-1, 1]
        transform_list += [transforms.Normalize((0.5, 0.5, 0.5),
                                                (0.5, 0.5, 0.5))]
    return transforms.Compose(transform_list)


def normalize():
    return transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))


def __make_power_2(img, base, method=Image.BICUBIC):
    # resize so that both sides are multiples of `base`
    ow, oh = img.size
    h = int(round(oh / base) * base)
    w = int(round(ow / base) * base)
    if (h == oh) and (w == ow):
        return img
    return img.resize((w, h), method)


def __scale_width(img, target_width, method=Image.BICUBIC):
    # scale to the target width while preserving the aspect ratio
    ow, oh = img.size
    if ow == target_width:
        return img
    w = target_width
    h = int(target_width * oh / ow)
    return img.resize((w, h), method)


def __crop(img, pos, size):
    # crop a size x size patch at pos; images already at or below the
    # target size are returned unchanged
    ow, oh = img.size
    x1, y1 = pos
    tw = th = size
    if ow > tw or oh > th:
        return img.crop((x1, y1, x1 + tw, y1 + th))
    return img


def __flip(img, flip):
    if flip:
        return img.transpose(Image.FLIP_LEFT_RIGHT)
    return img
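

A minimal usage sketch, assuming an opt namespace that carries the option fields this module reads (resize_or_crop, loadSize, fineSize, isTrain, no_flip, netG, n_downsample_global, n_local_enhancers); the concrete option values and the example image path are illustrative assumptions, not part of the original file:

from argparse import Namespace
from PIL import Image

# illustrative option values only; real options come from the project's option parser
opt = Namespace(resize_or_crop='resize_and_crop', loadSize=286, fineSize=256,
                isTrain=True, no_flip=False, netG='global',
                n_downsample_global=4, n_local_enhancers=1)

img = Image.open('example.jpg').convert('RGB')   # hypothetical input image
params = get_params(opt, img.size)               # shared random crop position and flip flag
transform = get_transform(opt, params)           # PIL image -> normalized float tensor
tensor = transform(img)                          # shape: (3, opt.fineSize, opt.fineSize)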