aligned_dataset.py

import os.path
from data.base_dataset import BaseDataset, get_params, get_transform, normalize
from data.image_folder import make_dataset
from PIL import Image


class AlignedDataset(BaseDataset):
    def initialize(self, opt):
        self.opt = opt
        self.root = opt.dataroot

        ### input A (label maps)
        dir_A = '_A' if self.opt.label_nc == 0 else '_label'
        self.dir_A = os.path.join(opt.dataroot, opt.phase + dir_A)
        self.A_paths = sorted(make_dataset(self.dir_A))

        ### input B (real images)
        if opt.isTrain or opt.use_encoded_image:
            dir_B = '_B' if self.opt.label_nc == 0 else '_img'
            self.dir_B = os.path.join(opt.dataroot, opt.phase + dir_B)
            self.B_paths = sorted(make_dataset(self.dir_B))

        ### instance maps
        if not opt.no_instance:
            self.dir_inst = os.path.join(opt.dataroot, opt.phase + '_inst')
            self.inst_paths = sorted(make_dataset(self.dir_inst))

        ### load precomputed instance-wise encoded features
        if opt.load_features:
            self.dir_feat = os.path.join(opt.dataroot, opt.phase + '_feat')
            print('----------- loading features from %s ----------' % self.dir_feat)
            self.feat_paths = sorted(make_dataset(self.dir_feat))

        self.dataset_size = len(self.A_paths)

    def __getitem__(self, index):
        ### input A (label maps)
        A_path = self.A_paths[index]
        A = Image.open(A_path)
        params = get_params(self.opt, A.size)
        if self.opt.label_nc == 0:
            # RGB label maps: use the default (bicubic) transform
            transform_A = get_transform(self.opt, params)
            A_tensor = transform_A(A.convert('RGB'))
        else:
            # integer class-ID maps: nearest-neighbor resizing, no normalization,
            # and multiply by 255 to undo ToTensor's [0, 1] scaling
            transform_A = get_transform(self.opt, params, method=Image.NEAREST, normalize=False)
            A_tensor = transform_A(A) * 255.0

        # placeholders used when the real image / instance map / features are absent
        B_tensor = inst_tensor = feat_tensor = 0

        ### input B (real images)
        if self.opt.isTrain or self.opt.use_encoded_image:
            B_path = self.B_paths[index]
            B = Image.open(B_path).convert('RGB')
            transform_B = get_transform(self.opt, params)
            B_tensor = transform_B(B)

        ### if using instance maps
        if not self.opt.no_instance:
            inst_path = self.inst_paths[index]
            inst = Image.open(inst_path)
            inst_tensor = transform_A(inst)

            if self.opt.load_features:
                feat_path = self.feat_paths[index]
                feat = Image.open(feat_path).convert('RGB')
                norm = normalize()
                feat_tensor = norm(transform_A(feat))

        input_dict = {'label': A_tensor, 'inst': inst_tensor, 'image': B_tensor,
                      'feat': feat_tensor, 'path': A_path}

        return input_dict

    def __len__(self):
        # truncate to a multiple of the batch size so every batch is full
        return len(self.A_paths) // self.opt.batchSize * self.opt.batchSize

    def name(self):
        return 'AlignedDataset'
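
A minimal usage sketch, not part of the file: it assumes a pix2pixHD-style option namespace carrying the fields this class reads (dataroot, phase, label_nc, isTrain, use_encoded_image, no_instance, load_features, batchSize) plus the resize/crop fields that get_params/get_transform are assumed to consume (resize_or_crop, loadSize, fineSize, no_flip). In the repo this setup is normally handled by the options parser and the data-loader factory rather than built by hand, and the directory layout (e.g. train_label/, train_img/, train_inst/ under dataroot) must already exist; field values below are illustrative.

    from types import SimpleNamespace
    from torch.utils.data import DataLoader

    from data.aligned_dataset import AlignedDataset

    # Hypothetical, hand-built option namespace; in the repo these fields
    # come from the options parser, not from SimpleNamespace.
    opt = SimpleNamespace(
        dataroot='./datasets/cityscapes',   # expects train_label/, train_img/, train_inst/
        phase='train',
        label_nc=35,                        # 0 would mean RGB label maps instead of class IDs
        isTrain=True,
        use_encoded_image=False,
        no_instance=False,
        load_features=False,
        batchSize=1,
        # fields assumed to be read by get_params / get_transform
        resize_or_crop='scale_width', loadSize=1024, fineSize=512, no_flip=False,
    )

    dataset = AlignedDataset()
    dataset.initialize(opt)
    loader = DataLoader(dataset, batch_size=opt.batchSize, shuffle=True)

    batch = next(iter(loader))
    print(batch['label'].shape, batch['path'][0])

Note that __len__ rounds the dataset size down to a multiple of batchSize, so with the sketch above the loader yields only full batches; the returned dict always contains 'label' and 'path', while 'image', 'inst', and 'feat' fall back to 0 when their options are disabled.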