fs_networks.py

  1. """
  2. Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
  3. Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
  4. """
  5. import torch
  6. import torch.nn as nn
class InstanceNorm(nn.Module):
    def __init__(self, epsilon=1e-8):
        """
        @notice: avoid in-place ops.
        https://discuss.pytorch.org/t/encounter-the-runtimeerror-one-of-the-variables-needed-for-gradient-computation-has-been-modified-by-an-inplace-operation/836/3
        """
        super(InstanceNorm, self).__init__()
        self.epsilon = epsilon

    def forward(self, x):
        # subtract the per-channel spatial mean, then rescale by the reciprocal
        # of the spatial standard deviation (keepdim=True preserves broadcasting)
        x = x - torch.mean(x, (2, 3), True)
        tmp = torch.mul(x, x)  # or x ** 2
        tmp = torch.rsqrt(torch.mean(tmp, (2, 3), True) + self.epsilon)
        return x * tmp
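

# Hypothetical sanity check (not part of the original file): InstanceNorm above
# normalizes each feature map over its spatial dims, which should roughly match
# nn.InstanceNorm2d(affine=False) apart from the epsilon value.
def _check_instance_norm():
    x = torch.randn(2, 8, 16, 16)
    y = InstanceNorm()(x)
    # after normalization, per-channel spatial mean is ~0 and variance is ~1
    assert torch.allclose(y.mean(dim=(2, 3)), torch.zeros(2, 8), atol=1e-4)
    assert torch.allclose(y.var(dim=(2, 3), unbiased=False), torch.ones(2, 8), atol=1e-3)
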
class ApplyStyle(nn.Module):
    """
    @ref: https://github.com/lernapparat/lernapparat/blob/master/style_gan/pytorch_style_gan.ipynb
    """
    def __init__(self, latent_size, channels):
        super(ApplyStyle, self).__init__()
        self.linear = nn.Linear(latent_size, channels * 2)

    def forward(self, x, latent):
        style = self.linear(latent)  # style => [batch_size, n_channels*2]
        shape = [-1, 2, x.size(1), 1, 1]
        style = style.view(shape)    # [batch_size, 2, n_channels, ...]
        #x = x * (style[:, 0] + 1.) + style[:, 1]
        x = x * (style[:, 0] * 1 + 1.) + style[:, 1] * 1
        return x
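

# Hypothetical shape check (assumption, not in the original file): ApplyStyle turns a
# latent vector into per-channel scale and shift (AdaIN-style modulation), so the
# output keeps the shape of the input feature map.
def _check_apply_style():
    feat = torch.randn(4, 64, 32, 32)
    latent = torch.randn(4, 512)
    styled = ApplyStyle(latent_size=512, channels=64)(feat, latent)
    assert styled.shape == feat.shape
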
class ResnetBlock_Adain(nn.Module):
    def __init__(self, dim, latent_size, padding_type, activation=nn.ReLU(True)):
        super(ResnetBlock_Adain, self).__init__()

        p = 0
        conv1 = []
        if padding_type == 'reflect':
            conv1 += [nn.ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            conv1 += [nn.ReplicationPad2d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)
        conv1 += [nn.Conv2d(dim, dim, kernel_size=3, padding=p), InstanceNorm()]
        self.conv1 = nn.Sequential(*conv1)
        self.style1 = ApplyStyle(latent_size, dim)
        self.act1 = activation

        p = 0
        conv2 = []
        if padding_type == 'reflect':
            conv2 += [nn.ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            conv2 += [nn.ReplicationPad2d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)
        conv2 += [nn.Conv2d(dim, dim, kernel_size=3, padding=p), InstanceNorm()]
        self.conv2 = nn.Sequential(*conv2)
        self.style2 = ApplyStyle(latent_size, dim)

    def forward(self, x, dlatents_in_slice):
        y = self.conv1(x)
        y = self.style1(y, dlatents_in_slice)
        y = self.act1(y)
        y = self.conv2(y)
        y = self.style2(y, dlatents_in_slice)
        out = x + y
        return out
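

# Hypothetical sanity check: the block is residual (out = x + y), so output and input
# shapes match, and both convolutions are conditioned on the same latent slice.
def _check_resnet_block_adain():
    block = ResnetBlock_Adain(dim=512, latent_size=512, padding_type='reflect')
    x = torch.randn(1, 512, 28, 28)
    dlatents_in_slice = torch.randn(1, 512)
    assert block(x, dlatents_in_slice).shape == x.shape
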
class Generator_Adain_Upsample(nn.Module):
    def __init__(self, input_nc, output_nc, latent_size, n_blocks=6, deep=False,
                 norm_layer=nn.BatchNorm2d,
                 padding_type='reflect'):
        assert (n_blocks >= 0)
        super(Generator_Adain_Upsample, self).__init__()
        activation = nn.ReLU(True)
        self.deep = deep

        self.first_layer = nn.Sequential(nn.ReflectionPad2d(3), nn.Conv2d(input_nc, 64, kernel_size=7, padding=0),
                                         norm_layer(64), activation)
        ### downsample
        self.down1 = nn.Sequential(nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1),
                                   norm_layer(128), activation)
        self.down2 = nn.Sequential(nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1),
                                   norm_layer(256), activation)
        self.down3 = nn.Sequential(nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1),
                                   norm_layer(512), activation)
        if self.deep:
            self.down4 = nn.Sequential(nn.Conv2d(512, 512, kernel_size=3, stride=2, padding=1),
                                       norm_layer(512), activation)

        ### resnet blocks
        BN = []
        for i in range(n_blocks):
            BN += [
                ResnetBlock_Adain(512, latent_size=latent_size, padding_type=padding_type, activation=activation)]
        self.BottleNeck = nn.Sequential(*BN)

        if self.deep:
            self.up4 = nn.Sequential(
                nn.Upsample(scale_factor=2, mode='bilinear'),
                nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
                nn.BatchNorm2d(512), activation
            )
        self.up3 = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='bilinear'),
            nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(256), activation
        )
        self.up2 = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='bilinear'),
            nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(128), activation
        )
        self.up1 = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='bilinear'),
            nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(64), activation
        )
        self.last_layer = nn.Sequential(nn.ReflectionPad2d(3), nn.Conv2d(64, output_nc, kernel_size=7, padding=0),
                                        nn.Tanh())

    def forward(self, input, dlatents):
        x = input  # 3*224*224
        skip1 = self.first_layer(x)
        skip2 = self.down1(skip1)
        skip3 = self.down2(skip2)
        if self.deep:
            skip4 = self.down3(skip3)
            x = self.down4(skip4)
        else:
            x = self.down3(skip3)

        # every bottleneck block is conditioned on the same identity latent
        for i in range(len(self.BottleNeck)):
            x = self.BottleNeck[i](x, dlatents)

        if self.deep:
            x = self.up4(x)
        x = self.up3(x)
        x = self.up2(x)
        x = self.up1(x)
        x = self.last_layer(x)
        x = (x + 1) / 2  # rescale Tanh output from [-1, 1] to [0, 1]
        return x
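

# Hypothetical end-to-end sketch (input resolution taken from the 3*224*224 comment in
# forward; the parameter values here are assumptions): the generator downsamples three
# times (four when deep=True), runs the AdaIN bottleneck at 512 channels, then
# upsamples back to the input resolution.
def _demo_generator():
    netG = Generator_Adain_Upsample(input_nc=3, output_nc=3, latent_size=512, n_blocks=6)
    img = torch.randn(1, 3, 224, 224)
    dlatents = torch.randn(1, 512)
    out = netG(img, dlatents)
    assert out.shape == (1, 3, 224, 224)
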
class Discriminator(nn.Module):
    def __init__(self, input_nc, norm_layer=nn.BatchNorm2d, use_sigmoid=False):
        super(Discriminator, self).__init__()

        kw = 4
        padw = 1
        self.down1 = nn.Sequential(
            nn.Conv2d(input_nc, 64, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)
        )
        self.down2 = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=kw, stride=2, padding=padw),
            norm_layer(128), nn.LeakyReLU(0.2, True)
        )
        self.down3 = nn.Sequential(
            nn.Conv2d(128, 256, kernel_size=kw, stride=2, padding=padw),
            norm_layer(256), nn.LeakyReLU(0.2, True)
        )
        self.down4 = nn.Sequential(
            nn.Conv2d(256, 512, kernel_size=kw, stride=2, padding=padw),
            norm_layer(512), nn.LeakyReLU(0.2, True)
        )
        self.conv1 = nn.Sequential(
            nn.Conv2d(512, 512, kernel_size=kw, stride=1, padding=padw),
            norm_layer(512),
            nn.LeakyReLU(0.2, True)
        )
        if use_sigmoid:
            self.conv2 = nn.Sequential(
                nn.Conv2d(512, 1, kernel_size=kw, stride=1, padding=padw), nn.Sigmoid()
            )
        else:
            self.conv2 = nn.Sequential(
                nn.Conv2d(512, 1, kernel_size=kw, stride=1, padding=padw)
            )

    def forward(self, input):
        out = []
        x = self.down1(input)
        out.append(x)
        x = self.down2(x)
        out.append(x)
        x = self.down3(x)
        out.append(x)
        x = self.down4(x)
        out.append(x)
        x = self.conv1(x)
        out.append(x)
        x = self.conv2(x)
        out.append(x)
        return out
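

# Hypothetical usage sketch: the discriminator returns every intermediate activation
# in a list (handy for feature-matching losses); the final entry is the patch-level
# prediction map. The 224x224 input size is an assumption for illustration.
def _demo_discriminator():
    netD = Discriminator(input_nc=3)
    feats = netD(torch.randn(1, 3, 224, 224))
    assert len(feats) == 6
    # feats[-1] holds one logit per patch, e.g. shape [1, 1, 12, 12] for 224x224 input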