Conv2d.patch 6.7 KB

--- /usr/local/lib/python3.5/dist-packages/torch/nn/modules/conv.py
+++ /usr/local/lib/python3.5/dist-packages/torch/nn/modules/conv.py
@@ -15,8 +15,6 @@
:math:`N` is a batch size, :math:`C` denotes a number of channels,
:math:`H` is a height of input planes in pixels, and :math:`W` is
width in pixels.
-
- This module supports :ref:`TensorFloat32<tf32_on_ampere>`.
* :attr:`stride` controls the stride for the cross-correlation, a single
number or a tuple.
@@ -39,7 +37,7 @@
concatenated.
* At groups= :attr:`in_channels`, each input channel is convolved with
its own set of filters, of size:
- :math:`\left\lfloor\frac{out\_channels}{in\_channels}\right\rfloor`.
+ :math:`\left\lfloor\frac{C_\text{out}}{C_\text{in}}\right\rfloor`.
The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:
@@ -47,14 +45,14 @@
- a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
and the second `int` for the width dimension
- Note:
+ .. note::
Depending of the size of your kernel, several (of the last)
columns of the input might be lost, because it is a valid `cross-correlation`_,
and not a full `cross-correlation`_.
It is up to the user to add proper padding.
- Note:
+ .. note::
When `groups == in_channels` and `out_channels == K * in_channels`,
where `K` is a positive integer, this operation is also termed in
@@ -64,29 +62,17 @@
a depthwise convolution with a depthwise multiplier `K`, can be constructed by arguments
:math:`(in\_channels=C_{in}, out\_channels=C_{in} \times K, ..., groups=C_{in})`.
- Note:
- In some circumstances when using the CUDA backend with CuDNN, this operator
- may select a nondeterministic algorithm to increase performance. If this is
- undesirable, you can try to make the operation deterministic (potentially at
- a performance cost) by setting ``torch.backends.cudnn.deterministic =
- True``.
- Please see the notes on :doc:`/notes/randomness` for background.
-
+ .. include:: cudnn_deterministic.rst
Args:
in_channels (int): Number of channels in the input image
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
- padding (int or tuple, optional): Zero-padding added to both sides of
- the input. Default: 0
- padding_mode (string, optional): ``'zeros'``, ``'reflect'``,
- ``'replicate'`` or ``'circular'``. Default: ``'zeros'``
+ padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0
dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
- groups (int, optional): Number of blocked connections from input
- channels to output channels. Default: 1
- bias (bool, optional): If ``True``, adds a learnable bias to the
- output. Default: ``True``
+ groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
+ bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
Shape:
- Input: :math:`(N, C_{in}, H_{in}, W_{in})`
@@ -102,18 +88,16 @@
Attributes:
weight (Tensor): the learnable weights of the module of shape
- :math:`(\text{out\_channels}, \frac{\text{in\_channels}}{\text{groups}},`
- :math:`\text{kernel\_size[0]}, \text{kernel\_size[1]})`.
- The values of these weights are sampled from
- :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
- :math:`k = \frac{groups}{C_\text{in} * \prod_{i=0}^{1}\text{kernel\_size}[i]}`
- bias (Tensor): the learnable bias of the module of shape
- (out_channels). If :attr:`bias` is ``True``,
- then the values of these weights are
- sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
- :math:`k = \frac{groups}{C_\text{in} * \prod_{i=0}^{1}\text{kernel\_size}[i]}`
+ (out_channels, in_channels, kernel_size[0], kernel_size[1]).
+ The values of these weights are sampled from
+ :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
+ :math:`k = \frac{1}{C_\text{in} * \prod_{i=0}^{1}\text{kernel\_size}[i]}`
+ bias (Tensor): the learnable bias of the module of shape (out_channels). If :attr:`bias` is ``True``,
+ then the values of these weights are
+ sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
+ :math:`k = \frac{1}{C_\text{in} * \prod_{i=0}^{1}\text{kernel\_size}[i]}`
- Examples:
+ Examples::
>>> # With square kernels and equal stride
>>> m = nn.Conv2d(16, 33, 3, stride=2)
@@ -130,34 +114,18 @@
.. _link:
https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
"""
- def __init__(
- self,
- in_channels: int,
- out_channels: int,
- kernel_size: _size_2_t,
- stride: _size_2_t = 1,
- padding: _size_2_t = 0,
- dilation: _size_2_t = 1,
- groups: int = 1,
- bias: bool = True,
- padding_mode: str = 'zeros' # TODO: refine this type
- ):
+ def __init__(self, in_channels, out_channels, kernel_size, stride=1,
+ padding=0, dilation=1, groups=1, bias=True):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(Conv2d, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
- False, _pair(0), groups, bias, padding_mode)
+ False, _pair(0), groups, bias)
- def _conv_forward(self, input, weight):
- if self.padding_mode != 'zeros':
- return F.conv2d(F.pad(input, self._reversed_padding_repeated_twice, mode=self.padding_mode),
- weight, self.bias, self.stride,
- _pair(0), self.dilation, self.groups)
- return F.conv2d(input, weight, self.bias, self.stride,
+ @weak_script_method
+ def forward(self, input):
+ return F.conv2d(input, self.weight, self.bias, self.stride,
self.padding, self.dilation, self.groups)
- def forward(self, input: Tensor) -> Tensor:
- return self._conv_forward(input, self.weight)
-
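
The depthwise case described in the docstring hunk above (`groups == in_channels` and `out_channels == K * in_channels`) can be exercised directly. Below is a minimal sketch, separate from the patch itself; the channel counts and spatial size are arbitrary and chosen only for illustration.

import torch
import torch.nn as nn

C_in, K = 16, 2                        # depthwise multiplier K
m = nn.Conv2d(in_channels=C_in,
              out_channels=C_in * K,   # out_channels = K * in_channels
              kernel_size=3,
              padding=1,
              groups=C_in)             # one filter group per input channel

x = torch.randn(1, C_in, 32, 32)
print(m(x).shape)        # torch.Size([1, 32, 32, 32])
print(m.weight.shape)    # torch.Size([32, 1, 3, 3]); in_channels/groups == 1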
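
After the patch, `forward` is a single `F.conv2d` call using the module's own weight, bias and hyperparameters, which can be checked from user code. Another small sketch, again separate from the patch; the sizes follow the docstring example above.

import torch
import torch.nn as nn
import torch.nn.functional as F

m = nn.Conv2d(16, 33, kernel_size=3, stride=2)
x = torch.randn(20, 16, 50, 100)

# The same computation the simplified forward() performs internally.
y_functional = F.conv2d(x, m.weight, m.bias, m.stride,
                        m.padding, m.dilation, m.groups)

print(torch.allclose(m(x), y_functional))   # True
print(y_functional.shape)                   # torch.Size([20, 33, 24, 49])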