Linear.patch 2.6 KB

--- /usr/local/lib/python3.5/dist-packages/torch/nn/modules/linear.py
+++ /usr/local/lib/python3.5/dist-packages/torch/nn/modules/linear.py
@@ -1,19 +1,17 @@
 class Linear(Module):
     r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
-
-    This module supports :ref:`TensorFloat32<tf32_on_ampere>`.
 
     Args:
         in_features: size of each input sample
         out_features: size of each output sample
-        bias: If set to ``False``, the layer will not learn an additive bias.
+        bias: If set to False, the layer will not learn an additive bias.
             Default: ``True``
 
     Shape:
-        - Input: :math:`(N, *, H_{in})` where :math:`*` means any number of
-          additional dimensions and :math:`H_{in} = \text{in\_features}`
-        - Output: :math:`(N, *, H_{out})` where all but the last dimension
-          are the same shape as the input and :math:`H_{out} = \text{out\_features}`.
+        - Input: :math:`(N, *, \text{in\_features})` where :math:`*` means any number of
+          additional dimensions
+        - Output: :math:`(N, *, \text{out\_features})` where all but the last dimension
+          are the same shape as the input.
 
     Attributes:
         weight: the learnable weights of the module of shape
@@ -33,12 +31,9 @@
         >>> print(output.size())
         torch.Size([128, 30])
     """
-    __constants__ = ['in_features', 'out_features']
-    in_features: int
-    out_features: int
-    weight: Tensor
+    __constants__ = ['bias']
 
-    def __init__(self, in_features: int, out_features: int, bias: bool = True) -> None:
+    def __init__(self, in_features, out_features, bias=True):
         super(Linear, self).__init__()
         self.in_features = in_features
         self.out_features = out_features
@@ -49,17 +44,18 @@
             self.register_parameter('bias', None)
         self.reset_parameters()
 
-    def reset_parameters(self) -> None:
+    def reset_parameters(self):
         init.kaiming_uniform_(self.weight, a=math.sqrt(5))
         if self.bias is not None:
             fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
             bound = 1 / math.sqrt(fan_in)
             init.uniform_(self.bias, -bound, bound)
 
-    def forward(self, input: Tensor) -> Tensor:
+    @weak_script_method
+    def forward(self, input):
         return F.linear(input, self.weight, self.bias)
 
-    def extra_repr(self) -> str:
+    def extra_repr(self):
         return 'in_features={}, out_features={}, bias={}'.format(
             self.in_features, self.out_features, self.bias is not None
         )
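
Quick sanity check after applying the patch: a minimal sketch, assuming the patched linear.py is the copy picked up by "import torch". It just reruns the shape example from the docstring above (input of size 128 x 20, Linear(20, 30), output 128 x 30); the names m, x, y are illustrative only.

    import torch
    import torch.nn as nn

    # Same setup as the docstring example: in_features=20, out_features=30.
    m = nn.Linear(20, 30, bias=True)
    x = torch.randn(128, 20)   # input of shape (N, in_features)
    y = m(x)                   # computes y = x A^T + b via F.linear
    print(y.size())            # expected: torch.Size([128, 30])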