--- /usr/local/lib/python3.5/dist-packages/torch/nn/modules/linear.py
+++ /usr/local/lib/python3.5/dist-packages/torch/nn/modules/linear.py
@@ -1,19 +1,17 @@
 class Linear(Module):
     r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
-
-    This module supports :ref:`TensorFloat32<tf32_on_ampere>`.
 
     Args:
         in_features: size of each input sample
         out_features: size of each output sample
-        bias: If set to ``False``, the layer will not learn an additive bias.
+        bias: If set to False, the layer will not learn an additive bias.
            Default: ``True``
 
     Shape:
-        - Input: :math:`(N, *, H_{in})` where :math:`*` means any number of
-          additional dimensions and :math:`H_{in} = \text{in\_features}`
-        - Output: :math:`(N, *, H_{out})` where all but the last dimension
-          are the same shape as the input and :math:`H_{out} = \text{out\_features}`.
+        - Input: :math:`(N, *, \text{in\_features})` where :math:`*` means any number of
+          additional dimensions
+        - Output: :math:`(N, *, \text{out\_features})` where all but the last dimension
+          are the same shape as the input.
 
     Attributes:
         weight: the learnable weights of the module of shape
@@ -33,12 +31,9 @@
         >>> print(output.size())
         torch.Size([128, 30])
     """
-    __constants__ = ['in_features', 'out_features']
-    in_features: int
-    out_features: int
-    weight: Tensor
+    __constants__ = ['bias']
 
-    def __init__(self, in_features: int, out_features: int, bias: bool = True) -> None:
+    def __init__(self, in_features, out_features, bias=True):
         super(Linear, self).__init__()
         self.in_features = in_features
         self.out_features = out_features
@@ -49,17 +44,18 @@
             self.register_parameter('bias', None)
         self.reset_parameters()
 
-    def reset_parameters(self) -> None:
+    def reset_parameters(self):
         init.kaiming_uniform_(self.weight, a=math.sqrt(5))
         if self.bias is not None:
             fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
             bound = 1 / math.sqrt(fan_in)
             init.uniform_(self.bias, -bound, bound)
 
-    def forward(self, input: Tensor) -> Tensor:
+    @weak_script_method
+    def forward(self, input):
         return F.linear(input, self.weight, self.bias)
 
-    def extra_repr(self) -> str:
+    def extra_repr(self):
         return 'in_features={}, out_features={}, bias={}'.format(
             self.in_features, self.out_features, self.bias is not None
         )
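
For context, the behaviour documented in both versions of the docstring is the same. The following is a minimal usage sketch (the names m, x, y are only illustrative) that reproduces the torch.Size([128, 30]) example visible in the hunk above:

import torch
from torch import nn

# The layer from the docstring example: in_features=20, out_features=30.
m = nn.Linear(20, 30)

# Only the last dimension must equal in_features; any leading dimensions are allowed.
x = torch.randn(128, 20)
y = m(x)           # forward() calls F.linear(x, m.weight, m.bias), i.e. y = x A^T + b
print(y.size())    # torch.Size([128, 30])

# extra_repr() controls how the module prints.
print(m)           # Linear(in_features=20, out_features=30, bias=True)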
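
Similarly, a sketch of the default initialization that reset_parameters() applies in both versions, reproduced by hand. It uses the private helper init._calculate_fan_in_and_fan_out exactly as the diff does; for a Linear weight of shape (out_features, in_features) the fan-in equals in_features:

import math
from torch import nn
from torch.nn import init

layer = nn.Linear(20, 30)

# Weight: Kaiming-uniform with a=sqrt(5), as in reset_parameters() above.
init.kaiming_uniform_(layer.weight, a=math.sqrt(5))

# Bias: uniform in [-1/sqrt(fan_in), 1/sqrt(fan_in)]; here fan_in == 20.
fan_in, _ = init._calculate_fan_in_and_fan_out(layer.weight)
bound = 1 / math.sqrt(fan_in)
init.uniform_(layer.bias, -bound, bound)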