DataParallel.patch 4.4 KB

--- /usr/local/lib/python3.5/dist-packages/torch/nn/parallel/data_parallel.py
+++ /usr/local/lib/python3.5/dist-packages/torch/nn/parallel/data_parallel.py
@@ -10,16 +10,13 @@
     The batch size should be larger than the number of GPUs used.

-    .. warning::
-        It is recommended to use :class:`~torch.nn.parallel.DistributedDataParallel`,
-        instead of this class, to do multi-GPU training, even if there is only a single
-        node. See: :ref:`cuda-nn-ddp-instead` and :ref:`ddp`.
+    See also: :ref:`cuda-nn-dataparallel-instead`

     Arbitrary positional and keyword inputs are allowed to be passed into
-    DataParallel but some types are specially handled. tensors will be
-    **scattered** on dim specified (default 0). tuple, list and dict types will
-    be shallow copied. The other types will be shared among different threads
-    and can be corrupted if written to in the model's forward pass.
+    DataParallel EXCEPT Tensors. All tensors will be scattered on dim
+    specified (default 0). Primitive types will be broadcasted, but all
+    other types will be a shallow copy and can be corrupted if written to in
+    the model's forward pass.

     The parallelized :attr:`module` must have its parameters and buffers on
     ``device_ids[0]`` before running this :class:`~torch.nn.DataParallel`
@@ -27,9 +24,9 @@

     .. warning::
         In each forward, :attr:`module` is **replicated** on each device, so any
-        updates to the running module in ``forward`` will be lost. For example,
+        updates to the running module in ``forward`` will be lost. For example,
         if :attr:`module` has a counter attribute that is incremented in each
-        ``forward``, it will always stay at the initial value because the update
+        ``forward``, it will always stay at the initial value because the update
         is done on the replicas which are destroyed after ``forward``. However,
         :class:`~torch.nn.DataParallel` guarantees that the replica on
         ``device[0]`` will have its parameters and buffers sharing storage with
@@ -74,7 +71,7 @@
     Example::

         >>> net = torch.nn.DataParallel(model, device_ids=[0, 1, 2])
-        >>> output = net(input_var)  # input_var can be on any device, including CPU
+        >>> output = net(input_var)
     """

     # TODO: update notes/cuda.rst when this class handles 8+ GPUs well
@@ -82,15 +79,13 @@
     def __init__(self, module, device_ids=None, output_device=None, dim=0):
         super(DataParallel, self).__init__()

-        device_type = _get_available_device_type()
-        if device_type is None:
+        if not torch.cuda.is_available():
             self.module = module
             self.device_ids = []
             return

         if device_ids is None:
-            device_ids = _get_all_device_indices()
-
+            device_ids = list(range(torch.cuda.device_count()))
         if output_device is None:
             output_device = device_ids[0]

@@ -98,23 +93,15 @@
         self.module = module
         self.device_ids = list(map(lambda x: _get_device_index(x, True), device_ids))
         self.output_device = _get_device_index(output_device, True)
-        self.src_device_obj = torch.device(device_type, self.device_ids[0])

         _check_balance(self.device_ids)

         if len(self.device_ids) == 1:
-            self.module.to(self.src_device_obj)
+            self.module.cuda(device_ids[0])

     def forward(self, *inputs, **kwargs):
         if not self.device_ids:
             return self.module(*inputs, **kwargs)
-
-        for t in chain(self.module.parameters(), self.module.buffers()):
-            if t.device != self.src_device_obj:
-                raise RuntimeError("module must have its parameters and buffers "
-                                   "on device {} (device_ids[0]) but found one of "
-                                   "them on device: {}".format(self.src_device_obj, t.device))
-
         inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
         if len(self.device_ids) == 1:
             return self.module(*inputs[0], **kwargs[0])
@@ -123,7 +110,7 @@
         return self.gather(outputs, self.output_device)

     def replicate(self, module, device_ids):
-        return replicate(module, device_ids, not torch.is_grad_enabled())
+        return replicate(module, device_ids)

     def scatter(self, inputs, kwargs, device_ids):
         return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
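
For reference, a minimal usage sketch of the class as patched (the model, tensor shapes, and device ids below are illustrative assumptions, not part of the patch). Note that the revert removes the forward-time check that parameters live on device_ids[0], so the caller is responsible for placing the module there:

    import torch
    import torch.nn as nn

    # Assumed two-GPU machine; adjust device_ids to the available hardware.
    model = nn.Linear(10, 5).cuda(0)        # parameters must sit on device_ids[0]
    net = torch.nn.DataParallel(model, device_ids=[0, 1])

    x = torch.randn(32, 10).cuda(0)         # the batch is scattered along dim 0
    out = net(x)                            # replicate -> parallel forward -> gather
    print(out.shape)                        # torch.Size([32, 5]), gathered on device 0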