DataParallel.patch 4.8 KB

--- /usr/local/lib/python3.5/dist-packages/torch/nn/parallel/data_parallel.py
+++ /usr/local/lib/python3.5/dist-packages/torch/nn/parallel/data_parallel.py
@@ -10,16 +10,13 @@
     The batch size should be larger than the number of GPUs used.
-    .. warning::
-        It is recommended to use :class:`~torch.nn.parallel.DistributedDataParallel`,
-        instead of this class, to do multi-GPU training, even if there is only a single
-        node. See: :ref:`cuda-nn-ddp-instead` and :ref:`ddp`.
+    See also: :ref:`cuda-nn-dataparallel-instead`
     Arbitrary positional and keyword inputs are allowed to be passed into
-    DataParallel but some types are specially handled. tensors will be
-    **scattered** on dim specified (default 0). tuple, list and dict types will
-    be shallow copied. The other types will be shared among different threads
-    and can be corrupted if written to in the model's forward pass.
+    DataParallel EXCEPT Tensors. All tensors will be scattered on dim
+    specified (default 0). Primitive types will be broadcasted, but all
+    other types will be a shallow copy and can be corrupted if written to in
+    the model's forward pass.
     The parallelized :attr:`module` must have its parameters and buffers on
     ``device_ids[0]`` before running this :class:`~torch.nn.DataParallel`
@@ -27,9 +24,9 @@
     .. warning::
         In each forward, :attr:`module` is **replicated** on each device, so any
-        updates to the running module in ``forward`` will be lost. For example,
+        updates to the runing module in ``forward`` will be lost. For example,
         if :attr:`module` has a counter attribute that is incremented in each
-        ``forward``, it will always stay at the initial value because the update
+        ``forward``, it will always stay at the initial value becasue the update
         is done on the replicas which are destroyed after ``forward``. However,
         :class:`~torch.nn.DataParallel` guarantees that the replica on
         ``device[0]`` will have its parameters and buffers sharing storage with
@@ -74,7 +71,7 @@
     Example::
         >>> net = torch.nn.DataParallel(model, device_ids=[0, 1, 2])
-        >>> output = net(input_var)  # input_var can be on any device, including CPU
+        >>> output = net(input_var)
     """
     # TODO: update notes/cuda.rst when this class handles 8+ GPUs well
@@ -82,46 +79,30 @@
     def __init__(self, module, device_ids=None, output_device=None, dim=0):
         super(DataParallel, self).__init__()
-        device_type = _get_available_device_type()
-        if device_type is None:
+        if not torch.cuda.is_available():
             self.module = module
             self.device_ids = []
             return
         if device_ids is None:
-            device_ids = _get_all_device_indices()
-
+            device_ids = list(range(torch.cuda.device_count()))
         if output_device is None:
             output_device = device_ids[0]
         self.dim = dim
         self.module = module
-        self.device_ids = [_get_device_index(x, True) for x in device_ids]
+        self.device_ids = list(map(lambda x: _get_device_index(x, True), device_ids))
         self.output_device = _get_device_index(output_device, True)
-        self.src_device_obj = torch.device(device_type, self.device_ids[0])
         _check_balance(self.device_ids)
         if len(self.device_ids) == 1:
-            self.module.to(self.src_device_obj)
+            self.module.cuda(device_ids[0])
     def forward(self, *inputs, **kwargs):
         if not self.device_ids:
             return self.module(*inputs, **kwargs)
-
-        for t in chain(self.module.parameters(), self.module.buffers()):
-            if t.device != self.src_device_obj:
-                raise RuntimeError("module must have its parameters and buffers "
-                                   "on device {} (device_ids[0]) but found one of "
-                                   "them on device: {}".format(self.src_device_obj, t.device))
-
         inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
-        # for forward function without any inputs, empty list and dict will be created
-        # so the module can be executed on one device which is the first one in device_ids
-        if not inputs and not kwargs:
-            inputs = ((),)
-            kwargs = ({},)
-
         if len(self.device_ids) == 1:
             return self.module(*inputs[0], **kwargs[0])
         replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
@@ -129,7 +110,7 @@
         return self.gather(outputs, self.output_device)
     def replicate(self, module, device_ids):
-        return replicate(module, device_ids, not torch.is_grad_enabled())
+        return replicate(module, device_ids)
     def scatter(self, inputs, kwargs, device_ids):
         return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
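
For reference, a minimal usage sketch consistent with the Example:: block and the scatter/replicate/gather pipeline in forward above. The toy Linear model, tensor shapes, and the assumption of a machine with at least two CUDA devices are illustrative only; both the patched and unpatched class are called the same way.

import torch
import torch.nn as nn

# Assumes at least two CUDA devices are visible; the toy model is a placeholder.
model = nn.Linear(10, 5).cuda(0)                # parameters/buffers on device_ids[0]
net = nn.DataParallel(model, device_ids=[0, 1])

input_var = torch.randn(16, 10).cuda(0)         # dim 0 (batch) is scattered across GPUs
output = net(input_var)                         # replicas run in parallel, outputs are
print(output.size())                            # gathered on output_device -> torch.Size([16, 5])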
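
The replication warning in the docstring can be illustrated with a short sketch (again assuming two CUDA devices; CounterNet is a made-up name): a plain Python attribute incremented inside forward is only updated on the per-device replicas, which are discarded after the call, so the wrapped module never sees the change.

import torch
import torch.nn as nn

class CounterNet(nn.Module):
    """Toy module with a plain Python counter mutated in forward()."""
    def __init__(self):
        super(CounterNet, self).__init__()
        self.fc = nn.Linear(10, 5)
        self.calls = 0

    def forward(self, x):
        self.calls += 1            # increments the replica's copy only
        return self.fc(x)

model = CounterNet().cuda(0)
net = nn.DataParallel(model, device_ids=[0, 1])
net(torch.randn(8, 10).cuda(0))
print(model.calls)                 # still 0: the replicas were destroyed after forward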