/aosp_15_r20/external/pytorch/test/distributed/

test_c10d_ucc.py
    33: from torch.nn.parallel import DistributedDataParallel
    454: cpu_model = DistributedDataParallel(
    465: gpu_model = DistributedDataParallel(
    537: cpu_model = DistributedDataParallel(
    546: gpu_model = DistributedDataParallel(
    574: model = DistributedDataParallel(
    617: model = DistributedDataParallel(
    696: ddp_withload = DistributedDataParallel(
    700: ddp_withoutload = DistributedDataParallel(
    774: ddp_model = DistributedDataParallel(
    [all …]

test_c10d_gloo.py
    42: from torch.nn.parallel import DistributedDataParallel
    1595: cpu_model = DistributedDataParallel(
    1606: gpu_model = DistributedDataParallel(
    1670: cpu_model = DistributedDataParallel(
    1679: gpu_model = DistributedDataParallel(
    1707: model = DistributedDataParallel(
    1750: model = DistributedDataParallel(
    1801: DistributedDataParallel._set_params_and_buffers_to_ignore_for_model(
    1805: DistributedDataParallel(
    1871: ddp_withload = DistributedDataParallel(
    [all …]

test_c10d_nccl.py
    41: from torch.nn.parallel import DistributedDataParallel
    301: model = DistributedDataParallel(
    987: ddp_model = DistributedDataParallel(
    994: ddp_model = DistributedDataParallel(
    1002: ddp_model = DistributedDataParallel(model, process_group=process_group)
    1008: ddp_model = DistributedDataParallel(
    1018: ddp_model = DistributedDataParallel(
    1080: model = DistributedDataParallel(
    1156: DistributedDataParallel(
    1204: model = DistributedDataParallel(
    [all …]

test_c10d_common.py
    30: from torch.nn.parallel import DistributedDataParallel
    343: ddp_model = DistributedDataParallel(
    375: ddp_model = DistributedDataParallel(
    421: ddp_model = nn.parallel.DistributedDataParallel(
    832: gpu_model = DistributedDataParallel(
    849: gpu_model = DistributedDataParallel(
    898: model = DistributedDataParallel(
    947: model = DistributedDataParallel(
    999: ddp = DistributedDataParallel(

test_c10d_pypg.py
    13: from torch.nn.parallel import DistributedDataParallel as DDP
/aosp_15_r20/external/pytorch/torch/testing/_internal/distributed/

distributed_test.py
    56: from torch.nn.parallel import DistributedDataParallel
    4311: model_DDP = nn.parallel.DistributedDataParallel(
    4361: model_DDP = nn.parallel.DistributedDataParallel(
    4405: RuntimeError, lambda: nn.parallel.DistributedDataParallel(nn.Module())
    4423: ddp_model = nn.parallel.DistributedDataParallel(
    4438: ddp_model = torch.nn.parallel.DistributedDataParallel(model)
    4458: net = torch.nn.parallel.DistributedDataParallel(
    4508: ddp_model = torch.nn.parallel.DistributedDataParallel(
    4520: ddp_model = torch.nn.parallel.DistributedDataParallel(
    4532: ddp_model = torch.nn.parallel.DistributedDataParallel(
    [all …]

ddp_under_dist_autograd_test.py
    16: from torch.nn.parallel import DistributedDataParallel
    154: self.fc2 = DistributedDataParallel(
    205: self.hybrid_module = DistributedDataParallel(
    527: ddp_net = DistributedDataParallel(net)
    595: ddp_model = DistributedDataParallel(model)
    640: ddp_model = DistributedDataParallel(layer2)
    687: ddp_layer2 = DistributedDataParallel(layer2, device_ids=[self.rank])
    697: ddp_layer4 = DistributedDataParallel(layer4, device_ids=[self.rank])
/aosp_15_r20/external/pytorch/docs/source/

ddp_comm_hooks.rst
    6: …DistributedDataParallel <https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDa…
    18: :func:`torch.nn.parallel.DistributedDataParallel.register_comm_hook`
    134: from torch.nn.parallel import DistributedDataParallel
    170: ddp_model = DistributedDataParallel(model, device_ids=[rank])
    190: new_ddp_model = DistributedDataParallel(SimpleModel().to(rank), device_ids=[rank])
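The ddp_comm_hooks.rst matches above register a communication hook on an already-constructed DDP instance. Below is a minimal sketch of that pattern, assuming a launcher such as torchrun supplies the rendezvous environment and one GPU per rank; the Linear model stands in for the doc's SimpleModel.

    # Minimal sketch of registering a built-in DDP communication hook.
    import torch
    import torch.distributed as dist
    from torch.nn.parallel import DistributedDataParallel
    from torch.distributed.algorithms.ddp_comm_hooks import default_hooks

    dist.init_process_group("nccl")            # env:// rendezvous from the launcher
    rank = dist.get_rank()
    model = torch.nn.Linear(10, 10).to(rank)   # stand-in for the doc's SimpleModel
    ddp_model = DistributedDataParallel(model, device_ids=[rank])

    # Compress gradients to fp16 before the all-reduce; state=None makes the hook
    # fall back to the default process group.
    ddp_model.register_comm_hook(state=None, hook=default_hooks.fp16_compress_hook)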
distributed.rst
    154: machines. The class :func:`torch.nn.parallel.DistributedDataParallel` builds on this
    162: :func:`torch.nn.parallel.DistributedDataParallel` wrapper may still have advantages over other
    659: … debug logging when models trained with :func:`torch.nn.parallel.DistributedDataParallel` are init…
    690: ddp_model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[rank])
    759: …_DEBUG=INFO`` enhances crash logging in :func:`torch.nn.parallel.DistributedDataParallel` due to u…
    760: must be passed into :func:`torch.nn.parallel.DistributedDataParallel` initialization if there are p…
    761: to be used in loss computation as :func:`torch.nn.parallel.DistributedDataParallel` does not suppor…
    762: models, thus when crashing with an error, :func:`torch.nn.parallel.DistributedDataParallel` will lo…
    770: …the keyword argument `find_unused_parameters=True` to `torch.nn.parallel.DistributedDataParallel`,…
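These distributed.rst excerpts point at two related knobs: TORCH_DISTRIBUTED_DEBUG for richer logging, and find_unused_parameters for models whose parameters do not all contribute to the loss. A hedged sketch of combining them follows; the model and backend are placeholders, and the debug variable is normally exported before launching.

    # TORCH_DISTRIBUTED_DEBUG=INFO torchrun --nproc_per_node=2 this_script.py
    import torch
    import torch.distributed as dist
    from torch.nn.parallel import DistributedDataParallel

    dist.init_process_group("nccl")
    rank = dist.get_rank()
    model = torch.nn.Linear(20, 10).to(rank)   # placeholder model

    # find_unused_parameters=True lets DDP tolerate parameters that receive no
    # gradient in a given iteration, at the cost of an extra graph traversal.
    ddp_model = DistributedDataParallel(
        model, device_ids=[rank], find_unused_parameters=True
    )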
/aosp_15_r20/external/pytorch/docs/source/notes/

ddp.rst
    7: The implementation of :class:`torch.nn.parallel.DistributedDataParallel`
    11: :class:`torch.nn.parallel.DistributedDataParallel` (DDP) transparently performs
    18: Let us start with a simple :class:`torch.nn.parallel.DistributedDataParallel`
    32: from torch.nn.parallel import DistributedDataParallel as DDP
    83: :class:`torch.nn.parallel.DistributedDataParallel` by diving into details of
    164: ``DistributedDataParallel`` uses ``ProcessGroup::broadcast()`` to send
    172: DistributedDataParallel section in Implementation
    177: the ``forward`` function for the ``nn.parallel.DistributedDataParallel``
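ddp.rst starts from a small end-to-end example before diving into the design (broadcast of the initial state, bucketed all-reduce during backward). A condensed, hedged version of that kind of example is below; the model, loss, and iteration count are placeholders, and torchrun is assumed to supply the rendezvous environment.

    import torch
    import torch.distributed as dist
    import torch.nn as nn
    from torch.nn.parallel import DistributedDataParallel as DDP

    def main():
        dist.init_process_group("nccl")
        rank = dist.get_rank()
        model = nn.Linear(10, 5).to(rank)            # placeholder for ToyModel
        ddp_model = DDP(model, device_ids=[rank])    # broadcasts rank 0's weights
        optimizer = torch.optim.SGD(ddp_model.parameters(), lr=1e-3)
        loss_fn = nn.MSELoss()

        for _ in range(10):
            optimizer.zero_grad()
            outputs = ddp_model(torch.randn(20, 10, device=rank))
            labels = torch.randn(20, 5, device=rank)
            loss_fn(outputs, labels).backward()      # gradients are all-reduced here
            optimizer.step()

        dist.destroy_process_group()

    if __name__ == "__main__":
        main()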
amp_examples.rst
    312: DistributedDataParallel, one GPU per process
    315: :class:`torch.nn.parallel.DistributedDataParallel`'s documentation recommends one GPU per process f…
    316: performance. In this case, ``DistributedDataParallel`` does not spawn threads internally,
    319: DistributedDataParallel, multiple GPUs per process
    322: Here :class:`torch.nn.parallel.DistributedDataParallel` may spawn a side thread to run the forward …
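The AMP note distinguishes the recommended one-GPU-per-process case from the multi-GPU-per-process case. A hedged sketch of the former, combining DDP with autocast and gradient scaling (model, data, and learning rate are placeholders):

    import torch
    import torch.distributed as dist
    from torch.nn.parallel import DistributedDataParallel

    dist.init_process_group("nccl")
    rank = dist.get_rank()
    model = torch.nn.Linear(10, 10).to(rank)
    ddp_model = DistributedDataParallel(model, device_ids=[rank])
    optimizer = torch.optim.SGD(ddp_model.parameters(), lr=0.01)
    scaler = torch.cuda.amp.GradScaler()

    for _ in range(10):
        optimizer.zero_grad()
        with torch.cuda.amp.autocast():
            loss = ddp_model(torch.randn(8, 10, device=rank)).sum()
        scaler.scale(loss).backward()   # DDP all-reduces the scaled gradients
        scaler.step(optimizer)
        scaler.update()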
cuda.rst
    818: Use nn.parallel.DistributedDataParallel instead of multiprocessing or nn.DataParallel
    822: using :class:`~torch.nn.parallel.DistributedDataParallel` to utilize more
    830: It is recommended to use :class:`~torch.nn.parallel.DistributedDataParallel`,
    834: The difference between :class:`~torch.nn.parallel.DistributedDataParallel` and
    835: :class:`~torch.nn.DataParallel` is: :class:`~torch.nn.parallel.DistributedDataParallel`
    841: If you use :class:`~torch.nn.parallel.DistributedDataParallel`, you could use
    1208: Usage with DistributedDataParallel
    1236: model = DistributedDataParallel(model)
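cuda.rst recommends DistributedDataParallel (one process per GPU) over the thread-based nn.DataParallel. The snippet below sketches how the two wrappers are set up differently; it is illustrative only, a real program would pick one of the two, and make_net is a placeholder.

    import torch
    import torch.distributed as dist
    import torch.nn as nn
    from torch.nn.parallel import DistributedDataParallel

    def make_net():                  # placeholder model factory
        return nn.Linear(10, 10)

    # nn.DataParallel: a single process, scatter/gather across all visible GPUs.
    dp_model = nn.DataParallel(make_net().cuda())

    # DistributedDataParallel: one process per GPU (e.g. launched via torchrun),
    # each rank owning exactly one replica.
    dist.init_process_group("nccl")
    rank = dist.get_rank()
    ddp_model = DistributedDataParallel(make_net().to(rank), device_ids=[rank])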
/aosp_15_r20/external/pytorch/torch/distributed/benchmarks/

README.md
    10: …ltiple trainers using [DistributedDataParallel](https://pytorch.org/docs/stable/nn.html#torch.nn.p…
    11: …age (if needed) using [DistributedDataParallel](https://pytorch.org/docs/stable/nn.html#torch.nn.p…
    18: …r and then executes the FC layer which is wrapped and replicated via DDP (DistributedDataParallel).
/aosp_15_r20/external/pytorch/torch/distributed/algorithms/_optimizer_overlap/

optimizer_overlap.py
    13: from torch.nn.parallel import DistributedDataParallel
    46: def register_ddp(self, ddp: DistributedDataParallel) -> None:
    69: def register_ddp(self, ddp_inst: DistributedDataParallel):
/aosp_15_r20/external/pytorch/torch/nn/parallel/

__init__.py
    5: from torch.nn.parallel.distributed import DistributedDataParallel
    27: class DistributedDataParallelCPU(DistributedDataParallel):

distributed.py
    277: assert isinstance(ddp, DistributedDataParallel), (
    326: class DistributedDataParallel(Module, Joinable):
    1448: DistributedDataParallel._active_ddp_module = self
    1452: DistributedDataParallel._active_ddp_module = None
/aosp_15_r20/external/pytorch/test/distributed/algorithms/ddp_comm_hooks/

test_ddp_hooks.py
    19: from torch.nn.parallel import DistributedDataParallel
    95: gpu_model = DistributedDataParallel(
    224: gpu_model = DistributedDataParallel(
/aosp_15_r20/external/pytorch/torch/distributed/algorithms/ddp_comm_hooks/

ddp_zero_hook.py
    9: from torch.nn.parallel.distributed import DistributedDataParallel
    176: ddp: DistributedDataParallel,
    338: ddp: DistributedDataParallel,
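ddp_zero_hook.py provides hooks that let a ZeroRedundancyOptimizer step overlap with DDP's gradient communication. The sketch below only shows the shape of that wiring; the hook_with_zero_step function and the overlap_with_ddp flag are assumptions about this module's API and the optimizer's options (they are not visible in the listing above and are release-dependent), so treat this as an outline rather than a recipe.

    import torch
    import torch.distributed as dist
    from torch.nn.parallel import DistributedDataParallel
    from torch.distributed.optim import ZeroRedundancyOptimizer
    from torch.distributed.algorithms.ddp_comm_hooks.default_hooks import allreduce_hook
    from torch.distributed.algorithms.ddp_comm_hooks.ddp_zero_hook import hook_with_zero_step

    dist.init_process_group("nccl")
    rank = dist.get_rank()
    model = torch.nn.Linear(10, 10).to(rank)
    ddp = DistributedDataParallel(model, device_ids=[rank])

    zero = ZeroRedundancyOptimizer(
        ddp.parameters(),
        optimizer_class=torch.optim.Adam,
        overlap_with_ddp=True,   # assumed flag: run the sharded step inside the hook
        lr=0.01,
    )
    # Wrap the plain allreduce hook so the optimizer step is fused into it.
    ddp.register_comm_hook(None, hook_with_zero_step(allreduce_hook, ddp, zero))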
/aosp_15_r20/external/pytorch/test/distributed/fsdp/

test_fsdp_multiple_forward.py
    9: from torch.nn.parallel import DistributedDataParallel
    54: model = DistributedDataParallel(model, device_ids=[self.rank])

test_fsdp_freezing_weights.py
    12: from torch.nn.parallel import DistributedDataParallel
    179: model = DistributedDataParallel(model, **ddp_kwargs)
/aosp_15_r20/external/pytorch/torch/distributed/_composable/

replicate.py
    9: from torch.nn.parallel import DistributedDataParallel
    115: self._ddp = DistributedDataParallel(self._param_list, **kwargs)
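replicate.py backs the composable replicate() API with an ordinary DistributedDataParallel instance (line 115 above). Below is a heavily hedged sketch of how that API is typically used; the import path, the in-place application, and the kwargs pass-through are assumptions about an experimental, private API and may change between releases.

    import torch
    import torch.distributed as dist
    from torch.distributed._composable import replicate   # assumed import path

    dist.init_process_group("nccl")
    rank = dist.get_rank()
    model = torch.nn.Linear(10, 10).to(rank)

    # replicate() attaches DDP-style synchronization to the module in place;
    # extra kwargs are assumed to be forwarded to the underlying DDP instance.
    replicate(model, device_ids=[rank])

    loss = model(torch.randn(4, 10, device=rank)).sum()
    loss.backward()   # gradients are all-reduced across ranks, as with DDP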
/aosp_15_r20/external/pytorch/benchmarks/distributed/rpc/parameter_server/trainer/

ddp_models.py
    1: from torch.nn.parallel import DistributedDataParallel as DDP
/aosp_15_r20/external/pytorch/test/distributed/checkpoint/e2e/

test_e2e_save_and_load.py
    35: from torch.nn.parallel import DistributedDataParallel
    181: model = DistributedDataParallel(dummy_model)
/aosp_15_r20/external/pytorch/torch/distributed/pipelining/

stage.py
    15: from torch.nn.parallel import DistributedDataParallel
    464: if isinstance(self.submod, DistributedDataParallel):
    507: if isinstance(self.submod, DistributedDataParallel):
/aosp_15_r20/external/pytorch/benchmarks/dynamo/

distributed.py
    10: from torch.nn.parallel import DistributedDataParallel as DDP