
Searched refs:DistributedDataParallel (Results 1 – 25 of 57) sorted by relevance


/aosp_15_r20/external/pytorch/test/distributed/
test_c10d_ucc.py
33 from torch.nn.parallel import DistributedDataParallel
454 cpu_model = DistributedDataParallel(
465 gpu_model = DistributedDataParallel(
537 cpu_model = DistributedDataParallel(
546 gpu_model = DistributedDataParallel(
574 model = DistributedDataParallel(
617 model = DistributedDataParallel(
696 ddp_withload = DistributedDataParallel(
700 ddp_withoutload = DistributedDataParallel(
774 ddp_model = DistributedDataParallel(
[all …]
test_c10d_gloo.py
42 from torch.nn.parallel import DistributedDataParallel
1595 cpu_model = DistributedDataParallel(
1606 gpu_model = DistributedDataParallel(
1670 cpu_model = DistributedDataParallel(
1679 gpu_model = DistributedDataParallel(
1707 model = DistributedDataParallel(
1750 model = DistributedDataParallel(
1801 DistributedDataParallel._set_params_and_buffers_to_ignore_for_model(
1805 DistributedDataParallel(
1871 ddp_withload = DistributedDataParallel(
[all …]
test_c10d_nccl.py
41 from torch.nn.parallel import DistributedDataParallel
301 model = DistributedDataParallel(
987 ddp_model = DistributedDataParallel(
994 ddp_model = DistributedDataParallel(
1002 ddp_model = DistributedDataParallel(model, process_group=process_group)
1008 ddp_model = DistributedDataParallel(
1018 ddp_model = DistributedDataParallel(
1080 model = DistributedDataParallel(
1156 DistributedDataParallel(
1204 model = DistributedDataParallel(
[all …]
test_c10d_common.py
30 from torch.nn.parallel import DistributedDataParallel
343 ddp_model = DistributedDataParallel(
375 ddp_model = DistributedDataParallel(
421 ddp_model = nn.parallel.DistributedDataParallel(
832 gpu_model = DistributedDataParallel(
849 gpu_model = DistributedDataParallel(
898 model = DistributedDataParallel(
947 model = DistributedDataParallel(
999 ddp = DistributedDataParallel(
test_c10d_pypg.py
13 from torch.nn.parallel import DistributedDataParallel as DDP
/aosp_15_r20/external/pytorch/torch/testing/_internal/distributed/
distributed_test.py
56 from torch.nn.parallel import DistributedDataParallel
4311 model_DDP = nn.parallel.DistributedDataParallel(
4361 model_DDP = nn.parallel.DistributedDataParallel(
4405 RuntimeError, lambda: nn.parallel.DistributedDataParallel(nn.Module())
4423 ddp_model = nn.parallel.DistributedDataParallel(
4438 ddp_model = torch.nn.parallel.DistributedDataParallel(model)
4458 net = torch.nn.parallel.DistributedDataParallel(
4508 ddp_model = torch.nn.parallel.DistributedDataParallel(
4520 ddp_model = torch.nn.parallel.DistributedDataParallel(
4532 ddp_model = torch.nn.parallel.DistributedDataParallel(
[all …]
ddp_under_dist_autograd_test.py
16 from torch.nn.parallel import DistributedDataParallel
154 self.fc2 = DistributedDataParallel(
205 self.hybrid_module = DistributedDataParallel(
527 ddp_net = DistributedDataParallel(net)
595 ddp_model = DistributedDataParallel(model)
640 ddp_model = DistributedDataParallel(layer2)
687 ddp_layer2 = DistributedDataParallel(layer2, device_ids=[self.rank])
697 ddp_layer4 = DistributedDataParallel(layer4, device_ids=[self.rank])
/aosp_15_r20/external/pytorch/docs/source/
ddp_comm_hooks.rst
6 DistributedDataParallel <https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDa…
18 :func:`torch.nn.parallel.DistributedDataParallel.register_comm_hook`
134 from torch.nn.parallel import DistributedDataParallel
170 ddp_model = DistributedDataParallel(model, device_ids=[rank])
190 new_ddp_model = DistributedDataParallel(SimpleModel().to(rank), device_ids=[rank])
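The ddp_comm_hooks.rst lines above document DDP gradient-communication hooks and the register_comm_hook API. As a rough sketch (not taken from that file), registering one of the stock hooks might look like the following; wrap_with_fp16_hook is a hypothetical helper, and a process group is assumed to have been initialized with torch.distributed.init_process_group, with rank as the local GPU index:

    import torch
    from torch.distributed.algorithms.ddp_comm_hooks import default_hooks as default
    from torch.nn.parallel import DistributedDataParallel

    def wrap_with_fp16_hook(model: torch.nn.Module, rank: int) -> DistributedDataParallel:
        # Wrap the local replica; one GPU per process.
        ddp_model = DistributedDataParallel(model.to(rank), device_ids=[rank])
        # Compress gradients to fp16 before the all-reduce; state=None means
        # the hook falls back to the default (world) process group.
        ddp_model.register_comm_hook(state=None, hook=default.fp16_compress_hook)
        return ddp_model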
distributed.rst
154 machines. The class :func:`torch.nn.parallel.DistributedDataParallel` builds on this
162 :func:`torch.nn.parallel.DistributedDataParallel` wrapper may still have advantages over other
659 … debug logging when models trained with :func:`torch.nn.parallel.DistributedDataParallel` are init…
690 ddp_model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[rank])
759 …_DEBUG=INFO`` enhances crash logging in :func:`torch.nn.parallel.DistributedDataParallel` due to u…
760 must be passed into :func:`torch.nn.parallel.DistributedDataParallel` initialization if there are p…
761 to be used in loss computation as :func:`torch.nn.parallel.DistributedDataParallel` does not suppor…
762 models, thus when crashing with an error, :func:`torch.nn.parallel.DistributedDataParallel` will lo…
770 …the keyword argument `find_unused_parameters=True` to `torch.nn.parallel.DistributedDataParallel`,…
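The distributed.rst excerpts above note that find_unused_parameters=True must be passed to DistributedDataParallel when some parameters never contribute to the loss, and that TORCH_DISTRIBUTED_DEBUG=INFO improves the resulting crash logging. A minimal sketch of that situation, assuming a hypothetical BranchyModel and an already-initialized process group:

    import torch
    from torch.nn.parallel import DistributedDataParallel

    class BranchyModel(torch.nn.Module):
        # Hypothetical model: the second branch is only exercised for some
        # inputs, so its parameters may receive no gradient in an iteration.
        def __init__(self):
            super().__init__()
            self.trunk = torch.nn.Linear(10, 10)
            self.branch = torch.nn.Linear(10, 10)

        def forward(self, x, use_branch=False):
            out = self.trunk(x)
            return out + self.branch(x) if use_branch else out

    def wrap(rank):
        # find_unused_parameters=True tells DDP to tolerate parameters that do
        # not appear in the autograd graph of a given forward/backward pass.
        return DistributedDataParallel(
            BranchyModel().to(rank),
            device_ids=[rank],
            find_unused_parameters=True,
        )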
/aosp_15_r20/external/pytorch/docs/source/notes/
ddp.rst
7 The implementation of :class:`torch.nn.parallel.DistributedDataParallel`
11 :class:`torch.nn.parallel.DistributedDataParallel` (DDP) transparently performs
18 Let us start with a simple :class:`torch.nn.parallel.DistributedDataParallel`
32 from torch.nn.parallel import DistributedDataParallel as DDP
83 :class:`torch.nn.parallel.DistributedDataParallel` by diving into details of
164 ``DistributedDataParallel`` uses ``ProcessGroup::broadcast()`` to send
172 DistributedDataParallel section in Implementation
177 the ``forward`` function for the ``nn.parallel.DistributedDataParallel``
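The ddp.rst notes above walk through a simple DistributedDataParallel example. A minimal sketch of that pattern, not copied from the file itself: one process per rank, gloo backend, and placeholder MASTER_ADDR/MASTER_PORT values for a single-node run.

    import os
    import torch
    import torch.distributed as dist
    import torch.multiprocessing as mp
    from torch.nn.parallel import DistributedDataParallel as DDP

    def example(rank, world_size):
        os.environ.setdefault("MASTER_ADDR", "localhost")
        os.environ.setdefault("MASTER_PORT", "29500")
        dist.init_process_group("gloo", rank=rank, world_size=world_size)
        model = torch.nn.Linear(10, 10)
        ddp_model = DDP(model)  # gradients are all-reduced across ranks
        optimizer = torch.optim.SGD(ddp_model.parameters(), lr=0.001)
        loss = ddp_model(torch.randn(20, 10)).sum()
        loss.backward()         # DDP overlaps communication with backward
        optimizer.step()
        dist.destroy_process_group()

    if __name__ == "__main__":
        mp.spawn(example, args=(2,), nprocs=2)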
amp_examples.rst
312 DistributedDataParallel, one GPU per process
315 :class:`torch.nn.parallel.DistributedDataParallel`'s documentation recommends one GPU per process f…
316 performance. In this case, ``DistributedDataParallel`` does not spawn threads internally,
319 DistributedDataParallel, multiple GPUs per process
322 Here :class:`torch.nn.parallel.DistributedDataParallel` may spawn a side thread to run the forward …
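The amp_examples.rst lines above recommend one GPU per DDP process; in that configuration autocast and GradScaler are used exactly as in single-process training. A minimal sketch under that assumption, with ddp_model, data, target, optimizer, and scaler as placeholders:

    import torch
    from torch.cuda.amp import GradScaler, autocast

    # scaler = GradScaler() is created once before the training loop;
    # ddp_model is assumed to already be
    # DistributedDataParallel(model.to(rank), device_ids=[rank]).
    def train_one_step(ddp_model, data, target, optimizer, scaler):
        optimizer.zero_grad()
        with autocast():
            loss = torch.nn.functional.mse_loss(ddp_model(data), target)
        scaler.scale(loss).backward()  # DDP all-reduces the scaled gradients
        scaler.step(optimizer)         # unscales, skips the step on inf/nan
        scaler.update()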
cuda.rst
818 Use nn.parallel.DistributedDataParallel instead of multiprocessing or nn.DataParallel
822 using :class:`~torch.nn.parallel.DistributedDataParallel` to utilize more
830 It is recommended to use :class:`~torch.nn.parallel.DistributedDataParallel`,
834 The difference between :class:`~torch.nn.parallel.DistributedDataParallel` and
835 :class:`~torch.nn.DataParallel` is: :class:`~torch.nn.parallel.DistributedDataParallel`
841 If you use :class:`~torch.nn.parallel.DistributedDataParallel`, you could use
1208 Usage with DistributedDataParallel
1236 model = DistributedDataParallel(model)
/aosp_15_r20/external/pytorch/torch/distributed/benchmarks/
README.md
10 …ltiple trainers using [DistributedDataParallel](https://pytorch.org/docs/stable/nn.html#torch.nn.p…
11 …age (if needed) using [DistributedDataParallel](https://pytorch.org/docs/stable/nn.html#torch.nn.p…
18 …r and then executes the FC layer which is wrapped and replicated via DDP (DistributedDataParallel).
/aosp_15_r20/external/pytorch/torch/distributed/algorithms/_optimizer_overlap/
optimizer_overlap.py
13 from torch.nn.parallel import DistributedDataParallel
46 def register_ddp(self, ddp: DistributedDataParallel) -> None:
69 def register_ddp(self, ddp_inst: DistributedDataParallel):
/aosp_15_r20/external/pytorch/torch/nn/parallel/
__init__.py
5 from torch.nn.parallel.distributed import DistributedDataParallel
27 class DistributedDataParallelCPU(DistributedDataParallel):
distributed.py
277 assert isinstance(ddp, DistributedDataParallel), (
326 class DistributedDataParallel(Module, Joinable):
1448 DistributedDataParallel._active_ddp_module = self
1452 DistributedDataParallel._active_ddp_module = None
/aosp_15_r20/external/pytorch/test/distributed/algorithms/ddp_comm_hooks/
test_ddp_hooks.py
19 from torch.nn.parallel import DistributedDataParallel
95 gpu_model = DistributedDataParallel(
224 gpu_model = DistributedDataParallel(
/aosp_15_r20/external/pytorch/torch/distributed/algorithms/ddp_comm_hooks/
ddp_zero_hook.py
9 from torch.nn.parallel.distributed import DistributedDataParallel
176 ddp: DistributedDataParallel,
338 ddp: DistributedDataParallel,
/aosp_15_r20/external/pytorch/test/distributed/fsdp/
test_fsdp_multiple_forward.py
9 from torch.nn.parallel import DistributedDataParallel
54 model = DistributedDataParallel(model, device_ids=[self.rank])
test_fsdp_freezing_weights.py
12 from torch.nn.parallel import DistributedDataParallel
179 model = DistributedDataParallel(model, **ddp_kwargs)
/aosp_15_r20/external/pytorch/torch/distributed/_composable/
replicate.py
9 from torch.nn.parallel import DistributedDataParallel
115 self._ddp = DistributedDataParallel(self._param_list, **kwargs)
/aosp_15_r20/external/pytorch/benchmarks/distributed/rpc/parameter_server/trainer/
ddp_models.py
1 from torch.nn.parallel import DistributedDataParallel as DDP
/aosp_15_r20/external/pytorch/test/distributed/checkpoint/e2e/
test_e2e_save_and_load.py
35 from torch.nn.parallel import DistributedDataParallel
181 model = DistributedDataParallel(dummy_model)
/aosp_15_r20/external/pytorch/torch/distributed/pipelining/
stage.py
15 from torch.nn.parallel import DistributedDataParallel
464 if isinstance(self.submod, DistributedDataParallel):
507 if isinstance(self.submod, DistributedDataParallel):
/aosp_15_r20/external/pytorch/benchmarks/dynamo/
distributed.py
10 from torch.nn.parallel import DistributedDataParallel as DDP
