
Searched refs:parallelize (Results 1 – 25 of 80) sorted by relevance

/aosp_15_r20/external/junit/src/main/java/org/junit/experimental/
ParallelComputer.java
32 private static Runner parallelize(Runner runner) { in parallelize() method in ParallelComputer
58 return this.classes ? parallelize(suite) : suite; in getSuite()
65 return methods ? parallelize(runner) : runner; in getRunner()
/aosp_15_r20/external/pytorch/test/distributed/_composable/test_composability/
test_2d_composability.py
122 model.parallelize(tp_mesh, dp_mesh, False)
154 model.parallelize(
187 model.parallelize(
266 def parallelize(_model: Transformer, mesh: DeviceMesh, use_seq_parallel: bool): function
267 _model = Transformer.parallelize(_model, mesh["tp"], use_seq_parallel)
278 model_no_cp = parallelize(
293 model_cp = parallelize(Transformer(model_args), global_mesh, use_seq_parallel)
320 model_cp = parallelize(
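Note: a hedged sketch of the TP + FSDP composition these tests exercise, using the public DeviceMesh and tensor-parallel APIs rather than the test-internal Transformer.parallelize helper. The mesh shape and module names are illustrative, the fully_shard import path varies across recent PyTorch releases, and the snippet assumes a distributed launch (e.g. torchrun with 4 ranks) so the process group is initialized.

import torch.nn as nn
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.fsdp import fully_shard  # older releases: torch.distributed._composable.fsdp
from torch.distributed.tensor.parallel import (
    ColwiseParallel,
    RowwiseParallel,
    parallelize_module,
)

# 2D mesh: outer "dp" dim for data parallelism, inner "tp" dim for tensor parallelism.
mesh = init_device_mesh("cuda", (2, 2), mesh_dim_names=("dp", "tp"))

model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 16))
# Shard the two Linear layers column-/row-wise across the "tp" mesh dimension...
model = parallelize_module(
    model,
    mesh["tp"],
    {"0": ColwiseParallel(), "2": RowwiseParallel()},
)
# ...then shard the resulting parameters across the "dp" mesh dimension with FSDP.
fully_shard(model, mesh=mesh["dp"])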
/aosp_15_r20/external/pytorch/test/distributed/_composable/fsdp/
test_fully_shard_training.py
917 model.parallelize(
950 model.parallelize(
997 def parallelize(_model: Transformer, mesh: DeviceMesh, use_seq_parallel: bool): function
998 _model = Transformer.parallelize(_model, mesh["tp"], use_seq_parallel)
1008 model = parallelize(Transformer(model_args), global_mesh, True)
1051 def parallelize(_model: Transformer, mesh: DeviceMesh, use_seq_parallel: bool): function
1052 _model = Transformer.parallelize(_model, mesh["tp"], use_seq_parallel)
1063 model_no_cp = parallelize(
1078 model_cp = parallelize(Transformer(model_args), global_mesh, use_seq_parallel)
1105 model_cp = parallelize(
[all …]
test_fully_shard_clip_grad_norm_.py
138 model.parallelize(
/aosp_15_r20/external/tensorflow/tensorflow/core/framework/
dataset_options.proto
116 // Whether to parallelize stateless map transformations.
128 // Whether to parallelize copying of batch elements. This optimization is
143 // Whether to parallelize stateless filter transformations.
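Note: these proto fields back the public tf.data options; a minimal sketch of toggling them from Python, assuming current TF 2.x attribute names that mirror the proto fields above.

import tensorflow as tf

ds = tf.data.Dataset.range(1024).map(lambda x: x * 2).batch(32)

options = tf.data.Options()
options.experimental_optimization.map_parallelization = True     # stateless maps
options.experimental_optimization.parallel_batch = True          # batch-element copies
options.experimental_optimization.filter_parallelization = True  # stateless filters
ds = ds.with_options(options)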
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
TensorIteratorReduce.cpp
127 void TensorIteratorBase::foreach_reduced_elt(loop_subiter_t loop, bool parallelize) { in foreach_reduced_elt() argument
139 at::in_parallel_region() || !parallelize) { in foreach_reduced_elt()
/aosp_15_r20/external/swiftshader/third_party/marl/src/
parallelize_test.cpp
23 marl::parallelize([&] { a = true; }, [&] { b = true; }, [&] { c = true; }); in TEST_P()
/aosp_15_r20/external/swiftshader/third_party/marl/include/marl/
parallelize.h
52 MARL_NO_EXPORT inline void parallelize(F0&& f0, FN&&... fn) { in parallelize() function
/aosp_15_r20/external/coreboot/payloads/libpayload/vboot/
Kconfig
57 Use arm64 SIMD instructions (NEON) to parallelize two multiplications
/aosp_15_r20/external/swiftshader/third_party/marl/
BUILD.gn
37 "include/marl/parallelize.h",
/aosp_15_r20/external/grpc-grpc/examples/python/multiprocessing/
README.md
18 take advantage of this to parallelize their CPU-intensive operations.
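Note: a hedged sketch of the pattern this example README describes: forking several gRPC server processes that share one port via SO_REUSEPORT so CPU-bound handlers run in parallel across cores. Servicer registration is elided (the generated add_*Servicer_to_server call depends on your .proto), and the bind address is illustrative.

from concurrent import futures
import multiprocessing

import grpc

def _run_server(bind_address):
    server = grpc.server(
        futures.ThreadPoolExecutor(max_workers=1),
        options=[("grpc.so_reuseport", 1)],  # let sibling processes share the port
    )
    # add_YourServicer_to_server(YourServicer(), server)  # hypothetical, from generated code
    server.add_insecure_port(bind_address)
    server.start()
    server.wait_for_termination()

if __name__ == "__main__":
    procs = [
        multiprocessing.Process(target=_run_server, args=("[::]:50051",))
        for _ in range(multiprocessing.cpu_count())
    ]
    for p in procs:
        p.start()
    for p in procs:
        p.join()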
/aosp_15_r20/external/pytorch/docs/source/
distributed.tensor.parallel.rst
14 The entrypoint to parallelize your ``nn.Module`` using Tensor Parallelism is:
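Note: the snippet above truncates before naming the entrypoint; in current PyTorch it is parallelize_module, also used in the 2D sketch earlier. A minimal single-mesh sketch (the mesh size and module names are illustrative, and a distributed launch with an initialized process group is assumed):

import torch.nn as nn
from torch.distributed.device_mesh import init_device_mesh
from torch.distributed.tensor.parallel import ColwiseParallel, parallelize_module

mesh = init_device_mesh("cuda", (4,))
block = nn.Sequential(nn.Linear(8, 8))
# Shard the Linear layer's weight column-wise across the 4 ranks.
block = parallelize_module(block, mesh, {"0": ColwiseParallel()})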
/aosp_15_r20/developers/samples/android/renderScript/BasicRenderScript/
README.md
16 The RenderScript runtime will parallelize work across all processors available on a device, such as…
/aosp_15_r20/developers/build/prebuilts/gradle/BasicRenderScript/
README.md
16 The RenderScript runtime will parallelize work across all processors available on a device, such as…
/aosp_15_r20/external/AFLplusplus/docs/
third_party_tools.md
54 parallelize afl-tmin, startup, and data collection.
/aosp_15_r20/external/pytorch/torch/distributed/tensor/examples/
comm_mode_features_example.py
91 model = Transformer.parallelize(model, device_mesh, is_seq_parallel)
/aosp_15_r20/external/pytorch/test/distributed/_tensor/debug/
test_comm_mode_features.py
258 model2 = Transformer.parallelize(model2, device_mesh, is_seq_parallel)
/aosp_15_r20/external/webrtc/modules/audio_processing/test/py_quality_assessment/
README.md
58 - Check the `apm_quality_assessment.sh` as an example script to parallelize the
/aosp_15_r20/external/angle/src/libANGLE/renderer/metal/
ProgramMtl.mm
227 // TODO: parallelize the above too. http://anglebug.com/41488637
/aosp_15_r20/external/pytorch/docs/source/notes/
windows.rst
41 As an alternative, we can use ``Ninja`` to parallelize CUDA
/aosp_15_r20/external/pytorch/test/distributed/tensor/parallel/
test_tp_examples.py
209 return Transformer.parallelize(
/aosp_15_r20/external/pdfium/docs/
safetynet.md
155 * --num-workers: how many workers to use to parallelize test case runs. Defaults
/aosp_15_r20/prebuilts/gcc/linux-x86/host/x86_64-w64-mingw32-4.8/lib/gcc/x86_64-w64-mingw32/4.8.3/plugin/include/
timevar.def
165 DEFTIMEVAR (TV_TREE_PARALLELIZE_LOOPS, "tree parallelize loops")
/aosp_15_r20/external/pytorch/torch/testing/_internal/distributed/_tensor/
common_dtensor.py
209 def parallelize( member in Transformer
/aosp_15_r20/external/pytorch/aten/src/ATen/
TensorIterator.h
271 void foreach_reduced_elt(loop_subiter_t loop, bool parallelize = true);
