/aosp_15_r20/external/junit/src/main/java/org/junit/experimental/
  ParallelComputer.java
    32: private static Runner parallelize(Runner runner) {    [in ParallelComputer.parallelize()]
    58: return this.classes ? parallelize(suite) : suite;     [in getSuite()]
    65: return methods ? parallelize(runner) : runner;        [in getRunner()]

/aosp_15_r20/external/pytorch/test/distributed/_composable/test_composability/
  test_2d_composability.py
    122: model.parallelize(tp_mesh, dp_mesh, False)
    154: model.parallelize(
    187: model.parallelize(
    266: def parallelize(_model: Transformer, mesh: DeviceMesh, use_seq_parallel: bool):    [function]
    267: _model = Transformer.parallelize(_model, mesh["tp"], use_seq_parallel)
    278: model_no_cp = parallelize(
    293: model_cp = parallelize(Transformer(model_args), global_mesh, use_seq_parallel)
    320: model_cp = parallelize(

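The tests above compose tensor parallelism with composable FSDP on a 2-D device mesh through an internal Transformer.parallelize helper. For orientation, here is a minimal, hedged sketch of that pattern using only the public torch.distributed APIs; the ToyModel class, the 2x2 mesh shape, and the layer names are illustrative assumptions, not code taken from these tests. It is meant to run under torchrun with 4 ranks.

    # Hedged sketch: tensor parallelism on the "tp" mesh dimension plus
    # composable FSDP (fully_shard) on the "dp" dimension.
    # ToyModel and the mesh shape are assumptions for illustration.
    import torch
    import torch.nn as nn
    from torch.distributed._composable.fsdp import fully_shard
    from torch.distributed.device_mesh import init_device_mesh
    from torch.distributed.tensor.parallel import (
        ColwiseParallel,
        RowwiseParallel,
        parallelize_module,
    )

    class ToyModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.in_proj = nn.Linear(16, 32)
            self.out_proj = nn.Linear(32, 16)

        def forward(self, x):
            return self.out_proj(torch.relu(self.in_proj(x)))

    def build_2d_parallel_model():
        # Outer mesh dim shards parameters with FSDP; inner dim does tensor parallel.
        mesh = init_device_mesh("cuda", (2, 2), mesh_dim_names=("dp", "tp"))
        model = ToyModel()
        # Column-/row-shard the two linear layers across the "tp" dimension.
        model = parallelize_module(
            model,
            mesh["tp"],
            {"in_proj": ColwiseParallel(), "out_proj": RowwiseParallel()},
        )
        # Layer composable FSDP across the "dp" dimension on top of the TP model.
        fully_shard(model, mesh=mesh["dp"])
        return model
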
/aosp_15_r20/external/pytorch/test/distributed/_composable/fsdp/
  test_fully_shard_training.py
    917:  model.parallelize(
    950:  model.parallelize(
    997:  def parallelize(_model: Transformer, mesh: DeviceMesh, use_seq_parallel: bool):    [function]
    998:  _model = Transformer.parallelize(_model, mesh["tp"], use_seq_parallel)
    1008: model = parallelize(Transformer(model_args), global_mesh, True)
    1051: def parallelize(_model: Transformer, mesh: DeviceMesh, use_seq_parallel: bool):    [function]
    1052: _model = Transformer.parallelize(_model, mesh["tp"], use_seq_parallel)
    1063: model_no_cp = parallelize(
    1078: model_cp = parallelize(Transformer(model_args), global_mesh, use_seq_parallel)
    1105: model_cp = parallelize(
    … (additional matches not shown)
  test_fully_shard_clip_grad_norm_.py
    138:  model.parallelize(

/aosp_15_r20/external/tensorflow/tensorflow/core/framework/
  dataset_options.proto
    116: // Whether to parallelize stateless map transformations.
    128: // Whether to parallelize copying of batch elements. This optimization is
    143: // Whether to parallelize stateless filter transformations.

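The three proto fields above surface in Python as optimization toggles on tf.data.Options. A minimal sketch of enabling them, assuming an arbitrary placeholder pipeline:

    # Hedged sketch: turning on the parallelization optimizations described in
    # dataset_options.proto; the range/map/filter pipeline is placeholder data.
    import tensorflow as tf

    options = tf.data.Options()
    options.experimental_optimization.map_parallelization = True      # stateless map transformations
    options.experimental_optimization.parallel_batch = True           # parallel copying of batch elements
    options.experimental_optimization.filter_parallelization = True   # stateless filter transformations

    dataset = (
        tf.data.Dataset.range(1_000)
        .map(lambda x: x * 2)
        .filter(lambda x: x % 3 == 0)
        .batch(32)
        .with_options(options)
    )
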
/aosp_15_r20/external/pytorch/aten/src/ATen/native/
  TensorIteratorReduce.cpp
    127: void TensorIteratorBase::foreach_reduced_elt(loop_subiter_t loop, bool parallelize) {    [in foreach_reduced_elt(), argument]
    139: at::in_parallel_region() || !parallelize) {                                              [in foreach_reduced_elt()]

/aosp_15_r20/external/swiftshader/third_party/marl/src/
  parallelize_test.cpp
    23: marl::parallelize([&] { a = true; }, [&] { b = true; }, [&] { c = true; });    [in TEST_P()]

/aosp_15_r20/external/swiftshader/third_party/marl/include/marl/
  parallelize.h
    52: MARL_NO_EXPORT inline void parallelize(F0&& f0, FN&&... fn) {    [function parallelize()]

/aosp_15_r20/external/coreboot/payloads/libpayload/vboot/
  Kconfig
    57: Use arm64 SIMD instructions (NEON) to parallelize two multiplications

/aosp_15_r20/external/swiftshader/third_party/marl/
  BUILD.gn
    37: "include/marl/parallelize.h",

/aosp_15_r20/external/grpc-grpc/examples/python/multiprocessing/
  README.md
    18: take advantage of this to parallelize their CPU-intensive operations.

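That README describes forking one gRPC Python server per core (sharing a port via SO_REUSEPORT) so CPU-bound handlers run in parallel despite the GIL. A hedged sketch of the pattern; the port, worker count, and the stubbed-out servicer registration are assumptions, not the example's actual code.

    # Hedged sketch: one gRPC server process per core, all bound to the same
    # port via SO_REUSEPORT. Servicer registration is stubbed out.
    from concurrent import futures
    import multiprocessing

    import grpc

    def _run_server(bind_address):
        server = grpc.server(
            futures.ThreadPoolExecutor(max_workers=1),
            options=(("grpc.so_reuseport", 1),),
        )
        # Register your generated servicer here, e.g.:
        # my_pb2_grpc.add_MyServiceServicer_to_server(MyServicer(), server)
        server.add_insecure_port(bind_address)
        server.start()
        server.wait_for_termination()

    def main():
        bind_address = "[::]:50051"  # assumed port
        workers = [
            multiprocessing.Process(target=_run_server, args=(bind_address,))
            for _ in range(multiprocessing.cpu_count())
        ]
        for w in workers:
            w.start()
        for w in workers:
            w.join()

    if __name__ == "__main__":
        main()
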
/aosp_15_r20/external/pytorch/docs/source/
  distributed.tensor.parallel.rst
    14: The entrypoint to parallelize your ``nn.Module`` using Tensor Parallelism is:

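The entrypoint that docs line refers to is torch.distributed.tensor.parallel.parallelize_module. Its simplest 1-D usage, sketched on a throwaway nn.Sequential; the mesh size and sharding plan are illustrative assumptions (the 2-D composition with FSDP is sketched earlier in this listing). Meant to run under torchrun with 2 ranks.

    # Hedged sketch: basic Tensor Parallelism via parallelize_module on a 1-D mesh.
    # The model and the sharding plan are placeholders.
    import torch.nn as nn
    from torch.distributed.device_mesh import init_device_mesh
    from torch.distributed.tensor.parallel import (
        ColwiseParallel,
        RowwiseParallel,
        parallelize_module,
    )

    tp_mesh = init_device_mesh("cuda", (2,))  # one tensor-parallel dimension, 2 ranks
    model = nn.Sequential(nn.Linear(16, 32), nn.ReLU(), nn.Linear(32, 16))
    model = parallelize_module(
        model,
        tp_mesh,
        {"0": ColwiseParallel(), "2": RowwiseParallel()},  # shard the two Linear children
    )
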
/aosp_15_r20/developers/samples/android/renderScript/BasicRenderScript/
  README.md
    16: The RenderScript runtime will parallelize work across all processors available on a device, such as…

/aosp_15_r20/developers/build/prebuilts/gradle/BasicRenderScript/
  README.md
    16: The RenderScript runtime will parallelize work across all processors available on a device, such as…

/aosp_15_r20/external/AFLplusplus/docs/
  third_party_tools.md
    54: parallelize afl-tmin, startup, and data collection.

/aosp_15_r20/external/pytorch/torch/distributed/tensor/examples/
  comm_mode_features_example.py
    91: model = Transformer.parallelize(model, device_mesh, is_seq_parallel)

/aosp_15_r20/external/pytorch/test/distributed/_tensor/debug/
  test_comm_mode_features.py
    258: model2 = Transformer.parallelize(model2, device_mesh, is_seq_parallel)

/aosp_15_r20/external/webrtc/modules/audio_processing/test/py_quality_assessment/
  README.md
    58: - Check the `apm_quality_assessment.sh` as an example script to parallelize the

/aosp_15_r20/external/angle/src/libANGLE/renderer/metal/
  ProgramMtl.mm
    227: // TODO: parallelize the above too. http://anglebug.com/41488637

/aosp_15_r20/external/pytorch/docs/source/notes/
  windows.rst
    41: As an alternative, we can use ``Ninja`` to parallelize CUDA

/aosp_15_r20/external/pytorch/test/distributed/tensor/parallel/
  test_tp_examples.py
    209: return Transformer.parallelize(

/aosp_15_r20/external/pdfium/docs/
  safetynet.md
    155: * --num-workers: how many workers to use to parallelize test case runs. Defaults

/aosp_15_r20/prebuilts/gcc/linux-x86/host/x86_64-w64-mingw32-4.8/lib/gcc/x86_64-w64-mingw32/4.8.3/plugin/include/
  timevar.def
    165: DEFTIMEVAR (TV_TREE_PARALLELIZE_LOOPS, "tree parallelize loops")

/aosp_15_r20/external/pytorch/torch/testing/_internal/distributed/_tensor/
  common_dtensor.py
    209: def parallelize(    [member of class Transformer]

/aosp_15_r20/external/pytorch/aten/src/ATen/
  TensorIterator.h
    271: void foreach_reduced_elt(loop_subiter_t loop, bool parallelize = true);