/aosp_15_r20/external/tensorflow/tensorflow/dtensor/mlir/expansions/

einsum_spmd_expander.cc
  174  const ShardingSpec& sharding_spec = layouts[index].dim(offset);  [in GetLabelToShardingSpec(), local]
  236  const ShardingSpec& sharding_spec = loc->second;  [in GetSpecsFromLabelsAndMap(), local]
  472  const ShardingSpec& sharding_spec =  [in MaybeRelayoutInputs(), local]

dtensor_op_spmd_expander.cc
  154  for (const std::string& sharding_spec : mask_layout.sharding_spec_strs()) {  [in ComputeRelayoutLayout(), local]
  184  for (const std::string& sharding_spec : target_layout.sharding_spec_strs())  [in ExpandOp(), local]

slice_spmd_expander.cc
  396  std::vector<std::string> sharding_spec;  [in ApplyNewAndShrinkMasksToLayout(), local]

meta_spmd_expander.cc
  111  const std::string& sharding_spec =  [in LayoutFromUnpackedTensors(), local]

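These SPMD expanders walk a DTensor layout one dimension at a time, reading the per-dimension sharding spec string (via sharding_spec_strs() or Layout::dim()) to decide how each dimension is split across the mesh. A minimal Python-level sketch of the spec list they iterate over, assuming a single-client CPU mesh; the mesh and dimension names are illustrative, not taken from these files:

  import tensorflow as tf
  from tensorflow.experimental import dtensor

  # Assumes a single-client setup; build a 1-D mesh over the local CPU devices.
  mesh = dtensor.create_mesh([("batch", dtensor.num_local_devices("CPU"))], device_type="CPU")

  # One sharding spec string per tensor dimension: dim 0 is split over the
  # "batch" mesh dimension, dim 1 is replicated (dtensor.UNSHARDED).
  layout = dtensor.Layout(["batch", dtensor.UNSHARDED], mesh)

  # Roughly the list the C++ expanders walk via sharding_spec_strs().
  print(layout.sharding_specs)  # e.g. ['batch', 'unsharded']
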
/aosp_15_r20/external/tensorflow/tensorflow/dtensor/cc/

tensor_layout.cc
  682  const std::string& sharding_spec = dim.sharding_spec();  [in GetLayout(), local]
  694  const std::string& sharding_spec = dim.sharding_spec();  [in GetLayout(), local]
  885  const std::string& Layout::sharding_spec(int idx) const {  [function in tensorflow::dtensor::Layout]
  892  for (const auto& sharding_spec : sharding_specs_) {  [in num_shards(), local]
  907  for (const auto& sharding_spec : sharding_specs_) {  [in IsFullyReplicated(), local]

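tensor_layout.cc implements the accessors the expanders rely on: Layout::sharding_spec(idx) returns the spec string for one dimension, num_shards() derives shard counts from the spec list, and IsFullyReplicated() checks that every spec is unsharded. A hedged sketch of the Python-level counterparts; exact method names and availability depend on the TensorFlow version:

  import tensorflow as tf
  from tensorflow.experimental import dtensor

  mesh = dtensor.create_mesh([("x", dtensor.num_local_devices("CPU"))], device_type="CPU")

  sharded = dtensor.Layout(["x", dtensor.UNSHARDED], mesh)
  replicated = dtensor.Layout.replicated(mesh, rank=2)

  print(sharded.sharding_specs[0])         # 'x', like Layout::sharding_spec(0)
  print(sharded.num_shards(0))             # shards along dim 0, like num_shards()
  print(replicated.is_fully_replicated())  # True, like Layout::IsFullyReplicated()
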
/aosp_15_r20/external/pytorch/torch/distributed/_shard/sharding_spec/

api.py
  101  def _has_custom_op(sharding_spec, op):  [argument]
  113  sharding_spec, op: Callable, types, args, kwargs, process_group  [argument]

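api.py here defines the ShardingSpec dispatch machinery (_has_custom_op and friends) that lets a spec override how selected ops are handled. The most common concrete spec is ChunkShardingSpec; a minimal construction sketch with placeholder ranks and devices:

  from torch.distributed._shard.sharding_spec import ChunkShardingSpec

  # Shard along dim 0, placing shards on two illustrative ranks/devices.
  spec = ChunkShardingSpec(
      dim=0,
      placements=[
          "rank:0/cuda:0",
          "rank:1/cuda:1",
      ],
  )
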
/aosp_15_r20/external/pytorch/torch/distributed/_shard/sharded_tensor/

api.py
  140   sharding_spec=None,  [argument]
  883   sharding_spec=None,  [argument]
  1015  def sharding_spec(self) -> shard_spec.ShardingSpec:  [member in ShardedTensor]

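In sharded_tensor/api.py, the sharding_spec=None arguments carry the spec supplied when a ShardedTensor is created, and the sharding_spec() accessor at line 1015 returns it back. A minimal sketch, assuming torch.distributed is already initialized across the ranks named in the spec (e.g. under torchrun):

  import torch.distributed._shard.sharded_tensor as sharded_tensor
  from torch.distributed._shard.sharding_spec import ChunkShardingSpec

  spec = ChunkShardingSpec(dim=0, placements=["rank:0/cuda:0", "rank:1/cuda:1"])

  # sharding_spec is the first argument of the creation ops.
  st = sharded_tensor.empty(spec, 10, 20)

  # The ShardedTensor.sharding_spec() member listed above.
  assert isinstance(st.sharding_spec(), ChunkShardingSpec)
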
/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/python/

sharded_device_array.cc
  121  py::object aval, ShardingSpec sharding_spec, py::list device_buffers,  [in Make()]
  224  bool weak_type) {  [in RegisterTypes()]

pmap_lib.cc
  60   ShardingSpec sharding_spec;  [member]
  116  const ShardingSpec& sharding_spec = input_spec.sharding_spec;  [in ShardArg(), local]
  805  py::class_<ShardingSpec> sharding_spec(pmap_lib, "ShardingSpec");  [in BuildPmapSubmodule(), local]

sharded_device_array.h
  307  ShardedDeviceArray(pybind11::object aval, ShardingSpec sharding_spec,  [in ShardedDeviceArray()]

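ShardingSpec and ShardedDeviceArray in these files are jaxlib's internal C++ representation of how jax.pmap splits the leading axis across devices: ShardArg() shards each input according to its spec, and BuildPmapSubmodule() exposes the types to Python via pybind11. A user-level sketch of the behaviour they implement, without touching the internal types:

  import jax
  import jax.numpy as jnp

  n = jax.local_device_count()
  x = jnp.arange(n * 4.0).reshape(n, 4)  # leading axis must equal the device count

  # pmap splits the leading axis across devices, one shard per device
  # (historically a ShardedDeviceArray described by a ShardingSpec).
  y = jax.pmap(lambda row: row * 2.0)(x)
  print(y.shape)  # (n, 4)
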
/aosp_15_r20/external/pytorch/test/distributed/_shard/sharded_tensor/

test_sharded_tensor_reshard.py
  29  def _run_sharded_tensor_reshard(self, sharding_spec, reshard_spec, input_size):  [argument]

test_sharded_tensor.py
  2282  def _generate_st_from_chunk_local_tensor(self, st_size, sharding_spec):  [argument]

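The reshard test exercises moving a ShardedTensor from one spec to another. A rough sketch of the operation under test, assuming an initialized process group and that ShardedTensor.reshard() is available in this checkout:

  import torch.distributed._shard.sharded_tensor as sharded_tensor
  from torch.distributed._shard.sharding_spec import ChunkShardingSpec

  src_spec = ChunkShardingSpec(dim=0, placements=["rank:0/cuda:0", "rank:1/cuda:1"])
  dst_spec = ChunkShardingSpec(dim=1, placements=["rank:0/cuda:0", "rank:1/cuda:1"])

  st = sharded_tensor.rand(src_spec, 8, 8)
  st = st.reshard(dst_spec)  # move from row sharding to column sharding
  assert st.sharding_spec().dim == 1
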
/aosp_15_r20/external/tensorflow/tensorflow/dtensor/proto/

layout.proto
  27  string sharding_spec = 2;  [field]

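The sharding_spec field at line 27 is the serialized form of one per-dimension spec string inside a layout proto. A hedged sketch of the Python-level round trip through the serialized representation; the exact string/proto format is an implementation detail:

  import tensorflow as tf
  from tensorflow.experimental import dtensor

  mesh = dtensor.create_mesh([("x", dtensor.num_local_devices("CPU"))], device_type="CPU")
  layout = dtensor.Layout(["x", dtensor.UNSHARDED], mesh)

  # Serialize and parse back; the per-dimension spec strings survive the trip.
  serialized = layout.to_string()
  restored = dtensor.Layout.from_string(serialized)
  assert restored.sharding_specs == layout.sharding_specs
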
/aosp_15_r20/external/tensorflow/tensorflow/dtensor/mlir/

propagate_default_layout.cc
  74  for (const std::string& sharding_spec : layout.sharding_spec_strs())  [in PropagateDTensorLayoutForRelayout(), local]

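propagate_default_layout.cc reads the sharding_spec_strs() of layouts attached by user-facing relayout requests and propagates them through the graph. A minimal sketch of the user-level call whose layout this pass picks up, assuming a single-client CPU mesh:

  import tensorflow as tf
  from tensorflow.experimental import dtensor

  mesh = dtensor.create_mesh([("x", dtensor.num_local_devices("CPU"))], device_type="CPU")

  replicated = dtensor.Layout.replicated(mesh, rank=2)
  sharded = dtensor.Layout(["x", dtensor.UNSHARDED], mesh)

  # Create a replicated DTensor, then request a new per-dimension sharding.
  t = dtensor.call_with_layout(tf.ones, replicated, [4, 4])
  t = dtensor.relayout(t, sharded)
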