
Searched defs:sharding_spec (Results 1 – 14 of 14) sorted by relevance

/aosp_15_r20/external/tensorflow/tensorflow/dtensor/mlir/expansions/
einsum_spmd_expander.cc  174  const ShardingSpec& sharding_spec = layouts[index].dim(offset); in GetLabelToShardingSpec() local
236 const ShardingSpec& sharding_spec = loc->second; in GetSpecsFromLabelsAndMap() local
472 const ShardingSpec& sharding_spec = in MaybeRelayoutInputs() local
dtensor_op_spmd_expander.cc  154  for (const std::string& sharding_spec : mask_layout.sharding_spec_strs()) { in ComputeRelayoutLayout() local
184 for (const std::string& sharding_spec : target_layout.sharding_spec_strs()) in ExpandOp() local
slice_spmd_expander.cc  396  std::vector<std::string> sharding_spec; in ApplyNewAndShrinkMasksToLayout() local
meta_spmd_expander.cc  111  const std::string& sharding_spec = in LayoutFromUnpackedTensors() local
/aosp_15_r20/external/tensorflow/tensorflow/dtensor/cc/
tensor_layout.cc  682  const std::string& sharding_spec = dim.sharding_spec(); in GetLayout() local
694 const std::string& sharding_spec = dim.sharding_spec(); in GetLayout() local
885 const std::string& Layout::sharding_spec(int idx) const { in sharding_spec() function in tensorflow::dtensor::Layout
892 for (const auto& sharding_spec : sharding_specs_) { in num_shards() local
907 for (const auto& sharding_spec : sharding_specs_) { in IsFullyReplicated() local
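
The tensor_layout.cc hits above are the C++ accessors for a DTensor Layout's per-dimension sharding spec strings (Layout::sharding_spec, num_shards, IsFullyReplicated). Below is a minimal Python sketch of the same concept through the public tf.experimental.dtensor API; it assumes a CPU-only setup where the host CPU is split into two logical devices, and attribute names may vary across TensorFlow versions.

import tensorflow as tf
from tensorflow.experimental import dtensor

# Split the single host CPU into two logical devices so the 2-way mesh
# below has enough devices to map onto (must run before building the mesh).
cpu = tf.config.list_physical_devices("CPU")[0]
tf.config.set_logical_device_configuration(
    cpu, [tf.config.LogicalDeviceConfiguration()] * 2)

mesh = dtensor.create_mesh([("x", 2)], device_type="CPU")

# First tensor dimension sharded over mesh dimension "x", second replicated;
# each entry is a plain sharding spec string, mirroring sharding_spec_strs()
# on the C++ side.
layout = dtensor.Layout(["x", dtensor.UNSHARDED], mesh)
print(layout.sharding_specs)  # expected: ['x', 'unsharded']
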
/aosp_15_r20/external/pytorch/torch/distributed/_shard/sharding_spec/
api.py  101  def _has_custom_op(sharding_spec, op): argument
113 sharding_spec, op: Callable, types, args, kwargs, process_group argument
/aosp_15_r20/external/pytorch/torch/distributed/_shard/sharded_tensor/
api.py  140  sharding_spec=None, argument
883 sharding_spec=None, argument
1015 def sharding_spec(self) -> shard_spec.ShardingSpec: member in ShardedTensor
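
These api.py hits are the PyTorch side: a ShardedTensor carries a ShardingSpec that records how its shards are placed across ranks. Below is a minimal sketch using ChunkShardingSpec; it assumes a two-rank process group has already been initialized (e.g. with the gloo backend), and note that torch.distributed._shard is a private, version-dependent API.

import torch.distributed as dist  # process group must be initialized first
from torch.distributed._shard import sharded_tensor
from torch.distributed._shard.sharding_spec import ChunkShardingSpec

# Shard along dim 0, one chunk per rank, CPU placements for the gloo backend.
spec = ChunkShardingSpec(
    dim=0,
    placements=[
        "rank:0/cpu",
        "rank:1/cpu",
    ],
)

# Each rank allocates only its local chunk of the 8x4 tensor;
# sharding_spec() returns the spec used to build it.
st = sharded_tensor.empty(spec, 8, 4)
print(st.sharding_spec())
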
/aosp_15_r20/external/tensorflow/tensorflow/compiler/xla/python/
sharded_device_array.cc  121  py::object aval, ShardingSpec sharding_spec, py::list device_buffers, in Make()
224 bool weak_type) { in RegisterTypes()
pmap_lib.cc  60  ShardingSpec sharding_spec; member
116 const ShardingSpec& sharding_spec = input_spec.sharding_spec; in ShardArg() local
805 py::class_<ShardingSpec> sharding_spec(pmap_lib, "ShardingSpec"); in BuildPmapSubmodule() local
sharded_device_array.h  307  ShardedDeviceArray(pybind11::object aval, ShardingSpec sharding_spec, in ShardedDeviceArray()
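
The pmap_lib.cc and sharded_device_array hits are the XLA Python bindings behind jax.pmap: each per-device input and output carries a ShardingSpec describing how it is split across devices. Below is a minimal sketch of how that surfaces in Python; the exposed names depend on the JAX version (newer releases return a jax.Array with a .sharding attribute, older ones a ShardedDeviceArray with a .sharding_spec).

import jax
import jax.numpy as jnp

# pmap maps the leading axis of its input onto the local devices; the result
# is stored one shard per device, which is what ShardingSpec /
# ShardedDeviceArray model on the C++ side.
n_dev = jax.local_device_count()
x = jnp.arange(n_dev * 4, dtype=jnp.float32).reshape(n_dev, 4)

y = jax.pmap(lambda v: v * 2.0)(x)
print(y.sharding)  # per-device placement of the pmap output
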
/aosp_15_r20/external/pytorch/test/distributed/_shard/sharded_tensor/
test_sharded_tensor_reshard.py  29  def _run_sharded_tensor_reshard(self, sharding_spec, reshard_spec, input_size): argument
test_sharded_tensor.py  2282  def _generate_st_from_chunk_local_tensor(self, st_size, sharding_spec): argument
/aosp_15_r20/external/tensorflow/tensorflow/dtensor/proto/
layout.proto  27  string sharding_spec = 2; field
/aosp_15_r20/external/tensorflow/tensorflow/dtensor/mlir/
propagate_default_layout.cc  74  for (const std::string& sharding_spec : layout.sharding_spec_strs()) in PropagateDTensorLayoutForRelayout() local